diff --git a/.conf-struct.yml b/.conf-struct.yml deleted file mode 100644 index 4619eec..0000000 --- a/.conf-struct.yml +++ /dev/null @@ -1,70 +0,0 @@ -registry: - self-name: capitalizone - -log: - log-proxy: - host: "" - port: 6379 - key: ca_log - -redis: - nodes: [] - -# 级联 CA 配置项 -keymanager: - upper-ca: - - "" # https://user:password@server.ca - self-sign: false # 开启表示 ROOT CA - csr-templates: - root-ca: - o: CI123 ROOT AUTHORITY - expiry: 175200h - intermediate-ca: - o: SITE CA IDENTIFY - ou: "spiffe://site/cluster" - expiry: 175200h - -singleca: - config-path: "" - -election: - enabled: false - id: capitalizone-leader - baseon: configmap - always-leader: false - -gateway-nervs: - enabled: false - endpoint: "" - -http: - ca-listen: 0.0.0.0:8081 - listen: 0.0.0.0:8080 - -mysql: - dsn: "" - -influxdb: - enabled: true - address: "MSP_CUSTOM_INFLUXDB_ADDRESS" #192.168.2.80:8086 - port: 8086 - udp_address: "MSP_CUSTOM_INFLUXDB_UDP_ADDRESS" #influxdb msp数据库的udp地址,ip:port - database: "MSP_CUSTOM_INFLUXDB_DATABASES" # 数据库名称 - precision: "ms" #精度 n, u, ms, s, m or h - username: "MSP_CUSTOM_INFLUXDB_USERNAME" - password: "MSP_CUSTOM_INFLUXDB_PASSWORD" - read-username: "MSP_CUSTOM_INFLUXDB_USERNAME" - read-password: "MSP_CUSTOM_INFLUXDB_PASSWORD" - max-idle-conns: 30 - max-idle-conns-per-host: 30 - flush-size: 20 #批量发送的点的个数 - flush-time: 10 #定时批量发送点的时间,单位:s - -mesh: - msp-portal-api: "http://msp-portal:9080" - -swagger-enabled: false - -debug: false - -version: "0.1" \ No newline at end of file diff --git a/.env.example b/.env.example index 99194d9..de91b6b 100644 --- a/.env.example +++ b/.env.example @@ -1,26 +1,21 @@ -# Log Proxy -IS_LOG_LOG_PROXY_HOST=192.168.2.80 -IS_LOG_LOG_PROXY_PORT=6381 -# Redis -IS_REDIS_NODES="192.168.2.80:9001 192.168.2.80:9002 192.168.2.80:9003" -IS_GATEWAY_NERVS_ENABLED=false -IS_GATEWAY_NERVS_ENDPOINT="" -# Mysql -IS_MYSQL_DSN=root:123456@tcp(192.168.2.80:3306)/cap?charset=utf8mb4&parseTime=True&loc=Local -# Influxdb 
-IS_INFLUXDB_ENABLED=false -IS_INFLUXDB_ADDRESS= -IS_INFLUXDB_PORT= -IS_INFLUXDB_DATABASE= -IS_INFLUXDB_USERNAME= -IS_INFLUXDB_PASSWORD= -# Keymanager -IS_KEYMANAGER_UPPER_CA= +IS_INFLUXDB_ENABLED: true +IS_INFLUXDB_ADDRESS: 127.0.0.1 +IS_INFLUXDB_DATABASE: victoria +IS_INFLUXDB_PASSWORD: victoria +IS_INFLUXDB_PORT: "8427" +IS_INFLUXDB_READ_PASSWORD: victoria +IS_INFLUXDB_READ_USERNAME: victoria +IS_INFLUXDB_USERNAME: victoria IS_KEYMANAGER_SELF_SIGN=false -IS_CSR_TEMPLATES_INTERMEDIATE_CA_O= -IS_CSR_TEMPLATES_INTERMEDIATE_CA_OU= -IS_CSR_TEMPLATES_INTERMEDIATE_CA_EXPIRY=175200h -IS_CSR_TEMPLATES_ROOT_CA_O="CI123 ROOT AUTHORITY" -IS_CSR_TEMPLATES_ROOT_CA_EXPIRY=175200h -# HTTP -IS_HTTP_CA_LISTEN=0.0.0.0:8081 \ No newline at end of file +IS_KEYMANAGER_CSR_TEMPLATES_INTERMEDIATE_CA_O: site s105 huawei-shanghai-105 +IS_KEYMANAGER_CSR_TEMPLATES_INTERMEDIATE_CA_OU: spiffe://spiffeid/cluster +IS_KEYMANAGER_UPPER_CA: https://rootca-tls:8081 +IS_LOG_LOG_PROXY_HOST: redis-host +IS_LOG_LOG_PROXY_PORT: 6379 +IS_MYSQL_DSN: root:root@tcp(127.0.0.1:3306)/cap?charset=utf8mb4&parseTime=True&loc=Local +IS_OCSP_CACHE_TIME: 60 +IS_SINGLECA_CONFIG_PATH: /etc/capitalizone/config.json +IS_VAULT_ADDR: http://127.0.0.1:8200 +IS_VAULT_ENABLED: "false" +IS_VAULT_INIT: "true" +IS_VAULT_PREFIX: ca/ \ No newline at end of file diff --git a/.env.prod b/.env.prod deleted file mode 100644 index 473d1b3..0000000 --- a/.env.prod +++ /dev/null @@ -1,18 +0,0 @@ -IS_ENV: 'test' -IS_SINGLECA_CONFIG_PATH: './config.json' -IS_INFLUXDB_ENABLED: 'false' -IS_INFLUXDB_ADDRESS: '192.168.2.97' -IS_INFLUXDB_PORT: 9428 -IS_INFLUXDB_USERNAME: "influx-msp" -IS_INFLUXDB_PASSWORD: "sunyangpassword" -IS_INFLUXDB_READ_USERNAME: "influx-msp_select" -IS_INFLUXDB_READ_PASSWORD: "sunyangpassword" -IS_KEYMANAGER_UPPER_CA: "https://192.168.2.80:8380 https://192.168.2.97:8380" -IS_CSR_TEMPLATES_INTERMEDIATE_CA_O: 'TEST SERVER 80' -IS_CSR_TEMPLATES_INTERMEDIATE_CA_OU: 'spiffe://local-test/80' -IS_MYSQL_DSN: 
'msp:msp123123@tcp(192.168.9.2:9002)/cap?charset=utf8mb4&parseTime=True&loc=Local' -IS_VAULT_ENABLED: 'true' -IS_VAULT_ADDR: 'https://vault.gw002.oneitfarm.com' -IS_VAULT_TOKEN: 's.8RRbqjcSRq2OYZVuwONeVQVu' -IS_VAULT_PREFIX: 'ca/' -IS_MIGRATION: 'true' \ No newline at end of file diff --git a/.gitignore b/.gitignore index 71658fa..a5570d6 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ # IDE configs .idea .vscode +./bin diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index 74ba8a1..0000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,12 +0,0 @@ -stages: - - build - -build: - stage: build - image: harbor.oneitfarm.com/zhirenyun/docker:20.10-buildx - tags: - - aliyun-sh - script: - - docker buildx build -t $HARBOR_HOST/bifrost/capitalizone:$CI_COMMIT_REF_NAME --platform=linux/arm64,linux/amd64 . --push - only: - - tags \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index c8badae..11977d3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM hub.oneitfarm.com/library/golang:1.17.8-alpine AS builder +FROM golang:1.17.8-alpine AS builder ENV GO111MODULE=on \ GOPROXY=https://goproxy.oneitfarm.com,https://goproxy.cn,direct @@ -6,17 +6,23 @@ ENV GO111MODULE=on \ WORKDIR /build COPY . . -RUN CGO_ENABLED=0 go build -o capitalizone . +RUN CGO_ENABLED=0 go build -o zaca . -FROM harbor.oneitfarm.com/bifrost/ubuntu:20.04 +FROM ubuntu:20.04 -WORKDIR /capitalizone +WORKDIR /zaca -COPY --from=builder /build/capitalizone . +COPY --from=builder /build/zaca . COPY --from=builder /build/database/mysql/migrations ./database/mysql/migrations -COPY --from=builder /build/conf.default.yml . COPY --from=builder /build/conf.prod.yml . COPY --from=builder /build/conf.test.yml . 
RUN chmod +x capitalizone -CMD ["./capitalizone", "http"] \ No newline at end of file +# API service +CMD ["./zaca", "api"] + +# TLS service +# CMD ["./zaca", "api"] + +# OCSP service +# CMD ["./zaca", "api"] \ No newline at end of file diff --git a/Makefile b/Makefile index 28f830d..6bbf519 100644 --- a/Makefile +++ b/Makefile @@ -1,21 +1,20 @@ .PHONY: all build clean -GOPROXY=https://goproxy.oneitfarm.com,https://goproxy.cn,direct -PROG=bin/capitalizone +PROG=bin/zaca SRCS=. # git commit hash COMMIT_HASH=$(shell git rev-parse --short HEAD || echo "GitNotFound") -# 编译日期 +# Compilation date BUILD_DATE=$(shell date '+%Y-%m-%d %H:%M:%S') -# 编译条件 +# Compilation conditions CFLAGS = -ldflags "-s -w -X \"main.BuildVersion=${COMMIT_HASH}\" -X \"main.BuildDate=$(BUILD_DATE)\"" all: if [ ! -d "./bin/" ]; then \ mkdir bin; \ fi - GOPROXY=$(GOPROXY) CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build $(CFLAGS) -o $(PROG) $(SRCS) + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build $(CFLAGS) -o $(PROG) $(SRCS) build: go build -race -tags=jsoniter @@ -33,19 +32,10 @@ test: go run main.go -env test rootca: - go run main.go -env test -envfile ".env.rootca" -rootca - -fake: - go run test/fake/fake_server.go -env test -ca https://192.168.2.80:8381 - -cfssl-model: - gen --sqltype=mysql -c "root:123456@tcp(192.168.2.80:3306)/cap?charset=utf8mb4&parseTime=True&loc=Local" -d cap --json --generate-dao --overwrite --gorm --db --module "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model" --out ./examples/cfssl-model - -telegraf: - sudo docker run --network=host -v `pwd`/telegraf.conf:/telegraf.conf --rm -it telegraf:1.19.0 telegraf --config /telegraf.conf + go run main.go -env test -envfile ".env.example" -rootca migration: - go run main.go -envfile ".env.prod" + go run main.go -envfile ".env.example" clean: rm -rf ./bin \ No newline at end of file diff --git a/README.md b/README.md index 09deab6..9b5d9b4 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,188 @@ -# CApitaliZone +# ZACA 
+ +Zaca is a Ca pkitls toolkit developed based on cloudflare cfssl + +Zaca includes the following components: + +1. TLS service, as the CA center, is used for certificate issuance, revocation, signature and other operations. +2. API services, as some API services for certificate management. +2. OCSP service is a service that queries the online status of certificates and has been signed by OCSP. +2. SDK component, which is used for other services to access the CA SDK as a toolkit for certificate issuance and automatic rotation. + +### Building + +Building cfssl requires a [working Go 1.12+ installation](http://golang.org/doc/install). + +``` +$ git clone git@github.com:ztalab/ZACA.git +$ cd ZACA +$ make +``` + +You can set GOOS and GOARCH environment variables to allow Go to cross-compile alternative platforms. + +The resulting binaries will be in the bin folder: + +``` +$ tree bin +bin +├── zaca + +0 directories, 1 files +``` + +### Configuration reference + +Zaca configuration can be set through environment variables or configured through configuration files. 
The priority of environment variables is higher than that of configuration files + +Environment variable configuration reference: + +``` +IS_ENV:test +# Timing configuration +IS_INFLUXDB_ENABLED: true +IS_INFLUXDB_ADDRESS: 127.0.0.1 +IS_INFLUXDB_DATABASE: victoria +IS_INFLUXDB_PASSWORD: victoria +IS_INFLUXDB_PORT: "8427" +IS_INFLUXDB_READ_PASSWORD: victoria +IS_INFLUXDB_READ_USERNAME: victoria +IS_INFLUXDB_USERNAME: victoria +# Self certificate configuration +IS_KEYMANAGER_CSR_TEMPLATES_INTERMEDIATE_CA_O: site +IS_KEYMANAGER_CSR_TEMPLATES_INTERMEDIATE_CA_OU: spiffe://spiffeid/cluster +# Self signed configuration +IS_KEYMANAGER_SELF_SIGN=false +# Parent CA address +IS_KEYMANAGER_UPPER_CA: https://rootca-tls:8081 +// Log hook address +IS_LOG_LOG_PROXY_HOST: redis-host +IS_LOG_LOG_PROXY_PORT: 6379 +# Database mysql address +IS_MYSQL_DSN: root:root@tcp(127.0.0.1:3306)/cap?charset=utf8mb4&parseTime=True&loc=Local +# OCSP cache time in seconds +IS_OCSP_CACHE_TIME: 60 +# Certificate issuance configuration +IS_SINGLECA_CONFIG_PATH: /etc/capitalizone/config.json +# Confidential storage configuration +IS_VAULT_ADDR: http://127.0.0.1:8200 +IS_VAULT_ENABLED: "false" +IS_VAULT_INIT: "true" +IS_VAULT_PREFIX: ca/ +``` + +### Service Installation + +#### TLS service + +TLS service is used to issue certificates through control`IS_KEYMANAGER_SELF_SIGN` Environment variable to control whether to start as root ca. + +- Started as root Ca, TLS service will self sign certificate. +- When starting as an intermediate Ca, the TLS service needs to request the root CA signing certificate as its own CA certificate. + +Start command:`zaca tls`,Default listening port 8081 + +#### OCSP service + +OCSP online certificate status is used to query the certificate status information. OCSP returns the certificate online status information to quickly check whether the certificate has expired, whether it has been revoked and so on. 
+Start command:`zaca ocsp`,Default listening port 8082 + +#### API services + +Provide CA center API service, which can be accessed after the service is started`http://localhost:8080/swagger/index.html`,View API documentation. + +Start command:`zaca api`,Default listening port 8080 + + + +#### SDK Installation + +``` +$ go get github.com:ztalab/ZACA +``` + +Then in your Go app you can do something like + +##### Server + +```go +import ( + "github.com/pkg/errors" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" +) + +// mTLS Server Use example +func NewMTLSServer() error { + // role: default + // CA Server Address,eg: https://zaca-tls.msp:8081 + // Ocsp Server Address, eg: http://zaca-ocsp:8082 + // CA Auth Key + c := caclient.NewCAI( + caclient.WithCAServer(caclient.RoleDefault, *caAddr), + aclient.WithOcspAddr(*ocspAddr), + caclient.WithSignAlgo(keygen.Sm2SigAlg), + caclient.WithAuthKey(authKey), + ) + // Fill in workload parameters + serverEx, err := c.NewExchanger(&spiffe.IDGIdentity{ + SiteID: "test_site", + ClusterID: "cluster_test", + UniqueID: "client1", + }) + if err != nil { + return errors.Wrap(err, "Exchanger initialization failed") + } + // Obtain tls.Config + tlsCfg, err := serverEx.ServerTLSConfig() + go func() { + // Handle with tls.Config + httpsServer(tlsCfg) + }() + // Start certificate rotation + go serverEx.RotateController().Run() + return nil +} +``` + +#### Client + +```go +import ( + "github.com/pkg/errors" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" +) + +// mTLS Client Use example +func NewMTLSClient() (*http.Client, error) { +// role: default +// CA Server Address,eg: https://zaca-tls.msp:8081 +// Ocsp Server Address, eg: http://zaca-ocsp:8082 +// CA Auth Key + c := caclient.NewCAI( + caclient.WithCAServer(caclient.RoleDefault, *caAddr), + aclient.WithOcspAddr(*ocspAddr), + caclient.WithAuthKey(authKey), + caclient.WithSignAlgo(keygen.Sm2SigAlg), + ) + // Fill in workload 
parameters + serverEx, err := c.NewExchanger(&spiffe.IDGIdentity{ + SiteID: "test_site", + ClusterID: "cluster_test", + UniqueID: "client1", + }) + if err != nil { + return nil, errors.Wrap(err, "Exchanger initialization failed") + } + // Obtain tls.Config + // Server Name It can be '', which is not filled in by default for inter service calls + tlsCfg, err := serverEx.ClientTLSConfig("") + // Handle With tls.Config + client := httpClient(tlsCfg) + // Start certificate rotation + go serverEx.RotateController().Run() + return client, nil +} +``` diff --git a/api/helper/api_helper.go b/api/helper/api_helper.go index 8e46ee5..abe98e9 100644 --- a/api/helper/api_helper.go +++ b/api/helper/api_helper.go @@ -7,7 +7,7 @@ import ( "time" "github.com/gin-gonic/gin" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" ) const ( diff --git a/api/router.go b/api/router.go index f0b6569..6636ac6 100644 --- a/api/router.go +++ b/api/router.go @@ -6,14 +6,13 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" swaggerFiles "github.com/swaggo/files" ginSwagger "github.com/swaggo/gin-swagger" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/v1/ca" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/v1/certleaf" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/v1/health" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/v1/vault" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/v1/workload" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/docs" + "github.com/ztalab/ZACA/api/helper" + "github.com/ztalab/ZACA/api/v1/ca" + "github.com/ztalab/ZACA/api/v1/certleaf" + "github.com/ztalab/ZACA/api/v1/health" + "github.com/ztalab/ZACA/api/v1/workload" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/docs" ) func Serve() *gin.Engine { @@ -44,7 +43,7 @@ func Serve() *gin.Engine { prefix.GET("/cert", helper.WrapH(handler.CertDetail)) 
prefix.GET("/units_forbid_query", helper.WrapH(handler.UnitsForbidQuery)) prefix.GET("/units_certs_list", helper.WrapH(handler.UnitsCertsList)) - // Root CA 禁止操作 + // Root CA Prohibit operation if !core.Is.Config.Keymanager.SelfSign { lifeCyclePrefix := prefix.Group("/lifecycle") { @@ -78,16 +77,5 @@ func Serve() *gin.Engine { prefix.GET("/cert_chain", helper.WrapH(handler.CertChain)) prefix.GET("/cert_chain_from_root", helper.WrapH(handler.CertChainFromRoot)) } - - { - // Vault - prefix := v1.Group("/vault") - prefix.GET("/token", helper.WrapH(vault.RootToken)) - } - - //if err := router.Run(core.Is.Config.HTTP.Listen); err != nil { - // v2log.Named("router").Fatalf("listen err: %v", err) - // return err - //} return router } diff --git a/api/v1/ca/ca.go b/api/v1/ca/ca.go index 7934f30..a8dbc6f 100644 --- a/api/v1/ca/ca.go +++ b/api/v1/ca/ca.go @@ -1,10 +1,10 @@ package ca import ( - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/ca" + logic "github.com/ztalab/ZACA/logic/ca" ) type API struct { @@ -14,7 +14,7 @@ type API struct { func NewAPI() *API { return &API{ - logger: v2log.Named("api").SugaredLogger, + logger: logger.Named("api").SugaredLogger, logic: logic.NewLogic(), } } diff --git a/api/v1/ca/config.go b/api/v1/ca/config.go index 5b7e2bf..6d65138 100644 --- a/api/v1/ca/config.go +++ b/api/v1/ca/config.go @@ -3,21 +3,21 @@ package ca import ( "strings" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/ca" + "github.com/ztalab/ZACA/api/helper" + logic "github.com/ztalab/ZACA/logic/ca" ) func init() { - // 载入类型... + // Load type... 
logic.DoNothing() } -// RoleProfiles 环境隔离类型 +// RoleProfiles Environmental isolation type // @Tags CA -// @Summary (p1)环境隔离类型 -// @Description 环境隔离类型 +// @Summary (p1)Environmental isolation type +// @Description Environmental isolation type // @Produce json -// @Param short query bool false "只返回类型列表, 供搜索条件" +// @Param short query bool false "Only a list of types is returned for search criteria" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=logic.RoleProfile} " " // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=[]string} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse diff --git a/api/v1/ca/intermediate.go b/api/v1/ca/intermediate.go index e7aba72..d6d452c 100644 --- a/api/v1/ca/intermediate.go +++ b/api/v1/ca/intermediate.go @@ -1,19 +1,19 @@ package ca import ( - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/ca" + "github.com/ztalab/ZACA/api/helper" + logic "github.com/ztalab/ZACA/logic/ca" ) func init() { - // 载入类型... + // load type... 
logic.DoNothing() } -// IntermediateTopology 子CA拓扑 +// IntermediateTopology Sub-CA topology // @Tags CA -// @Summary 子CA拓扑 -// @Description 子CA拓扑 +// @Summary Sub-CA topology +// @Description Sub-CA topology // @Produce json // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=[]logic.IntermediateObject} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse @@ -23,10 +23,10 @@ func (a *API) IntermediateTopology(c *helper.HTTPWrapContext) (interface{}, erro return a.logic.IntermediateTopology() } -// UpperCaIntermediateTopology 上层CA拓扑 +// UpperCaIntermediateTopology Upper CA topology // @Tags CA -// @Summary 上层CA拓扑 -// @Description 上层CA拓扑 +// @Summary Upper CA topology +// @Description Upper CA topology // @Produce json // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=[]logic.IntermediateObject} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse diff --git a/api/v1/ca/overall.go b/api/v1/ca/overall.go index f23aab0..2edc10f 100644 --- a/api/v1/ca/overall.go +++ b/api/v1/ca/overall.go @@ -5,18 +5,18 @@ import ( "time" "github.com/pkg/errors" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/api/helper" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" ) type OverallCertsCountItem struct { - Role string `json:"role"` // 类别 - Total int64 `json:"total"` // 证书总数 - UnitsCount int64 `json:"units_count"` // 服务数量 + Role string `json:"role"` + Total int64 `json:"total"` // Total number of certificates + UnitsCount int64 `json:"units_count"` // number of services } type OverallCertsCountResponse struct { @@ -24,10 +24,10 @@ type OverallCertsCountResponse struct { Certs []OverallCertsCountItem `json:"certs"` } -// OverallCertsCount 证书分类 
+// OverallCertsCount Certificate classification // @Tags CA -// @Summary (p2)证书分类 -// @Description 证书总数、根据分类划分的数量、对应服务数量 +// @Summary (p2)Certificate classification +// @Description Total number of certificates, number by classification, number of corresponding services // @Produce json // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=OverallCertsCountResponse} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse @@ -48,8 +48,8 @@ func (a *API) OverallCertsCount(c *helper.HTTPWrapContext) (interface{}, error) roleProfiles, err := a.logic.RoleProfiles() if err != nil { - a.logger.Errorf("获取 role profiles 错误: %s", err) - return nil, errors.New("获取 role profiles 错误") + a.logger.Errorf("Error getting role profiles: %s", err) + return nil, errors.New("Error getting role profiles") } res := &OverallCertsCountResponse{ @@ -86,10 +86,10 @@ type OverallExpiryCertsResponse struct { ExpiryCerts []OverallExpiryGroup `json:"expiry_certs"` } -// OverallExpiryCerts 证书有效期 +// OverallExpiryCerts Certificate validity // @Tags CA -// @Summary (p2)证书有效期 -// @Description 证书已过期数量, 一周内过期数量, 1/3个月内过期数量, 3个月后过期数量 +// @Summary (p2)Certificate validity +// @Description Number of certificates expired: within one week, within 1/3 months and after 3 months // @Produce json // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=OverallExpiryCertsResponse} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse @@ -113,7 +113,7 @@ func (a *API) OverallExpiryCerts(c *helper.HTTPWrapContext) (interface{}, error) ExpiryCerts: make([]OverallExpiryGroup, 0), } - // 一周内 + // Within a week { item := OverallExpiryGroup{Name: "1w"} count, err := getExpiryCountByDuration(7*24*time.Hour, time.Now()) @@ -124,7 +124,7 @@ func (a *API) OverallExpiryCerts(c *helper.HTTPWrapContext) (interface{}, error) res.ExpiryCerts = append(res.ExpiryCerts, item) } - // 一个月内 + // Within one month { item := OverallExpiryGroup{Name: "1m"} count, err := 
getExpiryCountByDuration(30*24*time.Hour, time.Now()) @@ -135,7 +135,7 @@ func (a *API) OverallExpiryCerts(c *helper.HTTPWrapContext) (interface{}, error) res.ExpiryCerts = append(res.ExpiryCerts, item) } - // 三个月内 + // Within three months { item := OverallExpiryGroup{Name: "3m"} count, err := getExpiryCountByDuration(3*30*24*time.Hour, time.Now()) @@ -146,7 +146,7 @@ func (a *API) OverallExpiryCerts(c *helper.HTTPWrapContext) (interface{}, error) res.ExpiryCerts = append(res.ExpiryCerts, item) } - // 三个月后 + // Three months later { item := OverallExpiryGroup{Name: "3m+"} count, err := getExpiryCountByDuration(999*30*24*time.Hour, time.Now().AddDate(0, 3, 0)) @@ -161,9 +161,9 @@ func (a *API) OverallExpiryCerts(c *helper.HTTPWrapContext) (interface{}, error) } func getExpiryCountByDuration(period time.Duration, before time.Time) (int64, error) { - // 一周内 - // 过期时间 - 当前时间 <= 一周 - // 过期时间 <= 当前时间 + 一周 + // Within a week + // Expiration time - current time < = one week + // Expiration time < = current time + one week expiryDate := time.Now().Add(period) query := core.Is.Db.Session(&gorm.Session{}).Model(&model.Certificates{}). Where("expiry > ?", before). 
@@ -173,7 +173,7 @@ func getExpiryCountByDuration(period time.Duration, before time.Time) (int64, er var count int64 if err := query.Count(&count).Error; err != nil { - v2log.Errorf("mysql query err: %s", err) + logger.Errorf("mysql query err: %s", err) return 0, err } @@ -190,10 +190,10 @@ type OverallUnitsEnableStatus struct { Disable OverallUnitsEnableItem `json:"disable"` } -// OverallUnitsEnableStatus 启用情况 +// OverallUnitsEnableStatus Enabling condition // @Tags CA -// @Summary (p2)启用情况 -// @Description 已启用总数, 禁用总数, 对应服务数 +// @Summary (p2)Enabling condition +// @Description Total enabled, total disabled, corresponding services // @Produce json // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=OverallUnitsEnableStatus} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse diff --git a/api/v1/ca/workload_topo.go b/api/v1/ca/workload_topo.go index 29cff4c..343b338 100644 --- a/api/v1/ca/workload_topo.go +++ b/api/v1/ca/workload_topo.go @@ -1,24 +1,24 @@ package ca import ( - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/ca" + "github.com/ztalab/ZACA/api/helper" + logic "github.com/ztalab/ZACA/logic/ca" ) func init() { - // 载入类型... + // load type... 
logic.DoNothing() } -// WorkloadUnits CA 下 Units -// 以 UniqueID 为单元 +// WorkloadUnits CA Units +// UniqueID as unit // @Tags CA -// @Summary (p1)服务单元 -// @Description CA 下 Units +// @Summary (p1)Service unit +// @Description CA Units // @Produce json -// @Param page query int false "页数, 默认1" -// @Param limit_num query int false "页数限制, 默认20" -// @Param unique_id query string false "UniqueID 查询" +// @Param page query int false "Number of pages, default 1" +// @Param limit_num query int false "Page limit, default 20" +// @Param unique_id query string false "UniqueID Query" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=helper.MSPNormalizeList{list=[]logic.WorkloadUnit}} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse diff --git a/api/v1/certleaf/base.go b/api/v1/certleaf/base.go index ed928fb..39db2ee 100644 --- a/api/v1/certleaf/base.go +++ b/api/v1/certleaf/base.go @@ -1,10 +1,10 @@ package certleaf import ( - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/certleaf" + logic "github.com/ztalab/ZACA/logic/certleaf" ) type API struct { @@ -14,7 +14,7 @@ type API struct { func NewAPI() *API { return &API{ - logger: v2log.Named("api").SugaredLogger, + logger: logger.Named("api").SugaredLogger, logic: logic.NewLogic(), } } diff --git a/api/v1/certleaf/cert_chain.go b/api/v1/certleaf/cert_chain.go index a31adda..0e47de6 100644 --- a/api/v1/certleaf/cert_chain.go +++ b/api/v1/certleaf/cert_chain.go @@ -3,20 +3,20 @@ package certleaf import ( "errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - caLogic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/ca" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/certleaf" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" + "github.com/ztalab/ZACA/api/helper" + caLogic "github.com/ztalab/ZACA/logic/ca" + logic 
"github.com/ztalab/ZACA/logic/certleaf" + "github.com/ztalab/ZACA/logic/schema" ) -// CertChain 证书链 +// CertChain Certificate chain // @Tags certleaf // @Summary (p1)CertChain -// @Description 获取证书链信息 +// @Description Get certificate chain information // @Produce json -// @Param self_cert query bool false "展示 CA 自身证书链" -// @Param sn query string false "SN+AKI 查询指定证书" -// @Param aki query string false "SN+AKI 查询指定证书" +// @Param self_cert query bool false "Show CA's own certificate chain" +// @Param sn query string false "SN+AKI Query the specified certificate" +// @Param aki query string false "SN+AKI Query the specified certificate" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=logic.LeafCert} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse @@ -37,10 +37,10 @@ type RootCertChains struct { Root *caLogic.IntermediateObject `json:"root"` } -// CertChainFromRoot Root视角下所有证书链 +// CertChainFromRoot All certificate chains from the root Perspective // @Tags certleaf -// @Summary (p1)根视角证书链 -// @Description Root视角下所有证书链 +// @Summary (p1)Root view certificate chain +// @Description All certificate chains from the root Perspective // @Produce json // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=RootCertChains} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse @@ -68,7 +68,7 @@ func (a *API) CertChainFromRoot(c *helper.HTTPWrapContext) (interface{}, error) children, err := caLogic.NewLogic().UpperCaIntermediateTopology() if err != nil { - a.logger.Errorf("获取上层 CA 拓扑结构错误: %s", err) + a.logger.Errorf("Error getting upper CA topology: %s", err) } chain.Root.Children = children diff --git a/api/v1/health/health.go b/api/v1/health/health.go index c2b611e..ed3d7cd 100644 --- a/api/v1/health/health.go +++ b/api/v1/health/health.go @@ -2,21 +2,13 @@ package health import ( "crypto/tls" - "fmt" - "log" "net/http" - "os" "time" - "github.com/hashicorp/go-discover" - 
"github.com/hashicorp/go-discover/provider/k8s" - vaultAPI "github.com/hashicorp/vault/api" - - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - cfClient "gitlab.oneitfarm.com/bifrost/cfssl/api/client" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" + "github.com/ztalab/ZACA/api/helper" + "github.com/ztalab/ZACA/ca/keymanager" + "github.com/ztalab/ZACA/core" + cfClient "github.com/ztalab/cfssl/api/client" ) // CfsslHealthAPI ... @@ -59,19 +51,6 @@ func Health(c *helper.HTTPWrapContext) (interface{}, error) { } hm = append(hm, module) } - //{ - // // VictoriaMetrics - // module := &HealthModule{ - // Name: "VictoriaMetrics", - // DisplayName: "VictoriaMetrics", - // State: 200, - // } - // if _, _, err := core.Is.Metrics.InfluxDBHttpClient.Client.Ping(2 * time.Second); err != nil { - // module.Message = err.Error() - // module.State = 500 - // } - // hm = append(hm, module) - //} { // RootCA module := &HealthModule{ @@ -94,57 +73,5 @@ func Health(c *helper.HTTPWrapContext) (interface{}, error) { }) hm = append(hm, module) } - if hook.EnableVaultStorage { - // Vault - module := &HealthModule{ - Name: "Vault", - DisplayName: "Vault", - State: 200, - } - d := discover.Discover{ - Providers: map[string]discover.Provider{ - "k8s": &k8s.Provider{}, - }, - } - // use ioutil.Discard for no log output - l := log.New(os.Stderr, "", log.LstdFlags) - addrs, err := d.Addrs(core.Is.Config.Vault.Discover, l) - if err != nil { - module.State = 500 - module.Message = fmt.Sprintf("Vault K8s IP 发现失败: %s", err) - } else { - if len(addrs) == 0 { - module.State = 500 - module.Message = "Vault K8s 节点不可用" - } else { - for _, addr := range addrs { - conf := &vaultAPI.Config{ - Address: "http://" + addr + ":8200", - HttpClient: httpClient, - } - cli, _ := vaultAPI.NewClient(conf) - cli.SetToken(core.Is.Config.Vault.Token) - var retryTimes int - RETRY: - status, err := 
cli.Sys().SealStatus() - if err != nil { - if retryTimes == 0 { - retryTimes++ - goto RETRY - } - module.State = 500 - module.Message += fmt.Sprintf("Vault 节点 %s 获取 seal status 错误: %s\n", addr, err) - } else { - if status.Sealed { - module.State = 500 - module.Message += fmt.Sprintf("Vault 节点 %s 未解封\n", addr) - module.Desc = "未解封可能原因是 K8s Node 节点异常重启" - } - } - } - } - } - hm = append(hm, module) - } return hm, nil } diff --git a/api/v1/vault/vault.go b/api/v1/vault/vault.go deleted file mode 100644 index 29c71fb..0000000 --- a/api/v1/vault/vault.go +++ /dev/null @@ -1,50 +0,0 @@ -package vault - -import ( - "errors" - "os" - - vaultAPI "github.com/hashicorp/vault/api" - jsoniter "github.com/json-iterator/go" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultinit" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "gorm.io/gorm" -) - -// RootToken ... 
-func RootToken(c *helper.HTTPWrapContext) (interface{}, error) { - verifyKey := os.Getenv("IS_VAULT_VERIFY_KEY") - if verifyKey == "" { - return nil, errors.New("verify key not found") - } - - if c.G.Query("verify_key") != verifyKey { - return nil, errors.New("verify key error") - } - - envRootToken := core.Is.Config.Vault.Token - if envRootToken != "" { - return envRootToken, nil - } - - keyPair := &model.SelfKeypair{} - if err := core.Is.Db.Where("name = ?", vaultinit.StoreKeyName).Order("id desc").First(keyPair).Error; err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - logger.Errorf("Vault key not found") - return nil, errors.New("vault key not found") - } - logger.Errorf("DB query err: %s", err) - return nil, err - } - key := keyPair.PrivateKey.String - keys := new(vaultAPI.InitResponse) - if err := jsoniter.UnmarshalFromString(key, keys); err != nil { - logger.Errorf("Unmarshal keys err: %s", err) - return nil, err - } - - return keys.RootToken, nil -} diff --git a/api/v1/workload/lifecycle.go b/api/v1/workload/lifecycle.go index 4859e24..1f710dc 100644 --- a/api/v1/workload/lifecycle.go +++ b/api/v1/workload/lifecycle.go @@ -1,17 +1,17 @@ -// Package workload 证书生命周期管理 +// Package workload Certificate Lifecycle Management package workload import ( - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/workload" + "github.com/ztalab/ZACA/api/helper" + logic "github.com/ztalab/ZACA/logic/workload" ) -// RevokeCerts 吊销证书 +// RevokeCerts revoked certificate // @Tags Workload // @Summary (p3)Revoke -// @Description 吊销证书 +// @Description revoked certificate // @Produce json -// @Param body body logic.RevokeCertsParams true "sn+aki / unique_id 二选一" +// @Param body body logic.RevokeCertsParams true "sn+aki / unique_id pick one of two" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} 
helper.HTTPWrapErrorResponse @@ -28,12 +28,12 @@ func (a *API) RevokeCerts(c *helper.HTTPWrapContext) (interface{}, error) { return "revoked", nil } -// RecoverCerts 恢复证书 +// RecoverCerts Restore certificate // @Tags Workload // @Summary (p3)Recover -// @Description 恢复证书 +// @Description Restore certificate // @Produce json -// @Param body body logic.RecoverCertsParams true "sn+aki / unique_id 二选一" +// @Param body body logic.RecoverCertsParams true "sn+aki / unique_id either-or" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse @@ -50,10 +50,10 @@ func (a *API) RecoverCerts(c *helper.HTTPWrapContext) (interface{}, error) { return "recovered", nil } -// ForbidNewCerts 禁止某个 UniqueID 申请证书 +// ForbidNewCerts Prohibit a uniqueID from requesting a certificate // @Tags Workload -// @Summary 禁止申请证书 -// @Description 禁止某个 UniqueID 申请证书 +// @Summary Application for certificate is prohibited +// @Description Prohibit a uniqueID from requesting a certificate // @Produce json // @Param body body logic.ForbidNewCertsParams true " " // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody " " @@ -72,10 +72,10 @@ func (a *API) ForbidNewCerts(c *helper.HTTPWrapContext) (interface{}, error) { return "success", nil } -// RecoverForbidNewCerts 恢复允许某个 UniqueID 申请证书 +// RecoverForbidNewCerts Recovery allows a uniqueID to request a certificate // @Tags Workload -// @Summary 恢复申请证书 -// @Description 恢复允许某个 UniqueID 申请证书 +// @Summary Resume application certificate +// @Description Recovery allows a uniqueID to request a certificate // @Produce json // @Param body body logic.ForbidNewCertsParams true " " // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody " " @@ -98,10 +98,10 @@ type ForbidUnitParams struct { UniqueID string `json:"unique_id" binding:"required"` } -// ForbidUnit 吊销并禁止服务证书 +// ForbidUnit Revoke and prohibit service certificates // @Tags 
Workload -// @Summary (p1)吊销并禁止服务证书 -// @Description 吊销并禁止服务证书 +// @Summary (p1)Revoke and prohibit service certificates +// @Description Revoke and prohibit service certificates // @Produce json // @Param json body ForbidUnitParams true " " // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody " " @@ -116,26 +116,26 @@ func (a *API) ForbidUnit(c *helper.HTTPWrapContext) (interface{}, error) { UniqueIds: []string{req.UniqueID}, }) if err != nil { - a.logger.With("req", req).Errorf("禁止申请证书失败: %s", err) + a.logger.With("req", req).Errorf("Failed to prohibit certificate application: %s", err) return nil, err } - // 2021.04.15 (功能性调整) 证书启用/禁用影响证书通信 OCSP 认证、Sidecar mTLS 使用,不会吊销证书 + // 2021.04.15 (functional adjustment) certificate enabling and disabling will affect certificate communication, OCSP authentication and MTLs use, and the certificate will not be revoked // err = a.logic.RevokeCerts(&logic.RevokeCertsParams{ // UniqueId: req.UniqueID, // }) // if err != nil { - // a.logger.With("req", req).Errorf("吊销服务证书失败: %s", err) + // a.logger.With("req", req).Errorf("Revocation of service certificate failed: %s", err) // return nil, err // } return "success", nil } -// RecoverUnit 恢复并允许服务证书 +// RecoverUnit Restore and allow service certificates // @Tags Workload -// @Summary (p1)恢复并允许服务证书 -// @Description 恢复并允许服务证书 +// @Summary (p1)Restore and allow service certificates +// @Description Restore and allow service certificates // @Produce json // @Param json body ForbidUnitParams true " " // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody " " @@ -150,7 +150,7 @@ func (a *API) RecoverUnit(c *helper.HTTPWrapContext) (interface{}, error) { UniqueIds: []string{req.UniqueID}, }) if err != nil { - a.logger.With("req", req).Errorf("恢复申请证书失败: %s", err) + a.logger.With("req", req).Errorf("Failed to restore the requested certificate: %s", err) return nil, err } @@ -158,7 +158,7 @@ func (a *API) RecoverUnit(c *helper.HTTPWrapContext) (interface{}, error) { // 
UniqueId: req.UniqueID, // }) // if err != nil { - // a.logger.With("req", req).Errorf("恢复服务证书失败: %s", err) + // a.logger.With("req", req).Errorf("Failed to restore service certificate: %s", err) // return nil, err // } diff --git a/api/v1/workload/workload.go b/api/v1/workload/workload.go index b8deef0..337611d 100644 --- a/api/v1/workload/workload.go +++ b/api/v1/workload/workload.go @@ -7,16 +7,16 @@ import ( "github.com/araddon/dateparse" "github.com/pkg/errors" "github.com/tal-tech/go-zero/core/fx" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/api/helper" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - logic "gitlab.oneitfarm.com/bifrost/capitalizone/logic/workload" + "github.com/ztalab/ZACA/api/helper" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/schema" + logic "github.com/ztalab/ZACA/logic/workload" ) type API struct { @@ -27,31 +27,30 @@ type API struct { func NewAPI() *API { return &API{ logic: logic.NewLogic(), - logger: v2log.Named("api").SugaredLogger, + logger: logger.Named("api").SugaredLogger, } } -// CertList 证书列表 +// CertList Certificate list // @Tags Workload // @Summary (p3)List -// @Description 证书列表 +// @Description Certificate list // @Produce json -// @Param role query string false "证书类型 gateway/sidecar/standalone" -// @Param unique_id query string false "根据UniqueID查询" -// @Param cert_sn query string false "根据证书序列号查询" -// @Param status query string false "证书状态 good/revoked" -// @Param order query string false "排序,默认 issued_at desc" -// @Param expiry_start_time query string 
false "过期, 起始时间点" -// @Param expiry_end_time query string false "过期, 结束时间点" -// @Param limit_num query int false "分页参数, 默认 20" -// @Param page query int false "页数, 默认 1" +// @Param role query string false "Certificate type default" +// @Param unique_id query string false "Query by unique ID" +// @Param cert_sn query string false "Query by certificate serial number" +// @Param status query string false "Certificate status good/revoked" +// @Param order query string false "Sort, default issued_at desc" +// @Param expiry_start_time query string false "Expiration, starting point" +// @Param expiry_end_time query string false "Expiration, end time point" +// @Param limit_num query int false "Paging parameters, default 20" +// @Param page query int false "Number of pages, default 1" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=helper.MSPNormalizeList{list=[]schema.SampleCert}} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse // @Router /workload/certs [get] func (a *API) CertList(c *helper.HTTPWrapContext) (interface{}, error) { var req = struct { - // 查询条件 Role string `form:"role"` UniqueID string `form:"unique_id"` Status string `form:"status"` @@ -91,20 +90,19 @@ func (a *API) CertList(c *helper.HTTPWrapContext) (interface{}, error) { return result, nil } -// CertDetail 证书详情 +// CertDetail Certificate details // @Tags Workload // @Summary Detail -// @Description 证书详情 +// @Description Certificate details // @Produce json -// @Param sn query string true "证书 sn" -// @Param aki query string true "证书 aki" +// @Param sn query string true "Certificate sn" +// @Param aki query string true "Certificate aki" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=schema.FullCert} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse // @Router /workload/cert [get] func (a *API) CertDetail(c *helper.HTTPWrapContext) (interface{}, 
error) { var req struct { - // 查询条件 SN string `form:"sn" binding:"required"` AKI string `form:"aki" binding:"required"` } @@ -129,19 +127,18 @@ func (a *API) CertDetail(c *helper.HTTPWrapContext) (interface{}, error) { return result, nil } -// UnitsForbidQuery 查询 unique_id 是否被禁止申请证书 +// UnitsForbidQuery Query unique_ Is ID prohibited from applying for certificate // @Tags Workload -// @Summary 禁止申请证书查询 -// @Description 查询 unique_id 是否被禁止申请证书 +// @Summary Prohibit applying for certificate query +// @Description Query unique_id Is it forbidden to apply for certificate // @Produce json -// @Param unique_ids query []string true "查询 unique_id 数组" collectionFormat(multi) +// @Param unique_ids query []string true "Query unique_ID array" collectionFormat(multi) // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=logic.UnitsForbidQueryResult} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse // @Router /workload/units_forbid_query [get] func (a *API) UnitsForbidQuery(c *helper.HTTPWrapContext) (interface{}, error) { var req struct { - // 查询条件 UniqueIds []string `form:"unique_ids" binding:"required"` } c.BindG(&req) @@ -159,12 +156,12 @@ type UnitsStatusReq struct { UniqueIds []string `json:"unique_ids" binding:"required"` } -// UnitsStatus 服务对应状态查询 +// UnitsStatus Service corresponding status query // @Tags Workload -// @Summary (p1)服务对应状态查询 -// @Description 服务对应状态查询 +// @Summary (p1)Service corresponding status query +// @Description Service corresponding status query // @Produce json -// @Param json body UnitsStatusReq true "查询 unique_id 数组" +// @Param json body UnitsStatusReq true "Query unique_ID array" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=object} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse @@ -216,7 +213,7 @@ func (a *API) getUnitsStatus(uniqueIds []string) (UnitsStatusMap, error) { var list 
[]*model.Certificates if err := query.Find(&list).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - a.logger.Errorf("数据库查询错误: %s", err) + a.logger.Errorf("Database query error: %s", err) return nil, err } @@ -245,26 +242,25 @@ type UnitsCertsItem struct { Forbidden bool `json:"forbidden"` } -// UnitsCertsList 服务证书列表 +// UnitsCertsList List of service certificates // Deprecated // @Tags Workload -// @Summary (p1)服务证书列表 -// @Description 服务证书列表 +// @Summary (p1)List of service certificates +// @Description List of service certificates // @Produce json -// @Param unique_id query string false "查询 unique_id" -// @Param role query string false "证书类型" -// @Param expiry_start_time query string false "过期, 起始时间点" -// @Param expiry_end_time query string false "过期, 结束时间点" -// @Param is_forbid query int false "是否禁用, 1禁用 2启用" -// @Param limit_num query int false "分页参数, 默认 20" -// @Param page query int false "页数, 默认 1" +// @Param unique_id query string false "Query unique_id" +// @Param role query string false "Certificate type" +// @Param expiry_start_time query string false "Expiration, starting point" +// @Param expiry_end_time query string false "Expiration, end time point" +// @Param is_forbid query int false "Disable, 1 disable, 2 enable" +// @Param limit_num query int false "Paging parameters, default 20" +// @Param page query int false "Number of pages, default 1" // @Success 200 {object} helper.MSPNormalizeHTTPResponseBody{data=[]UnitsCertsItem} " " // @Failure 400 {object} helper.HTTPWrapErrorResponse // @Failure 500 {object} helper.HTTPWrapErrorResponse // @Router /workload/units_certs_list [get] func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { var req = struct { - // 查询条件 UniqueID string `form:"unique_id"` Role string `form:"role"` ExpiryStartTime string `form:"expiry_start_time"` @@ -298,7 +294,7 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { if req.ExpiryStartTime != "" { date, err := 
dateparse.ParseAny(req.ExpiryStartTime) if err != nil { - return nil, errors.Wrap(err, "过期起始时间错误") + return nil, errors.Wrap(err, "Expiration start time error") } query = query.Where("expiry > ?", date) expiryStartDate = &date @@ -307,7 +303,7 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { if req.ExpiryEndTime != "" { date, err := dateparse.ParseAny(req.ExpiryEndTime) if err != nil { - return nil, errors.Wrap(err, "过期结束时间错误") + return nil, errors.Wrap(err, "Expiration end time error") } query = query.Where("expiry < ?", date) expiryEndDate = &date @@ -325,7 +321,7 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { if len(uniqueIds) == 0 { list, total, err := dao.GetAllCertificates(query, req.Page, req.LimitNum, "common_name asc") if err != nil { - a.logger.Errorf("数据库查询错误: %s", err) + a.logger.Errorf("Database query error: %s", err) return nil, err } @@ -364,17 +360,17 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { list, _, err := dao.GetAllCertificates(query, 1, 100, "issued_at desc") if err != nil { - a.logger.Errorf("数据库查询错误: %s", err) + a.logger.Errorf("Database query error: %s", err) return nil, err } - a.logger.Debugf("返回证书数量: %v", len(list)) + a.logger.Debugf("Number of returned certificates: %v", len(list)) forbidMap, err := a.logic.UnitsForbidQuery(&logic.UnitsForbidQueryParams{ UniqueIds: uniqueIds, }) if err != nil { - a.logger.Errorf("服务禁止状态查询错误: %s", err) + a.logger.Errorf("Service prohibition status query error: %s", err) return nil, err } @@ -390,7 +386,7 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { fullCert, err := schema.GetFullCertByModelCert(row) if err != nil { - a.logger.Errorf("获取 full cert 错误: %s", err) + a.logger.Errorf("Get full cert error: %s", err) continue } unitsCertsMap[uid].Certs = append(unitsCertsMap[uid].Certs, fullCert) @@ -401,7 +397,7 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) 
(interface{}, error) { result = append(result, v) } - a.logger.Debugf("返回服务数量: %v", len(result)) + a.logger.Debugf("Return service quantity: %v", len(result)) return helper.MSPNormalizeList{ List: result, @@ -414,9 +410,6 @@ func (a *API) UnitsCertsList(c *helper.HTTPWrapContext) (interface{}, error) { } //func getExpiryCountByDuration(sign string) (before, after time.Time, err error) { -// // 一周内 -// // 过期时间 - 当前时间 <= 一周 -// // 过期时间 <= 当前时间 + 一周 // expiryDate := func(du time.Duration) time.Time { // return time.Now().Add(du) // } diff --git a/api/v1/workload/workload_test.go b/api/v1/workload/workload_test.go deleted file mode 100644 index dccca72..0000000 --- a/api/v1/workload/workload_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package workload - -import ( - "fmt" - "strconv" - "strings" - "testing" - - fuzz "github.com/google/gofuzz" -) - -func TestRandomUniqueIds(t *testing.T) { - fuzzer := fuzz.New() - - var arr []string - for i := 0; i < 5000; i++ { - var str int - fuzzer.Fuzz(&str) - arr = append(arr, strconv.Itoa(str)) - } - - fmt.Println(`""` + strings.Join(arr, `","`) + `""`) -} diff --git a/bin/capitalizone b/bin/capitalizone deleted file mode 100755 index e6ed68f..0000000 Binary files a/bin/capitalizone and /dev/null differ diff --git a/ca/certmanager/cleaner.go b/ca/certmanager/cleaner.go deleted file mode 100644 index c380db1..0000000 --- a/ca/certmanager/cleaner.go +++ /dev/null @@ -1,142 +0,0 @@ -package certmanager - -import ( - "time" - - mapset "github.com/deckarep/golang-set" - "github.com/pkg/errors" - "github.com/spf13/cast" - "github.com/tidwall/gjson" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" - "go.uber.org/zap" - "gorm.io/gorm" - - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/mesh" - 
"gitlab.oneitfarm.com/bifrost/capitalizone/util" -) - -// CertCleaner ... -type CertCleaner struct { - logger *zap.SugaredLogger - db *gorm.DB -} - -// NewCertCleaner ... -func NewCertCleaner() *CertCleaner { - return &CertCleaner{ - logger: v2log.Named("cleaner").SugaredLogger, - db: core.Is.Db, - } -} - -// AutoGC ... -func (cc *CertCleaner) AutoGC() { - cc.logger.Info("开启自动 GC") - t := time.NewTicker(time.Hour) - for { - cc.logger.Info("执行自动清理") - cc.GarbageCollect() - <-t.C - } -} - -// GarbageCollect ... -func (cc *CertCleaner) GarbageCollect() { - cc.logger.Debug("获取 Mesh 元数据") - body, err := mesh.GetAllDynamicServiceMetadataRaw() - if err != nil { - cc.logger.Errorf("获取 Mesh 元数据错误: %s", err) - return - } - cc.logger.Debugf("Mesh 元数据: %s", string(body)) - runtimeUniqueIDs := make([]string, 0) - for _, uid := range gjson.GetBytes(body, "data.list.#.unique_id").Array() { - runtimeUniqueIDs = append(runtimeUniqueIDs, uid.String()) - } - cc.logger.With("unique_ids", runtimeUniqueIDs).Info("清理下线 Sidecar 证书") - if err := cc.cleanDownSidecarCerts(runtimeUniqueIDs); err != nil { - cc.logger.Errorf("清理下线 Sidecar 证书: %s", err) - } - if err := cc.cleanRevokedSidecarCerts(); err != nil { - cc.logger.Errorf("清理 Sidecar 主动吊销证书: %s", err) - } -} - -// 若模式为 Vault 储存, 删除 vault 对应 KV -func (cc *CertCleaner) cleanDownSidecarCerts(runtimeUniqueIDs []string) error { - query := cc.db.Model(&model.Certificates{}). - Where("ca_label = ?", caclient.RoleSidecar). - Select("common_name"). 
- Group("common_name") - var certs []model.Certificates - if err := query.Find(&certs).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return errors.Wrap(err, "证书查询错误") - } - dbUniqueIDs := make([]string, 0, len(certs)) - for _, cert := range certs { - dbUniqueIDs = append(dbUniqueIDs, cert.CommonName.String) - } - // cc.logger.With("unique_ids", dbUniqueIDs).Debug("db unique_ids") - - deleteIDs := cast.ToStringSlice( - mapset.NewSetFromSlice(util.StringSliceToInterfaceSlice(dbUniqueIDs)). - Difference(mapset.NewSetFromSlice(util.StringSliceToInterfaceSlice(runtimeUniqueIDs))). - ToSlice()) - - if len(deleteIDs) == 0 { - return nil - } - - deleteIDMap := make(map[string]bool, len(deleteIDs)) - for _, deleteID := range deleteIDs { - deleteIDMap[deleteID] = true - } - - if hook.EnableVaultStorage { - for _, certRow := range certs { - if _, ok := deleteIDMap[certRow.CaLabel.String]; ok { - if err := core.Is.VaultSecret.DeleteCertPEM(certRow.SerialNumber); err != nil { - core.Is.Logger.Warnf("vault 删除错误: %s, sn: %s", err, certRow.SerialNumber) - } - } - } - } - - cc.logger.With("unique_ids", deleteIDs).Info("清理下线 Sidecar 证书") - if err := cc.db.Where("common_name IN (?)", deleteIDs).Delete(&model.Certificates{}).Error; err != nil { - return errors.Wrap(err, "证书批量删除出错") - } - - return nil -} - -// 若模式为 Vault 储存, 删除 vault 对应 KV -func (cc *CertCleaner) cleanRevokedSidecarCerts() error { - query := cc.db.Where("ca_label = ?", caclient.RoleSidecar). - Where("status = ?", "revoked"). - Where("reason = ?", 1) - var certs []model.Certificates - if err := query.Find(&certs).Error; err != nil && !errors.Is(err, gorm.ErrRecordNotFound) { - return errors.Wrap(err, "证书查询错误") - } - - if hook.EnableVaultStorage { - for _, certRow := range certs { - if err := core.Is.VaultSecret.DeleteCertPEM(certRow.SerialNumber); err != nil { - core.Is.Logger.Warnf("vault 删除错误: %s, sn: %s", err, certRow.SerialNumber) - } - } - } - - err := query. 
- Delete(&model.Certificates{}).Error - if err != nil { - return errors.Wrap(err, "证书批量删除出错") - } - cc.logger.Info("清理 Sidecar 主动吊销证书") - return nil -} diff --git a/ca/datastore/datastore.go b/ca/datastore/datastore.go index cfe8426..a15cc3f 100644 --- a/ca/datastore/datastore.go +++ b/ca/datastore/datastore.go @@ -1,13 +1,13 @@ -// Package datastore 数据储存 +// Package datastore Data storage package datastore import ( "errors" + "github.com/ztalab/ZACA/pkg/logger" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultsecret" - v2 "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/pkg/vaultsecret" "go.uber.org/zap" "gorm.io/gorm" ) @@ -22,7 +22,7 @@ const ( PolicyMixed = "mix" ) -// DataStorer 数据储存 +// DataStorer Data storage type DataStorer struct { logger *zap.SugaredLogger db *gorm.DB @@ -33,14 +33,14 @@ type DataStorer struct { // DefaultDataStorer ... 
func DefaultDataStorer() *DataStorer { return &DataStorer{ - logger: v2.S().Named("datastore"), + logger: logger.S().Named("datastore"), db: core.Is.Db, vaultSecret: core.Is.VaultSecret, policy: PolicyMixed, } } -// GetWorkloadCertPEM 根据 SN 获取 Workload 证书 +// GetWorkloadCertPEM Obtain workload certificate according to SN func (ds *DataStorer) GetWorkloadCertPEM(sn string) ([]byte, error) { getFromDB := func() ([]byte, error) { certModel := new(model.Certificates) diff --git a/ca/datastore/migration.go b/ca/datastore/migration.go index d66aebe..b18efe5 100644 --- a/ca/datastore/migration.go +++ b/ca/datastore/migration.go @@ -1,17 +1,17 @@ package datastore import ( + "github.com/ztalab/ZACA/pkg/logger" "time" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultsecret" - v2 "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/pkg/vaultsecret" ) -// RunMigration 迁移 MySQL 数据到 Vault +// RunMigration Migrate MySQL data to vault func RunMigration() { - v2.Debug("MySQL -> Vault 数据库迁移") + logger.Debug("MySQL -> Vault Database migration") certRows := make([]*model.Certificates, 0) result := core.Is.Db.Model(&model.Certificates{}).Where("expiry > ? AND revoked_at is NULL", time.Now()).Limit(10000). 
Find(&certRows) @@ -20,28 +20,28 @@ func RunMigration() { continue } if pemStr, err := core.Is.VaultSecret.GetCertPEM(row.SerialNumber); err != nil || *pemStr == "" { - core.Is.Logger.Debugf("Vault 迁移 %s", row.SerialNumber) + core.Is.Logger.Debugf("Vault Transfer %s", row.SerialNumber) if err := core.Is.VaultSecret.StoreCertPEM(row.SerialNumber, row.Pem); err != nil { - core.Is.Logger.Errorf("Vault store cert %s 错误: %s", row.SerialNumber, err) + core.Is.Logger.Errorf("Vault store cert %s Error: %s", row.SerialNumber, err) } } } if result.Error != nil { - core.Is.Logger.Errorf("迁移 MySQL 到 Vault 错误: %s", result.Error) + core.Is.Logger.Errorf("Error migrating Mysql to vault: %s", result.Error) } caKeyPair := new(model.SelfKeypair) if err := core.Is.Db.Model(&model.SelfKeypair{}).Where("name = ?", "ca").First(caKeyPair).Error; err == nil { if err := core.Is.VaultSecret.StoreCertPEMKey(vaultsecret.CALocalStoreKey, caKeyPair.Certificate.String, caKeyPair.PrivateKey.String); err != nil { - core.Is.Logger.Errorf("Vault ca cert 储存错误: %s", err) + core.Is.Logger.Errorf("Vault ca cert Storage error: %s", err) } } trustKeyPair := new(model.SelfKeypair) if err := core.Is.Db.Model(&model.SelfKeypair{}).Where("name = ?", "trust").First(trustKeyPair).Error; err == nil { if err := core.Is.VaultSecret.StoreCertPEM(vaultsecret.CATructCertsKey, trustKeyPair.Certificate.String); err != nil { - core.Is.Logger.Errorf("Vault trust cert 储存错误: %s", err) + core.Is.Logger.Errorf("Vault trust cert Storage error: %s", err) } } } diff --git a/ca/keymanager/csr_templates.go b/ca/keymanager/csr_templates.go index 6d9fa03..8440cd8 100644 --- a/ca/keymanager/csr_templates.go +++ b/ca/keymanager/csr_templates.go @@ -1,8 +1,8 @@ package keymanager import ( - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/cfssl/csr" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/cfssl/csr" ) // getRootCSRTemplate Root CA @@ -21,12 +21,12 @@ var getRootCSRTemplate = func() 
*csr.CertificateRequest { } } -// getIntermediateCSRTemplate 中间 CA 模板 +// getIntermediateCSRTemplate var getIntermediateCSRTemplate = func() *csr.CertificateRequest { return &csr.CertificateRequest{ Names: []csr.Name{ { - O: core.Is.Config.Keymanager.CsrTemplates.IntermediateCa.O, // 从配置文件获取 + O: core.Is.Config.Keymanager.CsrTemplates.IntermediateCa.O, OU: core.Is.Config.Keymanager.CsrTemplates.IntermediateCa.Ou, }, }, diff --git a/ca/keymanager/keeper.go b/ca/keymanager/keeper.go index 04024a5..203fb2d 100644 --- a/ca/keymanager/keeper.go +++ b/ca/keymanager/keeper.go @@ -10,17 +10,17 @@ import ( jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultsecret" - cfssl_client "gitlab.oneitfarm.com/bifrost/cfssl/api/client" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - "gitlab.oneitfarm.com/bifrost/cfssl/info" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" - "gitlab.oneitfarm.com/bifrost/go-toolbox/memorycacher" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/schema" + "github.com/ztalab/ZACA/pkg/influxdb" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/memorycacher" + "github.com/ztalab/ZACA/pkg/vaultsecret" + cfssl_client "github.com/ztalab/cfssl/api/client" + "github.com/ztalab/cfssl/helpers" + "github.com/ztalab/cfssl/hook" + "github.com/ztalab/cfssl/info" "gorm.io/gorm" ) @@ -28,12 +28,11 @@ import ( type Keeper struct { DB *gorm.DB cache *memorycacher.Cache - logger *v2log.Logger + logger *logger.Logger RootClient UpperClients } var ( - // Std 单例 Std *Keeper ) @@ -60,11 +59,11 @@ func InitKeeper() error { 
rootClients, err = NewUpperClients(core.Is.Config.Keymanager.UpperCa) } if err != nil { - return errors.Wrap(err, "upper client 创建错误") + return errors.Wrap(err, "upper client Create error") } Std = &Keeper{ DB: db, - logger: v2log.Named("keeper"), + logger: logger.Named("keeper"), cache: memorycacher.New(time.Hour, memorycacher.NoExpiration, math.MaxInt64), RootClient: rootClients, } @@ -75,7 +74,7 @@ func InitKeeper() error { func GetKeeper() *Keeper { defer func() { if err := recover(); err != nil { - v2log.Named("keeper").Fatal("未初始化") + logger.Named("keeper").Fatal("Uninitialized") } }() return Std @@ -88,10 +87,10 @@ func (k *Keeper) GetDBSelfKeyPairPEM() (key, cert []byte, err error) { err = k.DB.Where("name = ?", SelfKeyPairName).Order("id desc").First(keyPair).Error if err != nil { if errors.Is(err, gorm.ErrRecordNotFound) { - k.logger.Warn("self 密钥与证书 not found") + k.logger.Warn("self Keys and certificates not found") return nil, nil, err } - k.logger.Errorf("self-pair 查询错误: %v", err) + k.logger.Errorf("self-pair query error: %v", err) return nil, nil, err } if keyPair.PrivateKey.Valid { @@ -105,10 +104,10 @@ func (k *Keeper) GetDBSelfKeyPairPEM() (key, cert []byte, err error) { if hook.EnableVaultStorage { certStr, keyStr, err := core.Is.VaultSecret.GetCertPEMKey(vaultsecret.CALocalStoreKey) if err != nil { - k.logger.Errorf("vault 密钥与证书读取错误: %s", err) + k.logger.Errorf("vault Key and certificate read error: %s", err) return nil, nil, err } - core.Is.Logger.With("key", keyStr, "cert", certStr).Debugf("Vault 获取 CA KEYPAIR") + core.Is.Logger.With("key", keyStr, "cert", certStr).Debugf("Vault CA KEYPAIR") key = []byte(*keyStr) cert = []byte(*certStr) } @@ -120,12 +119,12 @@ func (k *Keeper) GetDBSelfKeyPairPEM() (key, cert []byte, err error) { func (k *Keeper) GetCachedTLSKeyPair() (*tls.Certificate, error) { keyPEM, certPEM, err := k.GetCachedSelfKeyPairPEM() if err != nil { - k.logger.Errorf("tls.Cert 获取出错: %v", err) + k.logger.Errorf("tls.Cert Get error: 
%v", err) return nil, err } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - k.logger.Errorf("tls.X509 出错: %v", err) + k.logger.Errorf("tls.X509 error: %v", err) return nil, err } return &cert, nil @@ -150,19 +149,19 @@ func (k *Keeper) GetCachedSelfKeyPair() (key crypto.Signer, cert *x509.Certifica keyPEM, certPEM, err := k.GetCachedSelfKeyPairPEM() if err != nil { - k.logger.Errorf("获取 cache keypair PEM 出错: %v", err) + k.logger.Errorf("Error getting cache keypair PEM: %v", err) return } priv, err := helpers.ParsePrivateKeyPEM(keyPEM) if err != nil { - k.logger.With("key", string(keyPEM)).Errorf("证书 KEY 解析错误: %v", err) + k.logger.With("key", string(keyPEM)).Errorf("Certificate key parsing error: %v", err) return } key = priv cert, err = helpers.ParseCertificatePEM(certPEM) if err != nil { - k.logger.With("cert", string(certPEM)).Errorf("证书 PEM 解析错误: %v", err) + k.logger.With("cert", string(certPEM)).Errorf("Certificate PEM parsing error: %v", err) return } @@ -204,23 +203,21 @@ func (k *Keeper) SetKeyPairPEM(key, cert []byte) error { UpdatedAt: time.Now(), } if hook.EnableVaultStorage { - // 只允许数据库储存 Cert - // Key 只允许储存在 Vault 总 keyPair.PrivateKey = sql.NullString{String: "", Valid: true} if err := core.Is.VaultSecret.StoreCertPEMKey(vaultsecret.CALocalStoreKey, string(cert), string(key)); err != nil { - k.logger.Errorf("Vault 写入 ca local store 错误: %s", err) + k.logger.Errorf("Vault write CA local store error: %s", err) return err } } if err := k.DB.Create(keyPair).Error; err != nil { - k.logger.Errorf("数据库插入错误: %v", err) + k.logger.Errorf("Database insert error: %v", err) return err } k.cache.Flush() return nil } -// GetL3CachedTrustCerts 多级缓存信任证书, 进程内存 > DB > 远程 +// GetL3CachedTrustCerts Memory > multi level cache > remote process > certificate func (k *Keeper) GetL3CachedTrustCerts() (certs []*x509.Certificate, err error) { if cachedCerts, ok := k.cache.Get(cacheTrusts); ok { if v, ok := cachedCerts.([]*x509.Certificate); ok { @@ -236,45 +233,43 
@@ func (k *Keeper) GetL3CachedTrustCerts() (certs []*x509.Certificate, err error) k.cache.SetDefault(cacheTrusts, certs) return certs, nil } - k.logger.Errorf("DB Trust 证书解析错误: %v", err) + k.logger.Errorf("DB Trust Certificate parsing error: %v", err) } if dbErr != nil && !errors.Is(dbErr, gorm.ErrRecordNotFound) { - k.logger.Errorf("DB 获取 Trust 证书错误: %v", err) + k.logger.Errorf("DB get trust certificate error: %v", err) } } if hook.EnableVaultStorage { certsPEM, err := core.Is.VaultSecret.GetCertPEM(vaultsecret.CATructCertsKey) if err != nil { - k.logger.Errorf("Vault 获取 Trust 证书错误: %s", err) + k.logger.Errorf("Vault get trust certificate error: %s", err) } certs, err := helpers.ParseCertificatesPEM([]byte(*certsPEM)) if err == nil { k.cache.SetDefault(cacheTrusts, certs) return certs, nil } - k.logger.Errorf("Vault Trust 证书解析错误: %v", err) + k.logger.Errorf("Vault Trust Certificate parsing error: %v", err) } certs, err = k.GetRemoteTrustCerts() if err != nil { - k.logger.Errorf("远程获取 Trust 证书出错: %v", err) + k.logger.Errorf("Error getting trust certificate remotely: %v", err) return nil, err } if len(certs) > 0 { - // 协程运行 - // TODO 定时获取最新 Remote Trust 证书插入到数据库 go func() { if err := k.saveTrustCerts(certs); err != nil { - k.logger.Errorf("certs 储存错误: %s", err) + k.logger.Errorf("certs Storage error: %s", err) } }() } return certs, nil } -// GetRemoteTrustCerts 获取远程信任证书 (包含 ROOT 证书, 中间 CA 证书) +// GetRemoteTrustCerts Obtain remote trust certificate (including root certificate and intermediate CA certificate) func (k *Keeper) GetRemoteTrustCerts() (certs []*x509.Certificate, err error) { if core.Is.Config.Keymanager.SelfSign { return @@ -293,7 +288,6 @@ func (k *Keeper) GetRemoteTrustCerts() (certs []*x509.Certificate, err error) { core.Is.Metrics.AddPoint(&influxdb.MetricsData{ Measurement: schema.MetricsUpperCaInfo, Fields: map[string]interface{}{ - // TODO 请求耗时 "trust_certs_num": len(infoResp.TrustCertificates) + 1, }, Tags: map[string]string{ @@ -306,7 +300,7 @@ 
func (k *Keeper) GetRemoteTrustCerts() (certs []*x509.Certificate, err error) { return nil }) if err != nil { - k.logger.Errorf("获取 Root 证书错误: %s", err) + k.logger.Errorf("Error getting root certificate: %s", err) return nil, err } @@ -316,7 +310,7 @@ func (k *Keeper) GetRemoteTrustCerts() (certs []*x509.Certificate, err error) { for _, certStr := range resp.TrustCertificates { cert, err := helpers.ParseCertificatePEM([]byte(certStr)) if err != nil { - k.logger.Errorf("ROOT 证书解析错误: %v", err) + k.logger.Errorf("ROOT Certificate parsing error: %v", err) return nil, err } certsMap[cert.SerialNumber.String()] = cert @@ -340,15 +334,15 @@ func (k *Keeper) saveTrustCerts(certs []*x509.Certificate) error { if hook.EnableVaultStorage { trustKeypair.Certificate = sql.NullString{String: "", Valid: true} if err := core.Is.VaultSecret.StoreCertPEM(vaultsecret.CATructCertsKey, string(certsPEM)); err != nil { - k.logger.Errorf("vault 储存 trust certs 错误: %s", err) + k.logger.Errorf("vault Error saving trust certs: %s", err) return err } } - // 这里插入而不是更新, 保证每次都有记录 + // Insert here instead of update to ensure that there are records every time if err := k.DB.Create(trustKeypair).Error; err != nil { - k.logger.Errorf("数据库插入错误: %v", err) + k.logger.Errorf("Database insert error: %v", err) return err } - k.logger.With("num", len(certs)).Infof("Trust 证书插入到数据库") + k.logger.With("num", len(certs)).Infof("Trust Insert certificate into database") return nil } diff --git a/ca/keymanager/remote_signer.go b/ca/keymanager/remote_signer.go index d4eebe7..22bb131 100644 --- a/ca/keymanager/remote_signer.go +++ b/ca/keymanager/remote_signer.go @@ -2,42 +2,42 @@ package keymanager import ( jsoniter "github.com/json-iterator/go" - cfssl_client "gitlab.oneitfarm.com/bifrost/cfssl/api/client" - "gitlab.oneitfarm.com/bifrost/cfssl/cli/genkey" - "gitlab.oneitfarm.com/bifrost/cfssl/csr" - "gitlab.oneitfarm.com/bifrost/cfssl/signer" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + 
"github.com/ztalab/ZACA/pkg/logger" + cfssl_client "github.com/ztalab/cfssl/api/client" + "github.com/ztalab/cfssl/cli/genkey" + "github.com/ztalab/cfssl/csr" + "github.com/ztalab/cfssl/signer" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + "github.com/ztalab/ZACA/core" ) // RemoteSigner ... type RemoteSigner struct { - logger *v2log.Logger + logger *logger.Logger } // NewRemoteSigner ... func NewRemoteSigner() *RemoteSigner { return &RemoteSigner{ - logger: v2log.Named("remote-signer"), + logger: logger.Named("remote-signer"), } } -// Run 调用远程 CA 签名证书并持久化储存 +//Run calls the remote CA to sign the certificate and persist it func (ss *RemoteSigner) Run() error { if core.Is.Config.Keymanager.SelfSign { return nil } key, cert, _ := GetKeeper().GetCachedSelfKeyPairPEM() if key != nil && cert != nil { - ss.logger.Info("证书已存在, 跳过远程签名过程") + ss.logger.Info("The certificate already exists. Skip the remote signing process") return nil } - ss.logger.Warn("没有证书, 即将远程签名证书") + ss.logger.Warn("There is no certificate. 
You will sign the certificate remotely") g := &csr.Generator{Validator: genkey.Validator} csrBytes, key, err := g.ProcessRequest(getIntermediateCSRTemplate()) if err != nil { - ss.logger.Errorf("key, csr 生产错误: %v", err) + ss.logger.Errorf("key, csr Production error: %v", err) return err } @@ -56,16 +56,14 @@ func (ss *RemoteSigner) Run() error { return nil }) if err != nil { - ss.logger.Errorf("initca 创建错误: %v", err) + ss.logger.Errorf("initca Create error: %v", err) return err } - ss.logger.With("key", string(key), "cert", string(cert)).Debugf("自签证书完成") + ss.logger.With("key", string(key), "cert", string(cert)).Debugf("Self signed certificate completed") if err = GetKeeper().SetKeyPairPEM(key, cert); err != nil { - ss.logger.Errorf("储存证书错误: %v", err) + ss.logger.Errorf("Error saving certificate: %v", err) return err } - // TODO 开启协程自动轮换证书 - return nil } diff --git a/ca/keymanager/self_signer.go b/ca/keymanager/self_signer.go index 651b3de..5696e60 100644 --- a/ca/keymanager/self_signer.go +++ b/ca/keymanager/self_signer.go @@ -1,42 +1,40 @@ package keymanager import ( - "gitlab.oneitfarm.com/bifrost/cfssl/initca" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/initca" ) // SelfSigner ... type SelfSigner struct { - logger *v2log.Logger + logger *logger.Logger } // NewSelfSigner ... func NewSelfSigner() *SelfSigner { return &SelfSigner{ - logger: v2log.Named("self-signer"), + logger: logger.Named("self-signer"), } } -// Run 自签名证书并储存 +// Run Self signed certificate and saved func (ss *SelfSigner) Run() error { key, cert, _ := GetKeeper().GetCachedSelfKeyPairPEM() if key != nil && cert != nil { - ss.logger.Info("证书已存在, 跳过自签名过程") + ss.logger.Info("The certificate already exists. 
Skip the self signing process") return nil } - ss.logger.Warn("没有证书, 即将自签名证书") + ss.logger.Warn("No certificate, self signed certificate") cert, _, key, err := initca.New(getRootCSRTemplate()) if err != nil { - ss.logger.Errorf("initca 创建错误: %v", err) + ss.logger.Errorf("initca Create error: %v", err) return err } - ss.logger.With("key", string(key), "cert", string(cert)).Debugf("自签证书完成") + ss.logger.With("key", string(key), "cert", string(cert)).Debugf("Self signed certificate completed") if err = GetKeeper().SetKeyPairPEM(key, cert); err != nil { - ss.logger.Errorf("储存证书错误: %v", err) + ss.logger.Errorf("Error saving certificate: %v", err) return err } - // TODO 开启协程自动轮换证书 - return nil } diff --git a/ca/keymanager/upper_client.go b/ca/keymanager/upper_client.go index 0107f77..69afb45 100644 --- a/ca/keymanager/upper_client.go +++ b/ca/keymanager/upper_client.go @@ -5,13 +5,13 @@ import ( "net/url" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/api/client" - "gitlab.oneitfarm.com/bifrost/cfssl/auth" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/api/client" + "github.com/ztalab/cfssl/auth" "go.uber.org/multierr" "go.uber.org/zap" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + "github.com/ztalab/ZACA/core" ) type UpperClients interface { @@ -27,7 +27,7 @@ type upperClients struct { func (uc *upperClients) DoWithRetry(f func(*client.AuthRemote) error) error { if len(uc.clients) == 0 { - return errors.New("没有可用客户端") + return errors.New("No clients available") } var errGroup error for _, upperClient := range uc.clients { @@ -36,7 +36,7 @@ func (uc *upperClients) DoWithRetry(f func(*client.AuthRemote) error) error { // success return nil } - uc.logger.With("upper", upperClient.Hosts()).Warnf("upper ca 执行错误: %s", err) + uc.logger.With("upper", upperClient.Hosts()).Warnf("upper ca Execution error: %s", err) multierr.AppendInto(&errGroup, err) } return errGroup @@ -48,26 +48,26 @@ func 
(uc *upperClients) AllClients() map[string]*client.AuthRemote { func NewUpperClients(adds []string) (UpperClients, error) { if len(adds) == 0 { - return nil, errors.New("Upper CA 地址配置错误") + return nil, errors.New("Upper CA Address configuration error") } ap, err := auth.New(core.Is.Config.Singleca.CfsslConfig.AuthKeys["intermediate"].Key, nil) if err != nil { - return nil, errors.Wrap(err, "Auth key 配置错误") + return nil, errors.Wrap(err, "Auth key Configuration error") } clients := make(map[string]*client.AuthRemote) for _, addr := range adds { upperAddr, err := url.Parse(addr) if err != nil { - return nil, errors.Wrap(err, "Upper CA 地址解析错误") + return nil, errors.Wrap(err, "Upper CA Address resolution error") } upperClient := client.NewAuthServer(addr, &tls.Config{ InsecureSkipVerify: true, //nolint:gosec }, ap) clients[upperAddr.Host] = upperClient } - v2log.Infof("Upper CA Client 数量: %v", len(clients)) + logger.Infof("Upper CA Client Quantity: %v", len(clients)) return &upperClients{ clients: clients, - logger: v2log.Named("upperca").SugaredLogger, + logger: logger.Named("upperca").SugaredLogger, }, nil } diff --git a/ca/ocsp/metrics.go b/ca/ocsp/metrics.go index 72e820d..c494a1f 100644 --- a/ca/ocsp/metrics.go +++ b/ca/ocsp/metrics.go @@ -4,9 +4,9 @@ import ( "sync/atomic" "time" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/logic/schema" + "github.com/ztalab/ZACA/pkg/influxdb" ) var ( diff --git a/ca/ocsp/source.go b/ca/ocsp/source.go index de8ab3d..4e5f21a 100644 --- a/ca/ocsp/source.go +++ b/ca/ocsp/source.go @@ -7,18 +7,18 @@ import ( "time" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - "gitlab.oneitfarm.com/bifrost/cfssl/ocsp" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" - 
"gitlab.oneitfarm.com/bifrost/go-toolbox/memorycacher" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/memorycacher" + "github.com/ztalab/cfssl/helpers" + "github.com/ztalab/cfssl/hook" + "github.com/ztalab/cfssl/ocsp" "go.uber.org/zap" stdocsp "golang.org/x/crypto/ocsp" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/events" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/events" ) const ( @@ -39,7 +39,7 @@ var CertStatusIntMap = map[string]int{ CertStatusOCSPSignError: 502, } -// SharedSources 进程 Cache 保障高效访问, 后续若访问量大可以考虑 Redis +// SharedSources type SharedSources struct { DB *gorm.DB Cache *memorycacher.Cache @@ -55,13 +55,13 @@ func NewSharedSources(signer ocsp.Signer) (*SharedSources, error) { cacheTime := time.Duration(core.Is.Config.Ocsp.CacheTime) return &SharedSources{ DB: core.Is.Db, - Logger: v2log.Named("ocsp-ss").SugaredLogger, + Logger: logger.Named("ocsp-ss").SugaredLogger, Cache: memorycacher.New(cacheTime*time.Minute, memorycacher.NoExpiration, math.MaxInt64), OcspSigner: signer, }, nil } -// Response 查询 DB 返回 OCSP 数据结构 +// Response func (ss *SharedSources) Response(req *stdocsp.Request) ([]byte, http.Header, error) { if req == nil { return nil, nil, errors.New("called with nil request") @@ -77,32 +77,30 @@ func (ss *SharedSources) Response(req *stdocsp.Request) ([]byte, http.Header, er if cachedResp, ok := ss.Cache.Get(strSN + aki); ok { if resp, ok := cachedResp.([]byte); ok { - ss.Logger.With("sn", strSN, "aki", aki).Debugf("ocspResp cache 击中") - // TODO 获取 UniqueID + ss.Logger.With("sn", strSN, "aki", aki).Debugf("ocspResp cache") AddMetricsPoint("", true, CertStatusUnknown) return resp, nil, nil } - ss.Logger.With("sn", strSN, "aki", aki).Errorf("cache 值解析错误") + ss.Logger.With("sn", strSN, "aki", 
aki).Errorf("cache Value parsing error") } - // 数据库查询 + // Database query certRecord := &model.Certificates{} if err := ss.DB.Where("serial_number = ? AND authority_key_identifier = ?", strSN, aki).First(certRecord).Error; err != nil { if errors.Is(err, gorm.ErrRecordNotFound) { - ss.Logger.With("sn", strSN, "aki", aki).Warnw("证书不存在") + ss.Logger.With("sn", strSN, "aki", aki).Warnw("Certificate does not exist") AddMetricsPoint("", false, CertStatusNotFound) return nil, nil, ocsp.ErrNotFound } - ss.Logger.With("sn", strSN, "aki", aki).Errorf("证书获取错误: %v", err) + ss.Logger.With("sn", strSN, "aki", aki).Errorf("Certificate acquisition error: %v", err) AddMetricsPoint("", false, CertStatusServerError) return nil, nil, errors.Wrap(err, "server error") } - // 从 vault 获取证书 PEM if hook.EnableVaultStorage { pem, err := core.Is.VaultSecret.GetCertPEM(strSN) if err != nil { - ss.Logger.With("sn", strSN, "aki", aki).Warnf("Vault 获取错误: %v", err) + ss.Logger.With("sn", strSN, "aki", aki).Warnf("Vault Get error: %v", err) } else { certRecord.Pem = *pem } @@ -110,7 +108,7 @@ func (ss *SharedSources) Response(req *stdocsp.Request) ([]byte, http.Header, er cert, err := helpers.ParseCertificatePEM([]byte(certRecord.Pem)) if err != nil { - ss.Logger.With("sn", strSN, "aki", aki).Errorf("证书 PEM 解析错误: %v", err) + ss.Logger.With("sn", strSN, "aki", aki).Errorf("Certificate PEM parsing error: %v", err) AddMetricsPoint("", false, CertStatusCertParseError) return nil, nil, errors.Wrap(err, "cert err") } @@ -124,7 +122,7 @@ func (ss *SharedSources) Response(req *stdocsp.Request) ([]byte, http.Header, er ocspResp, err := ss.OcspSigner.Sign(*signReq) if err != nil { - ss.Logger.With("sn", strSN, "aki", aki).Errorf("OCSP Sign 错误: %v", err) + ss.Logger.With("sn", strSN, "aki", aki).Errorf("OCSP Sign error: %v", err) AddMetricsPoint(cert.Subject.CommonName, false, CertStatusOCSPSignError) return nil, nil, errors.Wrap(err, "internal err") } @@ -137,7 +135,7 @@ func (ss *SharedSources) Response(req 
*stdocsp.Request) ([]byte, http.Header, er ss.Cache.SetDefault(strSN+aki, ocspResp) - ss.Logger.With("sn", strSN, "aki", aki).Infof("OCSP 签名完成") + ss.Logger.With("sn", strSN, "aki", aki).Infof("OCSP Signature Complete") AddMetricsPoint(cert.Subject.CommonName, false, CertStatusGood) return ocspResp, nil, nil diff --git a/ca/revoke/metrics.go b/ca/revoke/metrics.go index c5b3396..6b3ca1b 100644 --- a/ca/revoke/metrics.go +++ b/ca/revoke/metrics.go @@ -5,9 +5,9 @@ import ( "sync/atomic" "time" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/logic/schema" + "github.com/ztalab/ZACA/pkg/influxdb" ) var overallRevokeCounter uint64 diff --git a/ca/revoke/revoke.go b/ca/revoke/revoke.go index b5b8026..d294ea5 100644 --- a/ca/revoke/revoke.go +++ b/ca/revoke/revoke.go @@ -7,28 +7,27 @@ import ( "io/ioutil" "net/http" - "gitlab.oneitfarm.com/bifrost/cfssl/api" - "gitlab.oneitfarm.com/bifrost/cfssl/certdb" - cf_err "gitlab.oneitfarm.com/bifrost/cfssl/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - "gitlab.oneitfarm.com/bifrost/cfssl/ocsp" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/api" + "github.com/ztalab/cfssl/certdb" + cf_err "github.com/ztalab/cfssl/errors" + "github.com/ztalab/cfssl/helpers" + "github.com/ztalab/cfssl/hook" + "github.com/ztalab/cfssl/ocsp" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/events" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/signature" - "gitlab.oneitfarm.com/bifrost/capitalizone/util" + "github.com/ztalab/ZACA/core" + 
"github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/events" + "github.com/ztalab/ZACA/pkg/signature" + "github.com/ztalab/ZACA/util" ) // A Handler accepts requests with a serial number parameter // and revokes type Handler struct { dbAccessor certdb.Accessor - logger *v2log.Logger + logger *logger.Logger } // NewHandler returns a new http.Handler that handles a revoke request. @@ -36,7 +35,7 @@ func NewHandler(dbAccessor certdb.Accessor) http.Handler { return &api.HTTPHandler{ Handler: &Handler{ dbAccessor: dbAccessor, - logger: v2log.Named("revoke"), + logger: logger.Named("revoke"), }, Methods: []string{"POST"}, } @@ -76,18 +75,18 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) error { certRecord := &model.Certificates{} if err := core.Is.Db.Where("serial_number = ? AND authority_key_identifier = ?", req.Serial, req.AKI).First(certRecord).Error; err != nil { if err == gorm.ErrRecordNotFound { - h.logger.With("sn", req.Serial, "aki", req.AKI).Warn("证书不存在") + h.logger.With("sn", req.Serial, "aki", req.AKI).Warn("Certificate does not exist") } else { - h.logger.With("sn", req.Serial, "aki", req.AKI).Errorf("证书获取错误: %v", err) + h.logger.With("sn", req.Serial, "aki", req.AKI).Errorf("Certificate acquisition error: %v", err) } return cf_err.NewBadRequest(err) } - // 从 vault 获取证书 PEM + // Get certificate PEM from vault if hook.EnableVaultStorage { pem, err := core.Is.VaultSecret.GetCertPEM(req.Serial) if err != nil { - h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Vault 获取错误: %v", err) + h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Vault Get error: %v", err) } else { certRecord.Pem = *pem } @@ -95,25 +94,22 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) error { cert, err := helpers.ParseCertificatePEM([]byte(certRecord.Pem)) if err != nil { - h.logger.With("sn", req.Serial, "aki", req.AKI).Errorf("证书 PEM 解析错误: %v", err) + h.logger.With("sn", req.Serial, "aki", 
req.AKI).Errorf("Certificate PEM parsing error: %v", err) return cf_err.NewBadRequest(err) } - // TODO 兼容标准 CFSSL 认证方式 + // TODO Compatible with standard cfssl authentication mode var valid bool if req.AuthKey == "" { v := signature.NewVerifier(cert.PublicKey) valid, err = v.Verify([]byte(req.Nonce), req.Sign) if err != nil { - h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("验证错误: %v", err) + h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Validation error: %v", err) return cf_err.NewBadRequest(err) } } else { if req.Profile == "" { - return cf_err.NewBadRequest(errors.New("profile 未指定")) - } - if req.Profile != string(caclient.RoleIDGRegistry) { - return cf_err.NewBadRequest(errors.New("profile 不被允许进行吊销操作")) + return cf_err.NewBadRequest(errors.New("profile Unspecified")) } if authKey, ok := core.Is.Config.Singleca.CfsslConfig.AuthKeys[req.Profile]; ok { if authKey.Key == req.AuthKey { @@ -123,7 +119,7 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) error { } if !valid { - h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("证书无法对应: %v", err) + h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Certificate cannot correspond: %v", err) return cf_err.NewBadRequest(err) } @@ -133,16 +129,16 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) error { return cf_err.NewBadRequestString("Invalid reason code") } - // 删除 vault 对应的证书 KV + // Delete the certificate corresponding to vault if hook.EnableVaultStorage { if err := core.Is.VaultSecret.DeleteCertPEM(req.Serial); err != nil { - h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Vault 删除错误: %v", err) + h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Vault Delete error: %v", err) } } err = h.dbAccessor.RevokeCertificate(req.Serial, req.AKI, reasonCode) if err != nil { - h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("数据库操作错误: %v", err) + h.logger.With("sn", req.Serial, "aki", req.AKI).Warnf("Database operation error: %v", err) return err } @@ 
-154,7 +150,7 @@ func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) error { AKI: req.AKI, }).Log() - h.logger.With("sn", req.Serial, "aki", req.AKI, "uri", util.GetSanURI(cert)).Info("Workload 主动吊销证书") + h.logger.With("sn", req.Serial, "aki", req.AKI, "uri", util.GetSanURI(cert)).Info("Workload Active revocation of certificate") result := map[string]string{} return api.SendResponse(w, result) diff --git a/ca/signer/handler.go b/ca/signer/handler.go index 10f780f..00f9c5b 100644 --- a/ca/signer/handler.go +++ b/ca/signer/handler.go @@ -7,19 +7,19 @@ import ( "math/big" "net/http" - "gitlab.oneitfarm.com/bifrost/cfssl/api" - "gitlab.oneitfarm.com/bifrost/cfssl/auth" - "gitlab.oneitfarm.com/bifrost/cfssl/bundler" - "gitlab.oneitfarm.com/bifrost/cfssl/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - "gitlab.oneitfarm.com/bifrost/cfssl/log" - "gitlab.oneitfarm.com/bifrost/cfssl/signer" - - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/events" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/cfssl/api" + "github.com/ztalab/cfssl/auth" + "github.com/ztalab/cfssl/bundler" + "github.com/ztalab/cfssl/errors" + "github.com/ztalab/cfssl/helpers" + "github.com/ztalab/cfssl/hook" + "github.com/ztalab/cfssl/log" + "github.com/ztalab/cfssl/signer" + + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" + "github.com/ztalab/ZACA/logic/events" + "github.com/ztalab/ZACA/pkg/spiffe" ) // NoBundlerMessage is used to alert the user that the server does not have a bundler initialized. @@ -121,7 +121,7 @@ func jsonReqToTrue(js jsonSignRequest) signer.SignRequest { // in the "hostname" parameter. The certificate should be PEM-encoded. 
If // provided, subject information from the "subject" parameter will be used // in place of the subject information from the CSR. -// 该 Handler 不会被调用到, 我们通常使用下述的 AuthHandler +// The Handler will not be called, we usually use the following AuthHandler func (h *Handler) Handle(w http.ResponseWriter, r *http.Request) error { log.Info("signature request received") @@ -187,7 +187,7 @@ type AuthHandler struct { // NewAuthHandlerFromSigner creates a new AuthHandler from the signer // that is passed in. -// 签发证书的 API Handler +// issued the certificate API Handler func NewAuthHandlerFromSigner(signer signer.Signer) (http.Handler, error) { policy := signer.Policy() if policy == nil { @@ -228,7 +228,7 @@ func (h *AuthHandler) SetBundler(caBundleFile, intBundleFile string) (err error) } // Handle receives the incoming request, validates it, and processes it. -// 处理认证的签名证书请求 +// Process signed certificate requests for authentication func (h *AuthHandler) Handle(w http.ResponseWriter, r *http.Request) error { log.Info("signature request received") @@ -288,8 +288,8 @@ func (h *AuthHandler) Handle(w http.ResponseWriter, r *http.Request) error { return errors.NewBadRequestString("missing parameter 'certificate_request'") } - // 审计是否能够申请证书 - // 查询 DB 是否有标记 UniqueID 禁止申请 + // Can audit apply for certificate + // Query whether the DB is marked with uniqueID to prohibit application for _, signHost := range signReq.Hosts { id, err := spiffe.ParseIDGIdentity(signHost) if err != nil { @@ -309,7 +309,7 @@ func (h *AuthHandler) Handle(w http.ResponseWriter, r *http.Request) error { } } - // CFSSL 签发逻辑中增加, 若证书储存模式为 Vault, 增加数据库标志位, 不实际储存证书 PEM + // CFSSL In the issuing logic, if the certificate storage mode is vault, the database flag bit is added, and the certificate PEM is not actually stored cert, err := h.signer.Sign(signReq) if err != nil { log.Errorf("signature failed: %v", err) @@ -318,7 +318,7 @@ func (h *AuthHandler) Handle(w http.ResponseWriter, r *http.Request) error { x509Cert, 
_ := helpers.ParseCertificatePEM(cert) - // 签发证书后增加储存到 Vault + // After the certificate is issued, it is added and stored in the vault if hook.EnableVaultStorage { if err := core.Is.VaultSecret.StoreCertPEM(x509Cert.SerialNumber.String(), string(cert)); err != nil { core.Is.Logger.Errorf("vault store err: %s", err) @@ -326,7 +326,7 @@ func (h *AuthHandler) Handle(w http.ResponseWriter, r *http.Request) error { } } - // Metrics 时序记录 + // Metrics Timing record AddMetricsPoint(x509Cert) if x509Cert != nil { diff --git a/ca/signer/metrics.go b/ca/signer/metrics.go index 4fec8b5..6007e05 100644 --- a/ca/signer/metrics.go +++ b/ca/signer/metrics.go @@ -5,9 +5,9 @@ import ( "sync/atomic" "time" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/logic/schema" + "github.com/ztalab/ZACA/pkg/influxdb" ) var overallSignCounter uint64 diff --git a/ca/singleca/endpoint.go b/ca/singleca/endpoint.go index cb6f2dd..75fefcf 100644 --- a/ca/singleca/endpoint.go +++ b/ca/singleca/endpoint.go @@ -6,24 +6,24 @@ import ( "net/url" "strings" - "gitlab.oneitfarm.com/bifrost/cfssl/api" - "gitlab.oneitfarm.com/bifrost/cfssl/api/bundle" - "gitlab.oneitfarm.com/bifrost/cfssl/api/certinfo" - "gitlab.oneitfarm.com/bifrost/cfssl/api/crl" - "gitlab.oneitfarm.com/bifrost/cfssl/api/gencrl" - "gitlab.oneitfarm.com/bifrost/cfssl/api/generator" - "gitlab.oneitfarm.com/bifrost/cfssl/api/health" - "gitlab.oneitfarm.com/bifrost/cfssl/api/info" - "gitlab.oneitfarm.com/bifrost/cfssl/api/initca" - apiocsp "gitlab.oneitfarm.com/bifrost/cfssl/api/ocsp" - "gitlab.oneitfarm.com/bifrost/cfssl/api/scan" - "gitlab.oneitfarm.com/bifrost/cfssl/api/signhandler" - certsql "gitlab.oneitfarm.com/bifrost/cfssl/certdb/sql" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - 
"gitlab.oneitfarm.com/bifrost/capitalizone/ca/revoke" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/signer" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/api" + "github.com/ztalab/cfssl/api/bundle" + "github.com/ztalab/cfssl/api/certinfo" + "github.com/ztalab/cfssl/api/crl" + "github.com/ztalab/cfssl/api/gencrl" + "github.com/ztalab/cfssl/api/generator" + "github.com/ztalab/cfssl/api/health" + "github.com/ztalab/cfssl/api/info" + "github.com/ztalab/cfssl/api/initca" + apiocsp "github.com/ztalab/cfssl/api/ocsp" + "github.com/ztalab/cfssl/api/scan" + "github.com/ztalab/cfssl/api/signhandler" + certsql "github.com/ztalab/cfssl/certdb/sql" + + "github.com/ztalab/ZACA/ca/keymanager" + "github.com/ztalab/ZACA/ca/revoke" + "github.com/ztalab/ZACA/ca/signer" ) // V1APIPrefix is the prefix of all CFSSL V1 API Endpoints. @@ -90,14 +90,14 @@ var endpoints = map[string]func() (http.Handler, error){ if s == nil { return nil, errBadSigner } - // Prefetch, 在初始化时运行, 保证证书在启动时被加载完成 + // Prefetch, Run during initialization to ensure that the certificate is loaded at startup if _, err := keymanager.GetKeeper().GetL3CachedTrustCerts(); err != nil { - logger.Fatal("证书获取错误: %v", err) + logger.Fatal("Certificate acquisition error: %v", err) } return info.NewTrustCertsHandler(s, func() []*x509.Certificate { certs, err := keymanager.GetKeeper().GetL3CachedTrustCerts() if err != nil { - logger.Errorf("Trust 证书获取错误: %v", err) + logger.Errorf("Trust Certificate acquisition error: %v", err) } return certs }) diff --git a/ca/singleca/file_watcher.go b/ca/singleca/file_watcher.go index e84dbe0..9194f48 100644 --- a/ca/singleca/file_watcher.go +++ b/ca/singleca/file_watcher.go @@ -5,19 +5,19 @@ import ( "fmt" "io/ioutil" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/helpers" ) func getTrustCerts(path string) ([]*x509.Certificate, error) { pemCerts, err := 
ioutil.ReadFile(path) if err != nil { - return nil, fmt.Errorf("信任证书文件错误: %v", err) + return nil, fmt.Errorf("trust certificate file error: %v", err) } certs, err := helpers.ParseCertificatesPEM(pemCerts) if err != nil { - return nil, fmt.Errorf("获取信任证书失败: %v", err) + return nil, fmt.Errorf("failed to get trust certificate: %v", err) } - logger.Named("trust-certs").Infof("获取到信任证书数量: %v", len(certs)) + logger.Named("trust-certs").Infof("number of trust certificates obtained: %v", len(certs)) return certs, nil } diff --git a/ca/singleca/root.go b/ca/singleca/root.go index 1bcb321..a1080c6 100644 --- a/ca/singleca/root.go +++ b/ca/singleca/root.go @@ -15,18 +15,18 @@ import ( "github.com/gorilla/mux" "github.com/jmoiron/sqlx" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/certdb/sql" - "gitlab.oneitfarm.com/bifrost/cfssl/cli" + "github.com/ztalab/cfssl/certdb/sql" + "github.com/ztalab/cfssl/cli" // ... - _ "gitlab.oneitfarm.com/bifrost/cfssl/cli/ocspsign" - "gitlab.oneitfarm.com/bifrost/cfssl/ocsp" - "gitlab.oneitfarm.com/bifrost/cfssl/signer" - "gitlab.oneitfarm.com/bifrost/cfssl/signer/local" - - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - ocsp_responder "gitlab.oneitfarm.com/bifrost/capitalizone/ca/ocsp" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/upperca" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + _ "github.com/ztalab/cfssl/cli/ocspsign" + "github.com/ztalab/cfssl/ocsp" + "github.com/ztalab/cfssl/signer" + "github.com/ztalab/cfssl/signer/local" + + "github.com/ztalab/ZACA/ca/keymanager" + ocsp_responder "github.com/ztalab/ZACA/ca/ocsp" + "github.com/ztalab/ZACA/ca/upperca" + "github.com/ztalab/ZACA/core" ) var ( @@ -78,13 +78,13 @@ func Server() (*mux.Router, error) { var err error logger := core.Is.Logger.Named("singleca") - // 证书签名 + // Certificate signature if core.Is.Config.Keymanager.SelfSign { conf = cli.Config{ Disable: "sign,crl,gencrl,newcert,bundle,newkey,init_ca,scan,scaninfo,certinfo,ocspsign,/", } if err := 
keymanager.NewSelfSigner().Run(); err != nil { - logger.Fatalf("自签名证书错误: %v", err) + logger.Fatalf("Self signed certificate error: %v", err) } router.PathPrefix("/api/v1/cap/").HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { localPort := core.Is.Config.HTTP.Listen @@ -109,7 +109,7 @@ func Server() (*mux.Router, error) { } if err != nil { - logger.Errorf("请求错误: %s", err) + logger.Errorf("Request error: %s", err) writer.WriteHeader(500) writer.Write([]byte("server error")) } @@ -122,42 +122,41 @@ func Server() (*mux.Router, error) { Disable: "crl,gencrl,newcert,bundle,newkey,init_ca,scan,scaninfo,certinfo,ocspsign,/", } if err := keymanager.NewRemoteSigner().Run(); err != nil { - logger.Fatalf("远程签名证书错误: %v", err) + logger.Fatalf("Remote signing certificate error: %v", err) } - // 上级 CA 健康检查 + // Superior CA health check go upperca.NewChecker().Run() } logger.Info("Initializing signer") - // signer 赋值给全局变量 s + // signer Assign to global variable s if s, err = local.NewDynamicSigner( func() crypto.Signer { priv, _, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Priv Key 错误: %v", err) + logger.Errorf("Error getting priv key: %v", err) } return priv }, func() *x509.Certificate { _, cert, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Cert 错误: %v", err) + logger.Errorf("Get cert error: %v", err) } return cert }, func() x509.SignatureAlgorithm { priv, _, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Priv Key 错误: %v", err) + logger.Errorf("Error getting priv key: %v", err) } return signer.DefaultSigAlgo(priv) }, core.Is.Config.Singleca.CfsslConfig.Signing); err != nil { logger.Errorf("couldn't initialize signer: %v", err) return nil, err } - // 替换 DB SQL db, err = sqlx.Open("mysql", core.Is.Config.Mysql.Dsn) if err != nil { - logger.Errorf("Sqlx 初始化出错: %v", err) + logger.Errorf("Sqlx Initialization error: %v", err) return nil, 
err } s.SetDBAccessor(sql.NewAccessor(db)) @@ -166,16 +165,15 @@ func Server() (*mux.Router, error) { func() *x509.Certificate { _, cert, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Cert 错误: %v", err) + logger.Errorf("Get cert error: %v", err) } return cert }, func() crypto.Signer { priv, _, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Priv Key 错误: %v", err) + logger.Errorf("Error getting priv key: %v", err) } return priv - // cfssl 默认 96h }, 4*24*time.Hour); err != nil { logger.Warnf("couldn't initialize ocsp signer: %v", err) } @@ -183,8 +181,8 @@ func Server() (*mux.Router, error) { endpoints["ocsp"] = func() (http.Handler, error) { src, err := ocsp_responder.NewSharedSources(ocspSigner) if err != nil { - logger.Errorf("OCSP Sources 创建错误: %v", err) - return nil, errors.Wrap(err, "sources 创建错误") + logger.Errorf("OCSP Sources Create error: %v", err) + return nil, errors.Wrap(err, "sources Create error") } ocsp_responder.CountAll() return ocsp.NewResponder(src, nil), nil @@ -192,41 +190,6 @@ func Server() (*mux.Router, error) { registerHandlers() - //tlsCfg := tls.Config{ - // GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { - // return keymanager.GetKeeper().GetCachedTLSKeyPair() - // }, - // InsecureSkipVerify: true, - // ClientAuth: tls.NoClientCert, // 运行集群内客户端单向 TLS 获取 - //} - // - //// 启动 OCSP 服务器 - //go func() { - // src, err := ocsp_responder.NewSharedSources(ocspSigner) - // if err != nil { - // logger.Errorf("OCSP Sources 创建错误: %v", err) - // return - // } - // ocsp_responder.CountAll() - // mux := http.NewServeMux() - // mux.Handle("/", ocsp.NewResponder(src, nil)) - // - // srv := &http.Server{ - // Addr: core.Is.Config.HTTP.OcspListen, - // Handler: mux, - // } - // logger.Infof("Start OCSP Responser at %s, host: %s", srv.Addr, core.Is.Config.OCSPHost) - // if err := srv.ListenAndServe(); err != nil { - // logger.Errorf("OCSP Server 启动失败: 
%s", err) - // } - //}() - // - //go func() { - // if err := tlsServe(core.Is.Config.HTTP.CaListen, &tlsCfg); err != nil { - // logger.Fatalf("CA TLS Server 启动失败: %s", err) - // } - //}() - return router, nil } @@ -239,23 +202,22 @@ func tlsServe(addr string, tlsConfig *tls.Config) error { return server.ListenAndServeTLS("", "") } -// OcspServer ocsp服务 +// OcspServer func OcspServer() ocsp.Signer { logger := core.Is.Logger.Named("singleca") ocspSigner, err := ocsp.NewDynamicSigner( func() *x509.Certificate { _, cert, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Cert 错误: %v", err) + logger.Errorf("Get cert error: %v", err) } return cert }, func() crypto.Signer { priv, _, err := keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { - logger.Errorf("获取 Priv Key 错误: %v", err) + logger.Errorf("Error getting priv key: %v", err) } return priv - // cfssl 默认 96h }, 4*24*time.Hour) if err != nil { logger.Warnf("couldn't initialize ocsp signer: %v", err) diff --git a/ca/upperca/checker.go b/ca/upperca/checker.go index 74a0d04..b219cb1 100644 --- a/ca/upperca/checker.go +++ b/ca/upperca/checker.go @@ -7,14 +7,14 @@ import ( "time" "github.com/go-resty/resty/v2" - "gitlab.oneitfarm.com/bifrost/cfssl/api/client" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/api/client" "go.uber.org/zap" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" + "github.com/ztalab/ZACA/ca/keymanager" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/logic/schema" + "github.com/ztalab/ZACA/pkg/influxdb" ) const CfsslHealthApi = "/api/v1/cfssl/health" @@ -60,7 +60,7 @@ func (hc *checker) checkUpper(upperClient *client.AuthRemote) { caHost := schema.GetHostFromUrl(caUrl) resp, err := 
httpClient.R().Get(caUrl + CfsslHealthApi) - // 统计信任证书梳 + // Statistical trust certificate comb var statusCode int if err != nil { statusCode = 599 @@ -69,7 +69,7 @@ func (hc *checker) checkUpper(upperClient *client.AuthRemote) { } if err != nil || resp.StatusCode() != http.StatusOK { - hc.logger.Warnf("Upper CA: %s 连接错误: %s", caHost, err) + hc.logger.Warnf("Upper CA: %s Connection error: %s", caHost, err) } hc.influx.AddPoint(&influxdb.MetricsData{ @@ -86,11 +86,11 @@ func (hc *checker) checkUpper(upperClient *client.AuthRemote) { }) } -// NewChecker 只在下级 CA 执行 +// NewChecker Execute only in subordinate CAS func NewChecker() Checker { return &checker{ UpperClients: keymanager.GetKeeper().RootClient, - logger: v2log.Named("upper").SugaredLogger, + logger: logger.Named("upper").SugaredLogger, influx: core.Is.Metrics, } } diff --git a/ca/upperca/client.go b/ca/upperca/client.go index 1dd1471..b38e3b9 100644 --- a/ca/upperca/client.go +++ b/ca/upperca/client.go @@ -3,9 +3,9 @@ package upperca import ( "strings" - cfssl_client "gitlab.oneitfarm.com/bifrost/cfssl/api/client" + cfssl_client "github.com/ztalab/cfssl/api/client" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" + "github.com/ztalab/ZACA/ca/keymanager" ) func ProxyRequest(f func(host string) error) error { diff --git a/cmd/http.go b/cmd/api.go similarity index 81% rename from cmd/http.go rename to cmd/api.go index eea260e..0048b13 100644 --- a/cmd/http.go +++ b/cmd/api.go @@ -2,9 +2,9 @@ package cmd import ( "context" - "gitlab.oneitfarm.com/bifrost/capitalizone/api" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/api" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/pkg/logger" "net/http" "os" "os/signal" @@ -12,7 +12,7 @@ import ( "time" ) -// InitHTTPServer 初始化http服务 +// InitHTTPServer Initialize HTTP service func InitHTTPServer(ctx context.Context, handler http.Handler) func() { addr := 
core.Is.Config.HTTP.Listen srv := &http.Server{ @@ -42,7 +42,7 @@ func InitHTTPServer(ctx context.Context, handler http.Handler) func() { } } -// Run 运行服务 +// Run Running services func RunHttp(ctx context.Context) error { state := 1 sc := make(chan os.Signal, 1) @@ -53,7 +53,7 @@ func RunHttp(ctx context.Context) error { EXIT: for { sig := <-sc - logger.Infof("接收到信号[%s]", sig.String()) + logger.Infof("Received signal[%s]", sig.String()) switch sig { case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: state = 0 @@ -65,7 +65,7 @@ EXIT: } cleanFunc() - logger.Infof("Http服务退出") + logger.Infof("HTTP service exit") time.Sleep(time.Second) os.Exit(state) return nil diff --git a/cmd/ocsp.go b/cmd/ocsp.go index e0ba8ee..50313c7 100644 --- a/cmd/ocsp.go +++ b/cmd/ocsp.go @@ -3,11 +3,11 @@ package cmd import ( "context" "github.com/prometheus/client_golang/prometheus/promhttp" - ocsp_responder "gitlab.oneitfarm.com/bifrost/capitalizone/ca/ocsp" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/singleca" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/cfssl/ocsp" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + ocsp_responder "github.com/ztalab/ZACA/ca/ocsp" + "github.com/ztalab/ZACA/ca/singleca" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/ocsp" "net/http" "net/http/pprof" "os" @@ -16,11 +16,11 @@ import ( "time" ) -// InitOcspServer 初始化ocsp服务 +// InitOcspServer Initialize OCSP service func InitOcspServer(ctx context.Context, ocspSigner ocsp.Signer) func() { src, err := ocsp_responder.NewSharedSources(ocspSigner) if err != nil { - logger.Errorf("OCSP Sources 创建错误: %v", err) + logger.Errorf("OCSP Sources Create error: %v", err) panic(err) } ocsp_responder.CountAll() @@ -44,7 +44,7 @@ func InitOcspServer(ctx context.Context, ocspSigner ocsp.Signer) func() { }() if !core.Is.Config.Debug { - // 时序监控 + // Timing monitoring metrics := http.NewServeMux() metrics.Handle("/metrics", 
promhttp.Handler()) metrics.HandleFunc("/debug/pprof/", pprof.Index) @@ -82,7 +82,7 @@ func InitOcspServer(ctx context.Context, ocspSigner ocsp.Signer) func() { } } -// RunOcsp 运行服务 +// RunOcsp Running services func RunOcsp(ctx context.Context) error { state := 1 sc := make(chan os.Signal, 1) @@ -93,7 +93,7 @@ func RunOcsp(ctx context.Context) error { EXIT: for { sig := <-sc - logger.Infof("接收到信号[%s]", sig.String()) + logger.Infof("Received signal[%s]", sig.String()) switch sig { case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: state = 0 @@ -105,7 +105,7 @@ EXIT: } cleanFunc() - logger.Infof("Ocsp服务退出") + logger.Infof("Exit OCSP service") time.Sleep(time.Second) os.Exit(state) return nil diff --git a/cmd/tls.go b/cmd/tls.go index f584674..772c716 100644 --- a/cmd/tls.go +++ b/cmd/tls.go @@ -5,10 +5,10 @@ import ( "crypto/tls" "github.com/gorilla/mux" "github.com/prometheus/client_golang/prometheus/promhttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/singleca" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/ca/keymanager" + "github.com/ztalab/ZACA/ca/singleca" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/pkg/logger" "net/http" "net/http/pprof" "os" @@ -17,7 +17,7 @@ import ( "time" ) -// InitTlsServer 初始化Tls服务 +// InitTlsServer Initialize TLS service func InitTlsServer(ctx context.Context, handler *mux.Router) func() { addr := core.Is.Config.HTTP.CaListen tlsCfg := &tls.Config{ @@ -25,7 +25,7 @@ func InitTlsServer(ctx context.Context, handler *mux.Router) func() { return keymanager.GetKeeper().GetCachedTLSKeyPair() }, InsecureSkipVerify: true, - ClientAuth: tls.NoClientCert, // 运行集群内客户端单向 TLS 获取 + ClientAuth: tls.NoClientCert, } srv := &http.Server{ Addr: addr, @@ -44,7 +44,7 @@ func InitTlsServer(ctx context.Context, handler *mux.Router) func() { } }() if !core.Is.Config.Debug { - // 时序监控 + // Timing 
monitoring metrics := http.NewServeMux() metrics.Handle("/metrics", promhttp.Handler()) metrics.HandleFunc("/debug/pprof/", pprof.Index) @@ -81,7 +81,6 @@ func InitTlsServer(ctx context.Context, handler *mux.Router) func() { } } -// Run 运行服务 func RunTls(ctx context.Context) error { state := 1 sc := make(chan os.Signal, 1) @@ -96,7 +95,7 @@ func RunTls(ctx context.Context) error { EXIT: for { sig := <-sc - logger.Infof("接收到信号[%s]", sig.String()) + logger.Infof("Received signal[%s]", sig.String()) switch sig { case syscall.SIGQUIT, syscall.SIGTERM, syscall.SIGINT: state = 0 @@ -108,7 +107,7 @@ EXIT: } cleanFunc() - logger.Infof("Tls服务退出") + logger.Infof("TLS service exit") time.Sleep(time.Second) os.Exit(state) return nil diff --git a/conf.default.yml b/conf.default.yml deleted file mode 100644 index 4f11899..0000000 --- a/conf.default.yml +++ /dev/null @@ -1,86 +0,0 @@ -registry: - self-name: capitalizone - -log: - log-proxy: - host: "" - port: 6379 - key: ca_log - -redis: - nodes: [] - -# 级联 CA 配置项 -keymanager: - upper-ca: [] # https://user:password@server.ca - self-sign: false # 开启表示 ROOT CA - csr-templates: - root-ca: - o: CI123 ROOT AUTHORITY - expiry: 175200h - intermediate-ca: - o: SITE CA IDENTIFY - ou: "spiffe://site/cluster" - expiry: 175200h - -singleca: - config-path: "/etc/capitalizone/config.json" - -election: - enabled: false - id: capitalizone-leader - baseon: configmap - always-leader: false - -gateway-nervs: - enabled: false - endpoint: "" - -ocsp-host: "http://127.0.0.1:8082" - -http: - ocsp-listen: 0.0.0.0:8082 - ca-listen: 0.0.0.0:8081 - listen: 0.0.0.0:8080 - -mysql: - dsn: "" - -influxdb: - enabled: false - address: "" #192.168.2.80:8086 - port: 8086 - udp_address: "" #influxdb msp数据库的udp地址,ip:port - database: "" # 数据库名称 - precision: "ms" #精度 n, u, ms, s, m or h - username: "" - password: "" - max-idle-conns: 30 - max-idle-conns-per-host: 30 - flush-size: 20 #批量发送的点的个数 - flush-time: 10 #定时批量发送点的时间,单位:s - -vault: - enabled: false - addr: "" - 
token: "" - prefix: "" - discover: "provider=k8s namespace=msp label_selector=\"app.kubernetes.io/name=vault\"" - -mesh: - msp-portal-api: "http://msp-portal:9080" - -swagger-enabled: false - -debug: false - -version: "0.1" - -# 监控指标 -metrics: - cpu-limit: 90 # cpu阈值 - mem-limit: 80 # 内存阈值 - -# ocsp配置 -ocsp: - cache-time: 60 # 缓存时间 \ No newline at end of file diff --git a/conf.prod.yml b/conf.prod.yml index 3af52c7..1ac3880 100644 --- a/conf.prod.yml +++ b/conf.prod.yml @@ -10,10 +10,10 @@ log: redis: nodes: [] -# 级联 CA 配置项 +# Cascading CA configuration items keymanager: upper-ca: [] # https://user:password@server.ca - self-sign: false # 开启表示 ROOT CA + self-sign: false # Open representation ROOT CA csr-templates: root-ca: o: CI123 ROOT AUTHORITY @@ -59,11 +59,6 @@ debug: false version: "0.1" -# 监控指标 -metrics: - cpu-limit: 90 # cpu阈值 - mem-limit: 80 # 内存阈值 - -# ocsp配置 +# OCSP configuration ocsp: - cache-time: 60 # 缓存时间 \ No newline at end of file + cache-time: 60 # Cache time \ No newline at end of file diff --git a/conf.test.yml b/conf.test.yml index 7b7c171..a22dd03 100644 --- a/conf.test.yml +++ b/conf.test.yml @@ -13,10 +13,10 @@ redis: - 192.168.2.80:9002 - 192.168.2.80:9003 -# 级联 CA 配置项 +# Cascading CA configuration items keymanager: upper-ca: [] # https://@server.ca - self-sign: false # 开启表示 ROOT CA + self-sign: false # Open representation ROOT CA csr-templates: root-ca: o: CI123 ROOT AUTHORITY @@ -53,17 +53,17 @@ influxdb: enabled: true address: "192.168.2.80" #192.168.2.80:8086 port: 8086 - udp_address: "" #influxdb msp数据库的udp地址,ip:port - database: "msp" # 数据库名称 - precision: "ms" #精度 n, u, ms, s, m or h + udp_address: "" #influxdb UDP address of the database,ip:port + database: "msp" # Database name + precision: "ms" #Accuracy n, u, ms, s, m or h username: "influx-msp" password: "sunyangpassword" read-username: "MSP_CUSTOM_INFLUXDB_USERNAME" read-password: "MSP_CUSTOM_INFLUXDB_PASSWORD" max-idle-conns: 30 max-idle-conns-per-host: 30 - flush-size: 20 
#批量发送的点的个数 - flush-time: 10 #定时批量发送点的时间,单位:s + flush-size: 20 #Number of points sent in batch + flush-time: 10 #Time of scheduled batch sending point,Company:s vault: enabled: false @@ -79,11 +79,6 @@ swagger-enabled: true debug: true -# 监控指标 -metrics: - cpu-limit: 90 # cpu阈值 - mem-limit: 80 # 内存阈值 - -# ocsp配置 +# OCSP configuration ocsp: - cache-time: 60 # 缓存时间 \ No newline at end of file + cache-time: 60 # Cache time \ No newline at end of file diff --git a/config.json b/config.json index 10557cb..975aff1 100644 --- a/config.json +++ b/config.json @@ -4,26 +4,14 @@ "type": "standard", "key": "52abb3ac91971bb72bce17e7a289cd04476490b19e0d8eb7810dc42d4ac16c41" }, - "sidecar": { + "default": { "type": "standard", "key": "0739a645a7d6601d9d45f6b237c4edeadad904f2fce53625dfdd541ec4fc8134" - }, - "gateway": { - "type": "standard", - "key": "535d9739ce646a30ff4a8754a8f8fbe5693ff49544ba62e54113785cefff81a3" - }, - "idg-registry": { - "type": "standard", - "key": "ea62fa7c27307017694689f0adff09f63186cadfe92fb802133f980b75858fc6" - }, - "gatekeeper": { - "type": "standard", - "key": "1fb4d8144367a1cdc59500a2e81f7902a4cd5da4a1f1b2211eff42202b5b70e8" } }, "signing": { "profiles": { - "sidecar": { + "default": { "usages": [ "signing", "key encipherment", @@ -32,18 +20,7 @@ ], "expiry": "1440h", "copy_extensions": true, - "auth_key": "sidecar" - }, - "gateway": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "8760h", - "copy_extensions": true, - "auth_key": "gateway" + "auth_key": "default" }, "intermediate": { "usages": [ @@ -57,38 +34,7 @@ "ca_constraint": { "is_ca": true } - }, - "idg-registry": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "8760h", - "copy_extensions": true, - "auth_key": "idg-registry" - }, - "gatekeeper": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "8760h", - "copy_extensions": true, - 
"auth_key": "gatekeeper" } - }, - "default": { - "usages": [ - "signing", - "key encipherment", - "server auth", - "client auth" - ], - "expiry": "8760h" } } } \ No newline at end of file diff --git a/core/config/types.go b/core/config/types.go index cc54ef2..0d2d8e9 100644 --- a/core/config/types.go +++ b/core/config/types.go @@ -1,9 +1,9 @@ package config import ( - cfssl_config "gitlab.oneitfarm.com/bifrost/cfssl/config" + cfssl_config "github.com/ztalab/cfssl/config" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" + "github.com/ztalab/ZACA/pkg/influxdb" ) const ( @@ -11,42 +11,29 @@ const ( ) type IConfig struct { - Registry Registry `yaml:"registry"` Log Log `yaml:"log"` - Redis Redis `yaml:"redis"` Keymanager Keymanager `yaml:"keymanager"` Singleca Singleca `yaml:"singleca"` - Election Election `yaml:"election"` - GatewayNervs GatewayNervs `yaml:"gateway-nervs"` OCSPHost string `yaml:"ocsp-host"` HTTP HTTP `yaml:"http"` Mysql Mysql `yaml:"mysql"` Vault Vault `yaml:"vault"` Influxdb influxdb.CustomConfig `yaml:"influxdb"` - Mesh Mesh `yaml:"mesh"` SwaggerEnabled bool `yaml:"swagger-enabled"` Debug bool `yaml:"debug"` Version string `yaml:"version"` Hostname string `yaml:"hostname"` - Metrics Metrics `yaml:"metrics"` Ocsp Ocsp `yaml:"ocsp"` } -// 服务注册信息 type Registry struct { SelfName string `yaml:"self-name"` - Command string `yaml:"command"` // 服务command -} - -// 监控指标 -type Metrics struct { - CpuLimit float64 `yaml:"cpu-limit"` // cpu阈值 - MemLimit float64 `yaml:"mem-limit"` // 内存阈值 + Command string `yaml:"command"` } // ocsp type Ocsp struct { - CacheTime int `yaml:"cache-time"` // ocsp缓存时间 + CacheTime int `yaml:"cache-time"` } type LogProxy struct { @@ -57,25 +44,12 @@ type LogProxy struct { type Log struct { LogProxy LogProxy `yaml:"log-proxy"` } -type Redis struct { - Nodes []string `yaml:"nodes"` -} type Singleca struct { ConfigPath string `yaml:"config-path"` // Raw CfsslConfig *cfssl_config.Config } -type Election struct { - Enabled bool 
`yaml:"enabled"` - ID string `yaml:"id"` - Baseon string `yaml:"baseon"` - AlwaysLeader bool `yaml:"always-leader"` -} -type GatewayNervs struct { - Enabled bool `yaml:"enabled"` - Endpoint string `yaml:"endpoint"` -} type HTTP struct { OcspListen string `yaml:"ocsp-listen"` CaListen string `yaml:"ca-listen"` @@ -102,13 +76,9 @@ type Keymanager struct { SelfSign bool `yaml:"self-sign"` CsrTemplates CsrTemplates `yaml:"csr-templates"` } -type Mesh struct { - MSPPortalAPI string `yaml:"msp-portal-api"` -} type Vault struct { - Enabled bool `yaml:"enabled"` - Addr string `yaml:"addr"` - Token string `yaml:"token"` - Prefix string `yaml:"prefix"` - Discover string `yaml:"discover"` + Enabled bool `yaml:"enabled"` + Addr string `yaml:"addr"` + Token string `yaml:"token"` + Prefix string `yaml:"prefix"` } diff --git a/core/state.go b/core/state.go index a49c0c6..2da3577 100644 --- a/core/state.go +++ b/core/state.go @@ -4,14 +4,11 @@ import ( "context" vaultAPI "github.com/hashicorp/vault/api" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "gitlab.oneitfarm.com/bifrost/go-toolbox/rediscluster" + "github.com/ztalab/ZACA/core/config" + "github.com/ztalab/ZACA/pkg/influxdb" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/vaultsecret" "gorm.io/gorm" - "k8s.io/client-go/kubernetes" - - "gitlab.oneitfarm.com/bifrost/capitalizone/core/config" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultsecret" ) // Config ... @@ -34,14 +31,12 @@ type Logger struct { // I ... 
type I struct { - Ctx context.Context - Config *Config - RedisClusterClient *rediscluster.Cluster - Logger *Logger - Db *gorm.DB - KubeClient *kubernetes.Clientset - Elector Elector - Metrics *influxdb.Metrics - VaultClient *vaultAPI.Client - VaultSecret *vaultsecret.VaultSecret + Ctx context.Context + Config *Config + Logger *Logger + Db *gorm.DB + Elector Elector + Metrics *influxdb.Metrics + VaultClient *vaultAPI.Client + VaultSecret *vaultsecret.VaultSecret } diff --git a/database/mysql/cfssl-model/dao/certificates.go b/database/mysql/cfssl-model/dao/certificates.go index 8a5585e..2d4581d 100644 --- a/database/mysql/cfssl-model/dao/certificates.go +++ b/database/mysql/cfssl-model/dao/certificates.go @@ -9,7 +9,7 @@ import ( uuid "github.com/satori/go.uuid" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" ) var ( diff --git a/database/mysql/cfssl-model/dao/forbid.go b/database/mysql/cfssl-model/dao/forbid.go index 4553029..f0ad53f 100644 --- a/database/mysql/cfssl-model/dao/forbid.go +++ b/database/mysql/cfssl-model/dao/forbid.go @@ -8,7 +8,7 @@ import ( uuid "github.com/satori/go.uuid" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" ) var ( diff --git a/database/mysql/cfssl-model/dao/ocsp_responses.go b/database/mysql/cfssl-model/dao/ocsp_responses.go index 4bbeb93..999f1fb 100644 --- a/database/mysql/cfssl-model/dao/ocsp_responses.go +++ b/database/mysql/cfssl-model/dao/ocsp_responses.go @@ -9,7 +9,7 @@ import ( uuid "github.com/satori/go.uuid" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" ) var ( diff --git a/database/mysql/cfssl-model/dao/self_keypair.go b/database/mysql/cfssl-model/dao/self_keypair.go index e2c4444..0104427 100644 --- 
a/database/mysql/cfssl-model/dao/self_keypair.go +++ b/database/mysql/cfssl-model/dao/self_keypair.go @@ -9,7 +9,7 @@ import ( uuid "github.com/satori/go.uuid" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model/model" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" ) var ( diff --git a/database/mysql/migration.go b/database/mysql/migration.go index 7a2f3b1..5a88898 100644 --- a/database/mysql/migration.go +++ b/database/mysql/migration.go @@ -6,7 +6,7 @@ import ( "github.com/golang-migrate/migrate/v4" "github.com/golang-migrate/migrate/v4/database/mysql" _ "github.com/golang-migrate/migrate/v4/source/file" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" _ "gorm.io/driver/mysql" "gorm.io/gorm" ) @@ -15,7 +15,7 @@ func Migrate(db *gorm.DB) error { lo := logger.Named("migration") sql, err := db.DB() if err != nil { - return fmt.Errorf("获取 DB 实例失败: %v", err) + return fmt.Errorf("failed to get DB instance: %v", err) } driver, err := mysql.WithInstance(sql, &mysql.Config{}) m, err := migrate.NewWithDatabaseInstance( @@ -29,7 +29,7 @@ func Migrate(db *gorm.DB) error { lo.Info("no changes.") return nil } - return fmt.Errorf("MySQL 迁移异常: %v", err) + return fmt.Errorf("MySQL migration exception: %v", err) } lo.Info("Migrations success.") return nil diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 2078174..0000000 --- a/docker-compose.yml +++ /dev/null @@ -1,11 +0,0 @@ -version: "3.8" - -services: - mysql: - image: "mysql:5.7" - ports: - - "13306:3306" - environment: - MYSQL_ROOT_PASSWORD: 123456 - MYSQL_DATABASE: cap - \ No newline at end of file diff --git a/docs/docs.go b/docs/docs.go index 9629d80..da0774c 100644 --- a/docs/docs.go +++ b/docs/docs.go @@ -25,14 +25,14 @@ var doc = `{ "paths": { "/ca/intermediate_topology": { "get": { - "description": "子CA拓扑", + "description": "Sub-CA topology", "produces": [ "application/json" ], "tags": [ "CA" ], - 
"summary": "子CA拓扑", + "summary": "Sub-CA topology", "responses": { "200": { "description": " ", @@ -72,14 +72,14 @@ var doc = `{ }, "/ca/overall_certs_count": { "get": { - "description": "证书总数、根据分类划分的数量、对应服务数量", + "description": "Total number of certificates, number by classification, number of corresponding services", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p2)证书分类", + "summary": "(p2)Certificate classification", "responses": { "200": { "description": " ", @@ -116,14 +116,14 @@ var doc = `{ }, "/ca/overall_expiry_certs": { "get": { - "description": "证书已过期数量, 一周内过期数量, 1/3个月内过期数量, 3个月后过期数量", + "description": "Number of certificates expired: within one week, within 1/3 months and after 3 months", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p2)证书有效期", + "summary": "(p2)Certificate validity", "responses": { "200": { "description": " ", @@ -160,14 +160,14 @@ var doc = `{ }, "/ca/overall_units_enable_status": { "get": { - "description": "已启用总数, 禁用总数, 对应服务数", + "description": "Total enabled, total disabled, corresponding services", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p2)启用情况", + "summary": "(p2)Enabling condition", "responses": { "200": { "description": " ", @@ -204,18 +204,18 @@ var doc = `{ }, "/ca/role_profiles": { "get": { - "description": "环境隔离类型", + "description": "Environmental isolation type", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p1)环境隔离类型", + "summary": "(p1)Environmental isolation type", "parameters": [ { "type": "boolean", - "description": "只返回类型列表, 供搜索条件", + "description": "Only a list of types is returned for search criteria", "name": "short", "in": "query" } @@ -259,14 +259,14 @@ var doc = `{ }, "/ca/upper_ca_intermediate_topology": { "get": { - "description": "上层CA拓扑", + "description": "Upper CA topology", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "上层CA拓扑", + "summary": "Upper CA topology", "responses": { "200": { 
"description": " ", @@ -306,30 +306,30 @@ var doc = `{ }, "/ca/workload_units": { "get": { - "description": "CA 下 Units", + "description": "CA Units", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p1)服务单元", + "summary": "(p1)Service unit", "parameters": [ { "type": "integer", - "description": "页数, 默认1", + "description": "Number of pages, default 1", "name": "page", "in": "query" }, { "type": "integer", - "description": "页数限制, 默认20", + "description": "Page limit, default 20", "name": "limit_num", "in": "query" }, { "type": "string", - "description": "UniqueID 查询", + "description": "UniqueID Query", "name": "unique_id", "in": "query" } @@ -385,7 +385,7 @@ var doc = `{ }, "/certleaf/cert_chain": { "get": { - "description": "获取证书链信息", + "description": "Get certificate chain information", "produces": [ "application/json" ], @@ -396,19 +396,19 @@ var doc = `{ "parameters": [ { "type": "boolean", - "description": "展示 CA 自身证书链", + "description": "Show CA's own certificate chain", "name": "self_cert", "in": "query" }, { "type": "string", - "description": "SN+AKI 查询指定证书", + "description": "SN+AKI Query the specified certificate", "name": "sn", "in": "query" }, { "type": "string", - "description": "SN+AKI 查询指定证书", + "description": "SN+AKI Query the specified certificate", "name": "aki", "in": "query" } @@ -449,14 +449,14 @@ var doc = `{ }, "/certleaf/cert_chain_from_root": { "get": { - "description": "Root视角下所有证书链", + "description": "All certificate chains from the root Perspective", "produces": [ "application/json" ], "tags": [ "certleaf" ], - "summary": "(p1)根视角证书链", + "summary": "(p1)Root view certificate chain", "responses": { "200": { "description": " ", @@ -493,7 +493,7 @@ var doc = `{ }, "/workload/cert": { "get": { - "description": "证书详情", + "description": "Certificate details", "produces": [ "application/json" ], @@ -504,14 +504,14 @@ var doc = `{ "parameters": [ { "type": "string", - "description": "证书 sn", + "description": "Certificate sn", 
"name": "sn", "in": "query", "required": true }, { "type": "string", - "description": "证书 aki", + "description": "Certificate aki", "name": "aki", "in": "query", "required": true @@ -553,7 +553,7 @@ var doc = `{ }, "/workload/certs": { "get": { - "description": "证书列表", + "description": "Certificate list", "produces": [ "application/json" ], @@ -564,55 +564,55 @@ var doc = `{ "parameters": [ { "type": "string", - "description": "证书类型 gateway/sidecar/standalone", + "description": "Certificate type default", "name": "role", "in": "query" }, { "type": "string", - "description": "根据UniqueID查询", + "description": "Query by unique ID", "name": "unique_id", "in": "query" }, { "type": "string", - "description": "根据证书序列号查询", + "description": "Query by certificate serial number", "name": "cert_sn", "in": "query" }, { "type": "string", - "description": "证书状态 good/revoked", + "description": "Certificate status good/revoked", "name": "status", "in": "query" }, { "type": "string", - "description": "排序,默认 issued_at desc", + "description": "Sort, default issued_at desc", "name": "order", "in": "query" }, { "type": "string", - "description": "过期, 起始时间点", + "description": "Expiration, starting point", "name": "expiry_start_time", "in": "query" }, { "type": "string", - "description": "过期, 结束时间点", + "description": "Expiration, end time point", "name": "expiry_end_time", "in": "query" }, { "type": "integer", - "description": "分页参数, 默认 20", + "description": "Paging parameters, default 20", "name": "limit_num", "in": "query" }, { "type": "integer", - "description": "页数, 默认 1", + "description": "Number of pages, default 1", "name": "page", "in": "query" } @@ -668,14 +668,14 @@ var doc = `{ }, "/workload/lifecycle/forbid_new_certs": { "post": { - "description": "禁止某个 UniqueID 申请证书", + "description": "Prohibit a uniqueID from requesting a certificate", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "禁止申请证书", + "summary": "Application for certificate is prohibited", 
"parameters": [ { "description": " ", @@ -711,14 +711,14 @@ var doc = `{ }, "/workload/lifecycle/forbid_unit": { "post": { - "description": "吊销并禁止服务证书", + "description": "Revoke and prohibit service certificates", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)吊销并禁止服务证书", + "summary": "(p1)Revoke and prohibit service certificates", "parameters": [ { "description": " ", @@ -754,7 +754,7 @@ var doc = `{ }, "/workload/lifecycle/recover": { "post": { - "description": "恢复证书", + "description": "Restore certificate", "produces": [ "application/json" ], @@ -764,7 +764,7 @@ var doc = `{ "summary": "(p3)Recover", "parameters": [ { - "description": "sn+aki / unique_id 二选一", + "description": "sn+aki / unique_id either-or", "name": "body", "in": "body", "required": true, @@ -797,14 +797,14 @@ var doc = `{ }, "/workload/lifecycle/recover_forbid_new_certs": { "post": { - "description": "恢复允许某个 UniqueID 申请证书", + "description": "Recovery allows a uniqueID to request a certificate", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "恢复申请证书", + "summary": "Resume application certificate", "parameters": [ { "description": " ", @@ -840,14 +840,14 @@ var doc = `{ }, "/workload/lifecycle/recover_unit": { "post": { - "description": "恢复并允许服务证书", + "description": "Restore and allow service certificates", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)恢复并允许服务证书", + "summary": "(p1)Restore and allow service certificates", "parameters": [ { "description": " ", @@ -883,7 +883,7 @@ var doc = `{ }, "/workload/lifecycle/revoke": { "post": { - "description": "吊销证书", + "description": "revoked certificate", "produces": [ "application/json" ], @@ -893,7 +893,7 @@ var doc = `{ "summary": "(p3)Revoke", "parameters": [ { - "description": "sn+aki / unique_id 二选一", + "description": "sn+aki / unique_id pick one of two", "name": "body", "in": "body", "required": true, @@ -926,54 +926,54 @@ var doc = `{ }, 
"/workload/units_certs_list": { "get": { - "description": "服务证书列表", + "description": "List of service certificates", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)服务证书列表", + "summary": "(p1)List of service certificates", "parameters": [ { "type": "string", - "description": "查询 unique_id", + "description": "Query unique_id", "name": "unique_id", "in": "query" }, { "type": "string", - "description": "证书类型", + "description": "Certificate type", "name": "role", "in": "query" }, { "type": "string", - "description": "过期, 起始时间点", + "description": "Expiration, starting point", "name": "expiry_start_time", "in": "query" }, { "type": "string", - "description": "过期, 结束时间点", + "description": "Expiration, end time point", "name": "expiry_end_time", "in": "query" }, { "type": "integer", - "description": "是否禁用, 1禁用 2启用", + "description": "Disable, 1 disable, 2 enable", "name": "is_forbid", "in": "query" }, { "type": "integer", - "description": "分页参数, 默认 20", + "description": "Paging parameters, default 20", "name": "limit_num", "in": "query" }, { "type": "integer", - "description": "页数, 默认 1", + "description": "Number of pages, default 1", "name": "page", "in": "query" } @@ -1017,14 +1017,14 @@ var doc = `{ }, "/workload/units_forbid_query": { "get": { - "description": "查询 unique_id 是否被禁止申请证书", + "description": "Query unique_id Is it forbidden to apply for certificate", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "禁止申请证书查询", + "summary": "Prohibit applying for certificate query", "parameters": [ { "type": "array", @@ -1032,7 +1032,7 @@ var doc = `{ "type": "string" }, "collectionFormat": "multi", - "description": "查询 unique_id 数组", + "description": "Query unique_ID array", "name": "unique_ids", "in": "query", "required": true @@ -1074,17 +1074,17 @@ var doc = `{ }, "/workload/units_status": { "post": { - "description": "服务对应状态查询", + "description": "Service corresponding status query", "produces": [ "application/json" ], 
"tags": [ "Workload" ], - "summary": "(p1)服务对应状态查询", + "summary": "(p1)Service corresponding status query", "parameters": [ { - "description": "查询 unique_id 数组", + "description": "Query unique_ID array", "name": "json", "in": "body", "required": true, @@ -1156,15 +1156,14 @@ var doc = `{ "type": "object", "properties": { "role": { - "description": "类别", "type": "string" }, "total": { - "description": "证书总数", + "description": "Total number of certificates", "type": "integer" }, "units_count": { - "description": "服务数量", + "description": "number of services", "type": "integer" } } @@ -1260,11 +1259,11 @@ var doc = `{ "type": "object", "properties": { "first_issued_at": { - "description": "首次签发证书日期", + "description": "Date of first issuance of certificate", "type": "string" }, "forbidden": { - "description": "是否被禁止", + "description": "Is it prohibited", "type": "boolean" }, "role": { @@ -1274,7 +1273,7 @@ var doc = `{ "type": "string" }, "valid_num": { - "description": "有效证书数量", + "description": "Number of valid certificates", "type": "integer" } } @@ -1289,7 +1288,7 @@ var doc = `{ "$ref": "#/definitions/schema.Certificate" }, "cert_str": { - "description": "展示证书的详细信息", + "description": "Show certificate details", "type": "string" }, "cn": { @@ -1444,7 +1443,7 @@ var doc = `{ "$ref": "#/definitions/schema.Certificate" }, "cert_str": { - "description": "展示证书的详细信息", + "description": "Show certificate details", "type": "string" }, "cn": { diff --git a/docs/swagger.json b/docs/swagger.json index 82a8779..171b81a 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -6,14 +6,14 @@ "paths": { "/ca/intermediate_topology": { "get": { - "description": "子CA拓扑", + "description": "Sub-CA topology", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "子CA拓扑", + "summary": "Sub-CA topology", "responses": { "200": { "description": " ", @@ -53,14 +53,14 @@ }, "/ca/overall_certs_count": { "get": { - "description": "证书总数、根据分类划分的数量、对应服务数量", + "description": "Total 
number of certificates, number by classification, number of corresponding services", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p2)证书分类", + "summary": "(p2)Certificate classification", "responses": { "200": { "description": " ", @@ -97,14 +97,14 @@ }, "/ca/overall_expiry_certs": { "get": { - "description": "证书已过期数量, 一周内过期数量, 1/3个月内过期数量, 3个月后过期数量", + "description": "Number of certificates expired: within one week, within 1/3 months and after 3 months", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p2)证书有效期", + "summary": "(p2)Certificate validity", "responses": { "200": { "description": " ", @@ -141,14 +141,14 @@ }, "/ca/overall_units_enable_status": { "get": { - "description": "已启用总数, 禁用总数, 对应服务数", + "description": "Total enabled, total disabled, corresponding services", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p2)启用情况", + "summary": "(p2)Enabling condition", "responses": { "200": { "description": " ", @@ -185,18 +185,18 @@ }, "/ca/role_profiles": { "get": { - "description": "环境隔离类型", + "description": "Environmental isolation type", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p1)环境隔离类型", + "summary": "(p1)Environmental isolation type", "parameters": [ { "type": "boolean", - "description": "只返回类型列表, 供搜索条件", + "description": "Only a list of types is returned for search criteria", "name": "short", "in": "query" } @@ -240,14 +240,14 @@ }, "/ca/upper_ca_intermediate_topology": { "get": { - "description": "上层CA拓扑", + "description": "Upper CA topology", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "上层CA拓扑", + "summary": "Upper CA topology", "responses": { "200": { "description": " ", @@ -287,30 +287,30 @@ }, "/ca/workload_units": { "get": { - "description": "CA 下 Units", + "description": "CA Units", "produces": [ "application/json" ], "tags": [ "CA" ], - "summary": "(p1)服务单元", + "summary": "(p1)Service unit", "parameters": [ { "type": "integer", - 
"description": "页数, 默认1", + "description": "Number of pages, default 1", "name": "page", "in": "query" }, { "type": "integer", - "description": "页数限制, 默认20", + "description": "Page limit, default 20", "name": "limit_num", "in": "query" }, { "type": "string", - "description": "UniqueID 查询", + "description": "UniqueID Query", "name": "unique_id", "in": "query" } @@ -366,7 +366,7 @@ }, "/certleaf/cert_chain": { "get": { - "description": "获取证书链信息", + "description": "Get certificate chain information", "produces": [ "application/json" ], @@ -377,19 +377,19 @@ "parameters": [ { "type": "boolean", - "description": "展示 CA 自身证书链", + "description": "Show CA's own certificate chain", "name": "self_cert", "in": "query" }, { "type": "string", - "description": "SN+AKI 查询指定证书", + "description": "SN+AKI Query the specified certificate", "name": "sn", "in": "query" }, { "type": "string", - "description": "SN+AKI 查询指定证书", + "description": "SN+AKI Query the specified certificate", "name": "aki", "in": "query" } @@ -430,14 +430,14 @@ }, "/certleaf/cert_chain_from_root": { "get": { - "description": "Root视角下所有证书链", + "description": "All certificate chains from the root Perspective", "produces": [ "application/json" ], "tags": [ "certleaf" ], - "summary": "(p1)根视角证书链", + "summary": "(p1)Root view certificate chain", "responses": { "200": { "description": " ", @@ -474,7 +474,7 @@ }, "/workload/cert": { "get": { - "description": "证书详情", + "description": "Certificate details", "produces": [ "application/json" ], @@ -485,14 +485,14 @@ "parameters": [ { "type": "string", - "description": "证书 sn", + "description": "Certificate sn", "name": "sn", "in": "query", "required": true }, { "type": "string", - "description": "证书 aki", + "description": "Certificate aki", "name": "aki", "in": "query", "required": true @@ -534,7 +534,7 @@ }, "/workload/certs": { "get": { - "description": "证书列表", + "description": "Certificate list", "produces": [ "application/json" ], @@ -545,55 +545,55 @@ "parameters": [ 
{ "type": "string", - "description": "证书类型 gateway/sidecar/standalone", + "description": "Certificate type default", "name": "role", "in": "query" }, { "type": "string", - "description": "根据UniqueID查询", + "description": "Query by unique ID", "name": "unique_id", "in": "query" }, { "type": "string", - "description": "根据证书序列号查询", + "description": "Query by certificate serial number", "name": "cert_sn", "in": "query" }, { "type": "string", - "description": "证书状态 good/revoked", + "description": "Certificate status good/revoked", "name": "status", "in": "query" }, { "type": "string", - "description": "排序,默认 issued_at desc", + "description": "Sort, default issued_at desc", "name": "order", "in": "query" }, { "type": "string", - "description": "过期, 起始时间点", + "description": "Expiration, starting point", "name": "expiry_start_time", "in": "query" }, { "type": "string", - "description": "过期, 结束时间点", + "description": "Expiration, end time point", "name": "expiry_end_time", "in": "query" }, { "type": "integer", - "description": "分页参数, 默认 20", + "description": "Paging parameters, default 20", "name": "limit_num", "in": "query" }, { "type": "integer", - "description": "页数, 默认 1", + "description": "Number of pages, default 1", "name": "page", "in": "query" } @@ -649,14 +649,14 @@ }, "/workload/lifecycle/forbid_new_certs": { "post": { - "description": "禁止某个 UniqueID 申请证书", + "description": "Prohibit a uniqueID from requesting a certificate", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "禁止申请证书", + "summary": "Application for certificate is prohibited", "parameters": [ { "description": " ", @@ -692,14 +692,14 @@ }, "/workload/lifecycle/forbid_unit": { "post": { - "description": "吊销并禁止服务证书", + "description": "Revoke and prohibit service certificates", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)吊销并禁止服务证书", + "summary": "(p1)Revoke and prohibit service certificates", "parameters": [ { "description": " ", @@ -735,7 +735,7 
@@ }, "/workload/lifecycle/recover": { "post": { - "description": "恢复证书", + "description": "Restore certificate", "produces": [ "application/json" ], @@ -745,7 +745,7 @@ "summary": "(p3)Recover", "parameters": [ { - "description": "sn+aki / unique_id 二选一", + "description": "sn+aki / unique_id either-or", "name": "body", "in": "body", "required": true, @@ -778,14 +778,14 @@ }, "/workload/lifecycle/recover_forbid_new_certs": { "post": { - "description": "恢复允许某个 UniqueID 申请证书", + "description": "Recovery allows a uniqueID to request a certificate", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "恢复申请证书", + "summary": "Resume application certificate", "parameters": [ { "description": " ", @@ -821,14 +821,14 @@ }, "/workload/lifecycle/recover_unit": { "post": { - "description": "恢复并允许服务证书", + "description": "Restore and allow service certificates", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)恢复并允许服务证书", + "summary": "(p1)Restore and allow service certificates", "parameters": [ { "description": " ", @@ -864,7 +864,7 @@ }, "/workload/lifecycle/revoke": { "post": { - "description": "吊销证书", + "description": "revoked certificate", "produces": [ "application/json" ], @@ -874,7 +874,7 @@ "summary": "(p3)Revoke", "parameters": [ { - "description": "sn+aki / unique_id 二选一", + "description": "sn+aki / unique_id pick one of two", "name": "body", "in": "body", "required": true, @@ -907,54 +907,54 @@ }, "/workload/units_certs_list": { "get": { - "description": "服务证书列表", + "description": "List of service certificates", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)服务证书列表", + "summary": "(p1)List of service certificates", "parameters": [ { "type": "string", - "description": "查询 unique_id", + "description": "Query unique_id", "name": "unique_id", "in": "query" }, { "type": "string", - "description": "证书类型", + "description": "Certificate type", "name": "role", "in": "query" }, { "type": "string", 
- "description": "过期, 起始时间点", + "description": "Expiration, starting point", "name": "expiry_start_time", "in": "query" }, { "type": "string", - "description": "过期, 结束时间点", + "description": "Expiration, end time point", "name": "expiry_end_time", "in": "query" }, { "type": "integer", - "description": "是否禁用, 1禁用 2启用", + "description": "Disable, 1 disable, 2 enable", "name": "is_forbid", "in": "query" }, { "type": "integer", - "description": "分页参数, 默认 20", + "description": "Paging parameters, default 20", "name": "limit_num", "in": "query" }, { "type": "integer", - "description": "页数, 默认 1", + "description": "Number of pages, default 1", "name": "page", "in": "query" } @@ -998,14 +998,14 @@ }, "/workload/units_forbid_query": { "get": { - "description": "查询 unique_id 是否被禁止申请证书", + "description": "Query unique_id Is it forbidden to apply for certificate", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "禁止申请证书查询", + "summary": "Prohibit applying for certificate query", "parameters": [ { "type": "array", @@ -1013,7 +1013,7 @@ "type": "string" }, "collectionFormat": "multi", - "description": "查询 unique_id 数组", + "description": "Query unique_ID array", "name": "unique_ids", "in": "query", "required": true @@ -1055,17 +1055,17 @@ }, "/workload/units_status": { "post": { - "description": "服务对应状态查询", + "description": "Service corresponding status query", "produces": [ "application/json" ], "tags": [ "Workload" ], - "summary": "(p1)服务对应状态查询", + "summary": "(p1)Service corresponding status query", "parameters": [ { - "description": "查询 unique_id 数组", + "description": "Query unique_ID array", "name": "json", "in": "body", "required": true, @@ -1137,15 +1137,14 @@ "type": "object", "properties": { "role": { - "description": "类别", "type": "string" }, "total": { - "description": "证书总数", + "description": "Total number of certificates", "type": "integer" }, "units_count": { - "description": "服务数量", + "description": "number of services", "type": "integer" } } 
@@ -1241,11 +1240,11 @@ "type": "object", "properties": { "first_issued_at": { - "description": "首次签发证书日期", + "description": "Date of first issuance of certificate", "type": "string" }, "forbidden": { - "description": "是否被禁止", + "description": "Is it prohibited", "type": "boolean" }, "role": { @@ -1255,7 +1254,7 @@ "type": "string" }, "valid_num": { - "description": "有效证书数量", + "description": "Number of valid certificates", "type": "integer" } } @@ -1270,7 +1269,7 @@ "$ref": "#/definitions/schema.Certificate" }, "cert_str": { - "description": "展示证书的详细信息", + "description": "Show certificate details", "type": "string" }, "cn": { @@ -1425,7 +1424,7 @@ "$ref": "#/definitions/schema.Certificate" }, "cert_str": { - "description": "展示证书的详细信息", + "description": "Show certificate details", "type": "string" }, "cn": { diff --git a/docs/swagger.yaml b/docs/swagger.yaml index d4fc9a7..c37b12a 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -17,13 +17,12 @@ definitions: ca.OverallCertsCountItem: properties: role: - description: 类别 type: string total: - description: 证书总数 + description: Total number of certificates type: integer units_count: - description: 服务数量 + description: number of services type: integer type: object ca.OverallCertsCountResponse: @@ -85,17 +84,17 @@ definitions: ca.WorkloadUnit: properties: first_issued_at: - description: 首次签发证书日期 + description: Date of first issuance of certificate type: string forbidden: - description: 是否被禁止 + description: Is it prohibited type: boolean role: type: string unique_id: type: string valid_num: - description: 有效证书数量 + description: Number of valid certificates type: integer type: object certleaf.LeafCert: @@ -105,7 +104,7 @@ definitions: cert_info: $ref: '#/definitions/schema.Certificate' cert_str: - description: 展示证书的详细信息 + description: Show certificate details type: string cn: type: string @@ -207,7 +206,7 @@ definitions: cert_info: $ref: '#/definitions/schema.Certificate' cert_str: - description: 展示证书的详细信息 + 
description: Show certificate details type: string cn: type: string @@ -341,7 +340,7 @@ info: paths: /ca/intermediate_topology: get: - description: 子CA拓扑 + description: Sub-CA topology produces: - application/json responses: @@ -364,12 +363,13 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: 子CA拓扑 + summary: Sub-CA topology tags: - CA /ca/overall_certs_count: get: - description: 证书总数、根据分类划分的数量、对应服务数量 + description: Total number of certificates, number by classification, number + of corresponding services produces: - application/json responses: @@ -390,12 +390,13 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p2)证书分类 + summary: (p2)Certificate classification tags: - CA /ca/overall_expiry_certs: get: - description: 证书已过期数量, 一周内过期数量, 1/3个月内过期数量, 3个月后过期数量 + description: 'Number of certificates expired: within one week, within 1/3 months + and after 3 months' produces: - application/json responses: @@ -416,12 +417,12 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p2)证书有效期 + summary: (p2)Certificate validity tags: - CA /ca/overall_units_enable_status: get: - description: 已启用总数, 禁用总数, 对应服务数 + description: Total enabled, total disabled, corresponding services produces: - application/json responses: @@ -442,14 +443,14 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p2)启用情况 + summary: (p2)Enabling condition tags: - CA /ca/role_profiles: get: - description: 环境隔离类型 + description: Environmental isolation type parameters: - - description: 只返回类型列表, 供搜索条件 + - description: Only a list of types is returned for search criteria in: query name: short type: boolean @@ -475,12 +476,12 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p1)环境隔离类型 + summary: 
(p1)Environmental isolation type tags: - CA /ca/upper_ca_intermediate_topology: get: - description: 上层CA拓扑 + description: Upper CA topology produces: - application/json responses: @@ -503,22 +504,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: 上层CA拓扑 + summary: Upper CA topology tags: - CA /ca/workload_units: get: - description: CA 下 Units + description: CA Units parameters: - - description: 页数, 默认1 + - description: Number of pages, default 1 in: query name: page type: integer - - description: 页数限制, 默认20 + - description: Page limit, default 20 in: query name: limit_num type: integer - - description: UniqueID 查询 + - description: UniqueID Query in: query name: unique_id type: string @@ -549,22 +550,22 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p1)服务单元 + summary: (p1)Service unit tags: - CA /certleaf/cert_chain: get: - description: 获取证书链信息 + description: Get certificate chain information parameters: - - description: 展示 CA 自身证书链 + - description: Show CA's own certificate chain in: query name: self_cert type: boolean - - description: SN+AKI 查询指定证书 + - description: SN+AKI Query the specified certificate in: query name: sn type: string - - description: SN+AKI 查询指定证书 + - description: SN+AKI Query the specified certificate in: query name: aki type: string @@ -593,7 +594,7 @@ paths: - certleaf /certleaf/cert_chain_from_root: get: - description: Root视角下所有证书链 + description: All certificate chains from the root Perspective produces: - application/json responses: @@ -614,19 +615,19 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p1)根视角证书链 + summary: (p1)Root view certificate chain tags: - certleaf /workload/cert: get: - description: 证书详情 + description: Certificate details parameters: - - description: 证书 sn + - description: Certificate sn in: query name: sn required: true 
type: string - - description: 证书 aki + - description: Certificate aki in: query name: aki required: true @@ -656,41 +657,41 @@ paths: - Workload /workload/certs: get: - description: 证书列表 + description: Certificate list parameters: - - description: 证书类型 gateway/sidecar/standalone + - description: Certificate type default in: query name: role type: string - - description: 根据UniqueID查询 + - description: Query by unique ID in: query name: unique_id type: string - - description: 根据证书序列号查询 + - description: Query by certificate serial number in: query name: cert_sn type: string - - description: 证书状态 good/revoked + - description: Certificate status good/revoked in: query name: status type: string - - description: 排序,默认 issued_at desc + - description: Sort, default issued_at desc in: query name: order type: string - - description: 过期, 起始时间点 + - description: Expiration, starting point in: query name: expiry_start_time type: string - - description: 过期, 结束时间点 + - description: Expiration, end time point in: query name: expiry_end_time type: string - - description: 分页参数, 默认 20 + - description: Paging parameters, default 20 in: query name: limit_num type: integer - - description: 页数, 默认 1 + - description: Number of pages, default 1 in: query name: page type: integer @@ -726,7 +727,7 @@ paths: - Workload /workload/lifecycle/forbid_new_certs: post: - description: 禁止某个 UniqueID 申请证书 + description: Prohibit a uniqueID from requesting a certificate parameters: - description: ' ' in: body @@ -749,12 +750,12 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: 禁止申请证书 + summary: Application for certificate is prohibited tags: - Workload /workload/lifecycle/forbid_unit: post: - description: 吊销并禁止服务证书 + description: Revoke and prohibit service certificates parameters: - description: ' ' in: body @@ -777,14 +778,14 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: 
(p1)吊销并禁止服务证书 + summary: (p1)Revoke and prohibit service certificates tags: - Workload /workload/lifecycle/recover: post: - description: 恢复证书 + description: Restore certificate parameters: - - description: sn+aki / unique_id 二选一 + - description: sn+aki / unique_id either-or in: body name: body required: true @@ -810,7 +811,7 @@ paths: - Workload /workload/lifecycle/recover_forbid_new_certs: post: - description: 恢复允许某个 UniqueID 申请证书 + description: Recovery allows a uniqueID to request a certificate parameters: - description: ' ' in: body @@ -833,12 +834,12 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: 恢复申请证书 + summary: Resume application certificate tags: - Workload /workload/lifecycle/recover_unit: post: - description: 恢复并允许服务证书 + description: Restore and allow service certificates parameters: - description: ' ' in: body @@ -861,14 +862,14 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p1)恢复并允许服务证书 + summary: (p1)Restore and allow service certificates tags: - Workload /workload/lifecycle/revoke: post: - description: 吊销证书 + description: revoked certificate parameters: - - description: sn+aki / unique_id 二选一 + - description: sn+aki / unique_id pick one of two in: body name: body required: true @@ -894,33 +895,33 @@ paths: - Workload /workload/units_certs_list: get: - description: 服务证书列表 + description: List of service certificates parameters: - - description: 查询 unique_id + - description: Query unique_id in: query name: unique_id type: string - - description: 证书类型 + - description: Certificate type in: query name: role type: string - - description: 过期, 起始时间点 + - description: Expiration, starting point in: query name: expiry_start_time type: string - - description: 过期, 结束时间点 + - description: Expiration, end time point in: query name: expiry_end_time type: string - - description: 是否禁用, 1禁用 2启用 + - description: Disable, 1 disable, 2 
enable in: query name: is_forbid type: integer - - description: 分页参数, 默认 20 + - description: Paging parameters, default 20 in: query name: limit_num type: integer - - description: 页数, 默认 1 + - description: Number of pages, default 1 in: query name: page type: integer @@ -946,15 +947,15 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p1)服务证书列表 + summary: (p1)List of service certificates tags: - Workload /workload/units_forbid_query: get: - description: 查询 unique_id 是否被禁止申请证书 + description: Query unique_id Is it forbidden to apply for certificate parameters: - collectionFormat: multi - description: 查询 unique_id 数组 + description: Query unique_ID array in: query items: type: string @@ -981,14 +982,14 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: 禁止申请证书查询 + summary: Prohibit applying for certificate query tags: - Workload /workload/units_status: post: - description: 服务对应状态查询 + description: Service corresponding status query parameters: - - description: 查询 unique_id 数组 + - description: Query unique_ID array in: body name: json required: true @@ -1014,7 +1015,7 @@ paths: description: Internal Server Error schema: $ref: '#/definitions/helper.HTTPWrapErrorResponse' - summary: (p1)服务对应状态查询 + summary: (p1)Service corresponding status query tags: - Workload swagger: "2.0" diff --git a/examples/cfssl-model/dao/certificates.go b/examples/cfssl-model/dao/certificates.go deleted file mode 100644 index 4d9d952..0000000 --- a/examples/cfssl-model/dao/certificates.go +++ /dev/null @@ -1,111 +0,0 @@ -package dao - -import ( - "context" - "time" - - "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model/model" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = null.Bool{} - _ = uuid.UUID{} -) - -// GetAllCertificates is a function to get a slice of record(s) from certificates table in the cap 
database -// params - page - page requested (defaults to 0) -// params - pagesize - number of records in a page (defaults to 20) -// params - order - db sort order column -// error - ErrNotFound, db Find error -func GetAllCertificates(ctx context.Context, page, pagesize int64, order string) (results []*model.Certificates, totalRows int, err error) { - - resultOrm := DB.Model(&model.Certificates{}) - resultOrm.Count(&totalRows) - - if page > 0 { - offset := (page - 1) * pagesize - resultOrm = resultOrm.Offset(offset).Limit(pagesize) - } else { - resultOrm = resultOrm.Limit(pagesize) - } - - if order != "" { - resultOrm = resultOrm.Order(order) - } - - if err = resultOrm.Find(&results).Error; err != nil { - err = ErrNotFound - return nil, -1, err - } - - return results, totalRows, nil -} - -// GetCertificates is a function to get a single record from the certificates table in the cap database -// error - ErrNotFound, db Find error -func GetCertificates(ctx context.Context, argSerialNumber string, argAuthorityKeyIdentifier string) (record *model.Certificates, err error) { - record = &model.Certificates{} - if err = DB.First(record, argSerialNumber, argAuthorityKeyIdentifier).Error; err != nil { - err = ErrNotFound - return record, err - } - - return record, nil -} - -// AddCertificates is a function to add a single record to certificates table in the cap database -// error - ErrInsertFailed, db save call failed -func AddCertificates(ctx context.Context, record *model.Certificates) (result *model.Certificates, RowsAffected int64, err error) { - db := DB.Save(record) - if err = db.Error; err != nil { - return nil, -1, ErrInsertFailed - } - - return record, db.RowsAffected, nil -} - -// UpdateCertificates is a function to update a single record from certificates table in the cap database -// error - ErrNotFound, db record for id not found -// error - ErrUpdateFailed, db meta data copy failed or db.Save call failed -func UpdateCertificates(ctx context.Context, 
argSerialNumber string, argAuthorityKeyIdentifier string, updated *model.Certificates) (result *model.Certificates, RowsAffected int64, err error) { - - result = &model.Certificates{} - db := DB.First(result, argSerialNumber, argAuthorityKeyIdentifier) - if err = db.Error; err != nil { - return nil, -1, ErrNotFound - } - - if err = Copy(result, updated); err != nil { - return nil, -1, ErrUpdateFailed - } - - db = db.Save(result) - if err = db.Error; err != nil { - return nil, -1, ErrUpdateFailed - } - - return result, db.RowsAffected, nil -} - -// DeleteCertificates is a function to delete a single record from certificates table in the cap database -// error - ErrNotFound, db Find error -// error - ErrDeleteFailed, db Delete failed error -func DeleteCertificates(ctx context.Context, argSerialNumber string, argAuthorityKeyIdentifier string) (rowsAffected int64, err error) { - - record := &model.Certificates{} - db := DB.First(record, argSerialNumber, argAuthorityKeyIdentifier) - if db.Error != nil { - return -1, ErrNotFound - } - - db = db.Delete(record) - if err = db.Error; err != nil { - return -1, ErrDeleteFailed - } - - return db.RowsAffected, nil -} diff --git a/examples/cfssl-model/dao/dao_base.go b/examples/cfssl-model/dao/dao_base.go deleted file mode 100644 index 4125292..0000000 --- a/examples/cfssl-model/dao/dao_base.go +++ /dev/null @@ -1,90 +0,0 @@ -package dao - -import ( - "context" - "errors" - "fmt" - "reflect" - - "github.com/jinzhu/gorm" -) - -// BuildInfo is used to define the application build info, and inject values into via the build process. 
-type BuildInfo struct { - - // BuildDate date string of when build was performed filled in by -X compile flag - BuildDate string - - // LatestCommit date string of when build was performed filled in by -X compile flag - LatestCommit string - - // BuildNumber date string of when build was performed filled in by -X compile flag - BuildNumber string - - // BuiltOnIP date string of when build was performed filled in by -X compile flag - BuiltOnIP string - - // BuiltOnOs date string of when build was performed filled in by -X compile flag - BuiltOnOs string - - // RuntimeVer date string of when build was performed filled in by -X compile flag - RuntimeVer string -} - -type LogSql func(ctx context.Context, sql string) - -var ( - // ErrNotFound error when record not found - ErrNotFound = fmt.Errorf("record Not Found") - - // ErrUnableToMarshalJSON error when json payload corrupt - ErrUnableToMarshalJSON = fmt.Errorf("json payload corrupt") - - // ErrUpdateFailed error when update fails - ErrUpdateFailed = fmt.Errorf("db update error") - - // ErrInsertFailed error when insert fails - ErrInsertFailed = fmt.Errorf("db insert error") - - // ErrDeleteFailed error when delete fails - ErrDeleteFailed = fmt.Errorf("db delete error") - - // ErrBadParams error when bad params passed in - ErrBadParams = fmt.Errorf("bad params error") - - // DB reference to database - DB *gorm.DB - - // AppBuildInfo reference to build info - AppBuildInfo *BuildInfo - - // Logger function that will be invoked before executing sql - Logger LogSql -) - -// Copy a src struct into a destination struct -func Copy(dst interface{}, src interface{}) error { - dstV := reflect.Indirect(reflect.ValueOf(dst)) - srcV := reflect.Indirect(reflect.ValueOf(src)) - - if !dstV.CanAddr() { - return errors.New("copy to value is unaddressable") - } - - if srcV.Type() != dstV.Type() { - return errors.New("different types can be copied") - } - - for i := 0; i < dstV.NumField(); i++ { - f := srcV.Field(i) - if 
!isZeroOfUnderlyingType(f.Interface()) { - dstV.Field(i).Set(f) - } - } - - return nil -} - -func isZeroOfUnderlyingType(x interface{}) bool { - return x == nil || reflect.DeepEqual(x, reflect.Zero(reflect.TypeOf(x)).Interface()) -} diff --git a/examples/cfssl-model/dao/forbid.go b/examples/cfssl-model/dao/forbid.go deleted file mode 100644 index e4730a7..0000000 --- a/examples/cfssl-model/dao/forbid.go +++ /dev/null @@ -1,111 +0,0 @@ -package dao - -import ( - "context" - "time" - - "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model/model" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = null.Bool{} - _ = uuid.UUID{} -) - -// GetAllForbid is a function to get a slice of record(s) from forbid table in the cap database -// params - page - page requested (defaults to 0) -// params - pagesize - number of records in a page (defaults to 20) -// params - order - db sort order column -// error - ErrNotFound, db Find error -func GetAllForbid(ctx context.Context, page, pagesize int64, order string) (results []*model.Forbid, totalRows int, err error) { - - resultOrm := DB.Model(&model.Forbid{}) - resultOrm.Count(&totalRows) - - if page > 0 { - offset := (page - 1) * pagesize - resultOrm = resultOrm.Offset(offset).Limit(pagesize) - } else { - resultOrm = resultOrm.Limit(pagesize) - } - - if order != "" { - resultOrm = resultOrm.Order(order) - } - - if err = resultOrm.Find(&results).Error; err != nil { - err = ErrNotFound - return nil, -1, err - } - - return results, totalRows, nil -} - -// GetForbid is a function to get a single record from the forbid table in the cap database -// error - ErrNotFound, db Find error -func GetForbid(ctx context.Context, argId uint32) (record *model.Forbid, err error) { - record = &model.Forbid{} - if err = DB.First(record, argId).Error; err != nil { - err = ErrNotFound - return record, err - } - - return record, nil -} - -// AddForbid is a function to add a single record to forbid table 
in the cap database -// error - ErrInsertFailed, db save call failed -func AddForbid(ctx context.Context, record *model.Forbid) (result *model.Forbid, RowsAffected int64, err error) { - db := DB.Save(record) - if err = db.Error; err != nil { - return nil, -1, ErrInsertFailed - } - - return record, db.RowsAffected, nil -} - -// UpdateForbid is a function to update a single record from forbid table in the cap database -// error - ErrNotFound, db record for id not found -// error - ErrUpdateFailed, db meta data copy failed or db.Save call failed -func UpdateForbid(ctx context.Context, argId uint32, updated *model.Forbid) (result *model.Forbid, RowsAffected int64, err error) { - - result = &model.Forbid{} - db := DB.First(result, argId) - if err = db.Error; err != nil { - return nil, -1, ErrNotFound - } - - if err = Copy(result, updated); err != nil { - return nil, -1, ErrUpdateFailed - } - - db = db.Save(result) - if err = db.Error; err != nil { - return nil, -1, ErrUpdateFailed - } - - return result, db.RowsAffected, nil -} - -// DeleteForbid is a function to delete a single record from forbid table in the cap database -// error - ErrNotFound, db Find error -// error - ErrDeleteFailed, db Delete failed error -func DeleteForbid(ctx context.Context, argId uint32) (rowsAffected int64, err error) { - - record := &model.Forbid{} - db := DB.First(record, argId) - if db.Error != nil { - return -1, ErrNotFound - } - - db = db.Delete(record) - if err = db.Error; err != nil { - return -1, ErrDeleteFailed - } - - return db.RowsAffected, nil -} diff --git a/examples/cfssl-model/dao/ocsp_responses.go b/examples/cfssl-model/dao/ocsp_responses.go deleted file mode 100644 index 69924d0..0000000 --- a/examples/cfssl-model/dao/ocsp_responses.go +++ /dev/null @@ -1,111 +0,0 @@ -package dao - -import ( - "context" - "time" - - "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model/model" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - 
_ = null.Bool{} - _ = uuid.UUID{} -) - -// GetAllOcspResponses is a function to get a slice of record(s) from ocsp_responses table in the cap database -// params - page - page requested (defaults to 0) -// params - pagesize - number of records in a page (defaults to 20) -// params - order - db sort order column -// error - ErrNotFound, db Find error -func GetAllOcspResponses(ctx context.Context, page, pagesize int64, order string) (results []*model.OcspResponses, totalRows int, err error) { - - resultOrm := DB.Model(&model.OcspResponses{}) - resultOrm.Count(&totalRows) - - if page > 0 { - offset := (page - 1) * pagesize - resultOrm = resultOrm.Offset(offset).Limit(pagesize) - } else { - resultOrm = resultOrm.Limit(pagesize) - } - - if order != "" { - resultOrm = resultOrm.Order(order) - } - - if err = resultOrm.Find(&results).Error; err != nil { - err = ErrNotFound - return nil, -1, err - } - - return results, totalRows, nil -} - -// GetOcspResponses is a function to get a single record from the ocsp_responses table in the cap database -// error - ErrNotFound, db Find error -func GetOcspResponses(ctx context.Context, argSerialNumber string, argAuthorityKeyIdentifier string) (record *model.OcspResponses, err error) { - record = &model.OcspResponses{} - if err = DB.First(record, argSerialNumber, argAuthorityKeyIdentifier).Error; err != nil { - err = ErrNotFound - return record, err - } - - return record, nil -} - -// AddOcspResponses is a function to add a single record to ocsp_responses table in the cap database -// error - ErrInsertFailed, db save call failed -func AddOcspResponses(ctx context.Context, record *model.OcspResponses) (result *model.OcspResponses, RowsAffected int64, err error) { - db := DB.Save(record) - if err = db.Error; err != nil { - return nil, -1, ErrInsertFailed - } - - return record, db.RowsAffected, nil -} - -// UpdateOcspResponses is a function to update a single record from ocsp_responses table in the cap database -// error - ErrNotFound, 
db record for id not found -// error - ErrUpdateFailed, db meta data copy failed or db.Save call failed -func UpdateOcspResponses(ctx context.Context, argSerialNumber string, argAuthorityKeyIdentifier string, updated *model.OcspResponses) (result *model.OcspResponses, RowsAffected int64, err error) { - - result = &model.OcspResponses{} - db := DB.First(result, argSerialNumber, argAuthorityKeyIdentifier) - if err = db.Error; err != nil { - return nil, -1, ErrNotFound - } - - if err = Copy(result, updated); err != nil { - return nil, -1, ErrUpdateFailed - } - - db = db.Save(result) - if err = db.Error; err != nil { - return nil, -1, ErrUpdateFailed - } - - return result, db.RowsAffected, nil -} - -// DeleteOcspResponses is a function to delete a single record from ocsp_responses table in the cap database -// error - ErrNotFound, db Find error -// error - ErrDeleteFailed, db Delete failed error -func DeleteOcspResponses(ctx context.Context, argSerialNumber string, argAuthorityKeyIdentifier string) (rowsAffected int64, err error) { - - record := &model.OcspResponses{} - db := DB.First(record, argSerialNumber, argAuthorityKeyIdentifier) - if db.Error != nil { - return -1, ErrNotFound - } - - db = db.Delete(record) - if err = db.Error; err != nil { - return -1, ErrDeleteFailed - } - - return db.RowsAffected, nil -} diff --git a/examples/cfssl-model/dao/schema_migrations.go b/examples/cfssl-model/dao/schema_migrations.go deleted file mode 100644 index 35afff0..0000000 --- a/examples/cfssl-model/dao/schema_migrations.go +++ /dev/null @@ -1,111 +0,0 @@ -package dao - -import ( - "context" - "time" - - "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model/model" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = null.Bool{} - _ = uuid.UUID{} -) - -// GetAllSchemaMigrations is a function to get a slice of record(s) from schema_migrations table in the cap database -// params - page - page requested (defaults to 0) -// 
params - pagesize - number of records in a page (defaults to 20) -// params - order - db sort order column -// error - ErrNotFound, db Find error -func GetAllSchemaMigrations(ctx context.Context, page, pagesize int64, order string) (results []*model.SchemaMigrations, totalRows int, err error) { - - resultOrm := DB.Model(&model.SchemaMigrations{}) - resultOrm.Count(&totalRows) - - if page > 0 { - offset := (page - 1) * pagesize - resultOrm = resultOrm.Offset(offset).Limit(pagesize) - } else { - resultOrm = resultOrm.Limit(pagesize) - } - - if order != "" { - resultOrm = resultOrm.Order(order) - } - - if err = resultOrm.Find(&results).Error; err != nil { - err = ErrNotFound - return nil, -1, err - } - - return results, totalRows, nil -} - -// GetSchemaMigrations is a function to get a single record from the schema_migrations table in the cap database -// error - ErrNotFound, db Find error -func GetSchemaMigrations(ctx context.Context, argVersion int64) (record *model.SchemaMigrations, err error) { - record = &model.SchemaMigrations{} - if err = DB.First(record, argVersion).Error; err != nil { - err = ErrNotFound - return record, err - } - - return record, nil -} - -// AddSchemaMigrations is a function to add a single record to schema_migrations table in the cap database -// error - ErrInsertFailed, db save call failed -func AddSchemaMigrations(ctx context.Context, record *model.SchemaMigrations) (result *model.SchemaMigrations, RowsAffected int64, err error) { - db := DB.Save(record) - if err = db.Error; err != nil { - return nil, -1, ErrInsertFailed - } - - return record, db.RowsAffected, nil -} - -// UpdateSchemaMigrations is a function to update a single record from schema_migrations table in the cap database -// error - ErrNotFound, db record for id not found -// error - ErrUpdateFailed, db meta data copy failed or db.Save call failed -func UpdateSchemaMigrations(ctx context.Context, argVersion int64, updated *model.SchemaMigrations) (result 
*model.SchemaMigrations, RowsAffected int64, err error) { - - result = &model.SchemaMigrations{} - db := DB.First(result, argVersion) - if err = db.Error; err != nil { - return nil, -1, ErrNotFound - } - - if err = Copy(result, updated); err != nil { - return nil, -1, ErrUpdateFailed - } - - db = db.Save(result) - if err = db.Error; err != nil { - return nil, -1, ErrUpdateFailed - } - - return result, db.RowsAffected, nil -} - -// DeleteSchemaMigrations is a function to delete a single record from schema_migrations table in the cap database -// error - ErrNotFound, db Find error -// error - ErrDeleteFailed, db Delete failed error -func DeleteSchemaMigrations(ctx context.Context, argVersion int64) (rowsAffected int64, err error) { - - record := &model.SchemaMigrations{} - db := DB.First(record, argVersion) - if db.Error != nil { - return -1, ErrNotFound - } - - db = db.Delete(record) - if err = db.Error; err != nil { - return -1, ErrDeleteFailed - } - - return db.RowsAffected, nil -} diff --git a/examples/cfssl-model/dao/self_keypair.go b/examples/cfssl-model/dao/self_keypair.go deleted file mode 100644 index edaeff3..0000000 --- a/examples/cfssl-model/dao/self_keypair.go +++ /dev/null @@ -1,111 +0,0 @@ -package dao - -import ( - "context" - "time" - - "gitlab.oneitfarm.com/bifrost/capitalizone/examples/cfssl-model/model" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = null.Bool{} - _ = uuid.UUID{} -) - -// GetAllSelfKeypair is a function to get a slice of record(s) from self_keypair table in the cap database -// params - page - page requested (defaults to 0) -// params - pagesize - number of records in a page (defaults to 20) -// params - order - db sort order column -// error - ErrNotFound, db Find error -func GetAllSelfKeypair(ctx context.Context, page, pagesize int64, order string) (results []*model.SelfKeypair, totalRows int, err error) { - - resultOrm := DB.Model(&model.SelfKeypair{}) - resultOrm.Count(&totalRows) 
- - if page > 0 { - offset := (page - 1) * pagesize - resultOrm = resultOrm.Offset(offset).Limit(pagesize) - } else { - resultOrm = resultOrm.Limit(pagesize) - } - - if order != "" { - resultOrm = resultOrm.Order(order) - } - - if err = resultOrm.Find(&results).Error; err != nil { - err = ErrNotFound - return nil, -1, err - } - - return results, totalRows, nil -} - -// GetSelfKeypair is a function to get a single record from the self_keypair table in the cap database -// error - ErrNotFound, db Find error -func GetSelfKeypair(ctx context.Context, argId uint32) (record *model.SelfKeypair, err error) { - record = &model.SelfKeypair{} - if err = DB.First(record, argId).Error; err != nil { - err = ErrNotFound - return record, err - } - - return record, nil -} - -// AddSelfKeypair is a function to add a single record to self_keypair table in the cap database -// error - ErrInsertFailed, db save call failed -func AddSelfKeypair(ctx context.Context, record *model.SelfKeypair) (result *model.SelfKeypair, RowsAffected int64, err error) { - db := DB.Save(record) - if err = db.Error; err != nil { - return nil, -1, ErrInsertFailed - } - - return record, db.RowsAffected, nil -} - -// UpdateSelfKeypair is a function to update a single record from self_keypair table in the cap database -// error - ErrNotFound, db record for id not found -// error - ErrUpdateFailed, db meta data copy failed or db.Save call failed -func UpdateSelfKeypair(ctx context.Context, argId uint32, updated *model.SelfKeypair) (result *model.SelfKeypair, RowsAffected int64, err error) { - - result = &model.SelfKeypair{} - db := DB.First(result, argId) - if err = db.Error; err != nil { - return nil, -1, ErrNotFound - } - - if err = Copy(result, updated); err != nil { - return nil, -1, ErrUpdateFailed - } - - db = db.Save(result) - if err = db.Error; err != nil { - return nil, -1, ErrUpdateFailed - } - - return result, db.RowsAffected, nil -} - -// DeleteSelfKeypair is a function to delete a single record from 
self_keypair table in the cap database -// error - ErrNotFound, db Find error -// error - ErrDeleteFailed, db Delete failed error -func DeleteSelfKeypair(ctx context.Context, argId uint32) (rowsAffected int64, err error) { - - record := &model.SelfKeypair{} - db := DB.First(record, argId) - if db.Error != nil { - return -1, ErrNotFound - } - - db = db.Delete(record) - if err = db.Error; err != nil { - return -1, ErrDeleteFailed - } - - return db.RowsAffected, nil -} diff --git a/examples/cfssl-model/model/certificates.go b/examples/cfssl-model/model/certificates.go deleted file mode 100644 index 7a02372..0000000 --- a/examples/cfssl-model/model/certificates.go +++ /dev/null @@ -1,379 +0,0 @@ -package model - -import ( - "database/sql" - "time" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = sql.LevelDefault - _ = null.Bool{} - _ = uuid.UUID{} -) - -/* -DB Table Details -------------------------------------- - - -CREATE TABLE `certificates` ( - `serial_number` varchar(128) NOT NULL, - `authority_key_identifier` varchar(128) NOT NULL, - `ca_label` varchar(128) DEFAULT NULL, - `status` varchar(128) NOT NULL, - `reason` int(11) DEFAULT NULL, - `expiry` timestamp NULL DEFAULT NULL, - `revoked_at` timestamp NULL DEFAULT NULL, - `pem` text NOT NULL, - `issued_at` timestamp NULL DEFAULT NULL, - `not_before` timestamp NULL DEFAULT NULL, - `metadata` json DEFAULT NULL, - `sans` json DEFAULT NULL, - `common_name` text, - PRIMARY KEY (`serial_number`,`authority_key_identifier`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 - -JSON Sample -------------------------------------- -{ "reason": 92, "expiry": "2286-06-20T23:57:01.802309193+08:00", "pem": "NIarUyeMyuQCDtUVoaLYWXctc", "not_before": "2277-09-13T05:59:41.211347736+08:00", "sans": "FhywGSuCylQTTWJcEXEwgjEqb", "serial_number": "OcunCCFpLqmKkOheulgeVYnqj", "authority_key_identifier": "JypnksToEYXOYcoAiRfBoAYrf", "ca_label": "pVfRwEQusPsFVZCUgjUGhcgIt", "common_name": 
"aoSbySXychvtlTtREsJimCBib", "metadata": "FmTeDWLnbpvZKCrkRnhraDMhm", "status": "ViTBMuKSQfqvLDlZOHITIYABu", "revoked_at": "2094-12-13T11:08:25.06039924+08:00", "issued_at": "2036-08-08T14:53:33.984450504+08:00"} - - - -*/ - -// Certificates struct is a row record of the certificates table in the cap database -type Certificates struct { - //[ 0] serial_number varchar(128) null: false primary: true isArray: false auto: false col: varchar len: 128 default: [] - SerialNumber string `gorm:"primary_key;column:serial_number;type:varchar;size:128;" json:"serial_number" db:"serial_number"` - //[ 1] authority_key_identifier varchar(128) null: false primary: true isArray: false auto: false col: varchar len: 128 default: [] - AuthorityKeyIdentifier string `gorm:"primary_key;column:authority_key_identifier;type:varchar;size:128;" json:"authority_key_identifier" db:"authority_key_identifier"` - //[ 2] ca_label varchar(128) null: true primary: false isArray: false auto: false col: varchar len: 128 default: [] - CaLabel sql.NullString `gorm:"column:ca_label;type:varchar;size:128;" json:"ca_label" db:"ca_label"` - //[ 3] status varchar(128) null: false primary: false isArray: false auto: false col: varchar len: 128 default: [] - Status string `gorm:"column:status;type:varchar;size:128;" json:"status" db:"status"` - //[ 4] reason int null: true primary: false isArray: false auto: false col: int len: -1 default: [] - Reason sql.NullInt64 `gorm:"column:reason;type:int;" json:"reason" db:"reason"` - //[ 5] expiry timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - Expiry time.Time `gorm:"column:expiry;type:timestamp;" json:"expiry" db:"expiry"` - //[ 6] revoked_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - RevokedAt time.Time `gorm:"column:revoked_at;type:timestamp;" json:"revoked_at" db:"revoked_at"` - //[ 7] pem text(65535) null: false primary: false isArray: false auto: false 
col: text len: 65535 default: [] - Pem string `gorm:"column:pem;type:text;size:65535;" json:"pem" db:"pem"` - //[ 8] issued_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - IssuedAt time.Time `gorm:"column:issued_at;type:timestamp;" json:"issued_at" db:"issued_at"` - //[ 9] not_before timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - NotBefore time.Time `gorm:"column:not_before;type:timestamp;" json:"not_before" db:"not_before"` - //[10] metadata json null: true primary: false isArray: false auto: false col: json len: -1 default: [] - Metadata sql.NullString `gorm:"column:metadata;type:json;" json:"metadata" db:"metadata"` - //[11] sans json null: true primary: false isArray: false auto: false col: json len: -1 default: [] - Sans sql.NullString `gorm:"column:sans;type:json;" json:"sans" db:"sans"` - //[12] common_name text(65535) null: true primary: false isArray: false auto: false col: text len: 65535 default: [] - CommonName sql.NullString `gorm:"column:common_name;type:text;size:65535;" json:"common_name" db:"common_name"` -} - -var certificatesTableInfo = &TableInfo{ - Name: "certificates", - Columns: []*ColumnInfo{ - - &ColumnInfo{ - Index: 0, - Name: "serial_number", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(128)", - IsPrimaryKey: true, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 128, - GoFieldName: "SerialNumber", - GoFieldType: "string", - JSONFieldName: "serial_number", - ProtobufFieldName: "serial_number", - ProtobufType: "string", - ProtobufPos: 1, - }, - - &ColumnInfo{ - Index: 1, - Name: "authority_key_identifier", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(128)", - IsPrimaryKey: true, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 128, - 
GoFieldName: "AuthorityKeyIdentifier", - GoFieldType: "string", - JSONFieldName: "authority_key_identifier", - ProtobufFieldName: "authority_key_identifier", - ProtobufType: "string", - ProtobufPos: 2, - }, - - &ColumnInfo{ - Index: 2, - Name: "ca_label", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(128)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 128, - GoFieldName: "CaLabel", - GoFieldType: "sql.NullString", - JSONFieldName: "ca_label", - ProtobufFieldName: "ca_label", - ProtobufType: "string", - ProtobufPos: 3, - }, - - &ColumnInfo{ - Index: 3, - Name: "status", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(128)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 128, - GoFieldName: "Status", - GoFieldType: "string", - JSONFieldName: "status", - ProtobufFieldName: "status", - ProtobufType: "string", - ProtobufPos: 4, - }, - - &ColumnInfo{ - Index: 4, - Name: "reason", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "int", - DatabaseTypePretty: "int", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "int", - ColumnLength: -1, - GoFieldName: "Reason", - GoFieldType: "sql.NullInt64", - JSONFieldName: "reason", - ProtobufFieldName: "reason", - ProtobufType: "int32", - ProtobufPos: 5, - }, - - &ColumnInfo{ - Index: 5, - Name: "expiry", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "Expiry", - GoFieldType: "time.Time", - JSONFieldName: "expiry", - ProtobufFieldName: "expiry", - ProtobufType: "uint64", - ProtobufPos: 6, - }, - - &ColumnInfo{ - Index: 6, - Name: "revoked_at", - 
Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "RevokedAt", - GoFieldType: "time.Time", - JSONFieldName: "revoked_at", - ProtobufFieldName: "revoked_at", - ProtobufType: "uint64", - ProtobufPos: 7, - }, - - &ColumnInfo{ - Index: 7, - Name: "pem", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "text", - DatabaseTypePretty: "text(65535)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "text", - ColumnLength: 65535, - GoFieldName: "Pem", - GoFieldType: "string", - JSONFieldName: "pem", - ProtobufFieldName: "pem", - ProtobufType: "string", - ProtobufPos: 8, - }, - - &ColumnInfo{ - Index: 8, - Name: "issued_at", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "IssuedAt", - GoFieldType: "time.Time", - JSONFieldName: "issued_at", - ProtobufFieldName: "issued_at", - ProtobufType: "uint64", - ProtobufPos: 9, - }, - - &ColumnInfo{ - Index: 9, - Name: "not_before", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "NotBefore", - GoFieldType: "time.Time", - JSONFieldName: "not_before", - ProtobufFieldName: "not_before", - ProtobufType: "uint64", - ProtobufPos: 10, - }, - - &ColumnInfo{ - Index: 10, - Name: "metadata", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "json", - DatabaseTypePretty: "json", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "json", - ColumnLength: -1, - GoFieldName: "Metadata", 
- GoFieldType: "sql.NullString", - JSONFieldName: "metadata", - ProtobufFieldName: "metadata", - ProtobufType: "string", - ProtobufPos: 11, - }, - - &ColumnInfo{ - Index: 11, - Name: "sans", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "json", - DatabaseTypePretty: "json", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "json", - ColumnLength: -1, - GoFieldName: "Sans", - GoFieldType: "sql.NullString", - JSONFieldName: "sans", - ProtobufFieldName: "sans", - ProtobufType: "string", - ProtobufPos: 12, - }, - - &ColumnInfo{ - Index: 12, - Name: "common_name", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "text", - DatabaseTypePretty: "text(65535)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "text", - ColumnLength: 65535, - GoFieldName: "CommonName", - GoFieldType: "sql.NullString", - JSONFieldName: "common_name", - ProtobufFieldName: "common_name", - ProtobufType: "string", - ProtobufPos: 13, - }, - }, -} - -// TableName sets the insert table name for this struct type -func (c *Certificates) TableName() string { - return "certificates" -} - -// BeforeSave invoked before saving, return an error if field is not populated. -func (c *Certificates) BeforeSave() error { - return nil -} - -// Prepare invoked before saving, can be used to populate fields etc. -func (c *Certificates) Prepare() { -} - -// Validate invoked before performing action, return an error if field is not populated. 
-func (c *Certificates) Validate(action Action) error { - return nil -} - -// TableInfo return table meta data -func (c *Certificates) TableInfo() *TableInfo { - return certificatesTableInfo -} diff --git a/examples/cfssl-model/model/forbid.go b/examples/cfssl-model/model/forbid.go deleted file mode 100644 index baf76a8..0000000 --- a/examples/cfssl-model/model/forbid.go +++ /dev/null @@ -1,192 +0,0 @@ -package model - -import ( - "database/sql" - "time" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = sql.LevelDefault - _ = null.Bool{} - _ = uuid.UUID{} -) - -/* -DB Table Details -------------------------------------- - - -CREATE TABLE `forbid` ( - `id` int(11) unsigned NOT NULL AUTO_INCREMENT, - `unique_id` varchar(40) NOT NULL, - `created_at` timestamp NULL DEFAULT NULL, - `updated_at` timestamp NULL DEFAULT NULL, - `deleted_at` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`id`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 - -JSON Sample -------------------------------------- -{ "id": 57, "unique_id": "xNqChbAQxwlXQscFcNacXlEFR", "created_at": "2189-03-28T13:29:03.755973202+08:00", "updated_at": "2165-07-06T04:08:04.165658654+08:00", "deleted_at": "2062-09-27T18:12:40.213950692+08:00"} - - -Comments -------------------------------------- -[ 0] column is set for unsigned - - - -*/ - -// Forbid struct is a row record of the forbid table in the cap database -type Forbid struct { - //[ 0] id uint null: false primary: true isArray: false auto: true col: uint len: -1 default: [] - ID uint32 `gorm:"primary_key;AUTO_INCREMENT;column:id;type:uint;" json:"id" db:"id"` - //[ 1] unique_id varchar(40) null: false primary: false isArray: false auto: false col: varchar len: 40 default: [] - UniqueID string `gorm:"column:unique_id;type:varchar;size:40;" json:"unique_id" db:"unique_id"` - //[ 2] created_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - CreatedAt time.Time 
`gorm:"column:created_at;type:timestamp;" json:"created_at" db:"created_at"` - //[ 3] updated_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;" json:"updated_at" db:"updated_at"` - //[ 4] deleted_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - DeletedAt time.Time `gorm:"column:deleted_at;type:timestamp;" json:"deleted_at" db:"deleted_at"` -} - -var forbidTableInfo = &TableInfo{ - Name: "forbid", - Columns: []*ColumnInfo{ - - &ColumnInfo{ - Index: 0, - Name: "id", - Comment: ``, - Notes: `column is set for unsigned`, - Nullable: false, - DatabaseTypeName: "uint", - DatabaseTypePretty: "uint", - IsPrimaryKey: true, - IsAutoIncrement: true, - IsArray: false, - ColumnType: "uint", - ColumnLength: -1, - GoFieldName: "ID", - GoFieldType: "uint32", - JSONFieldName: "id", - ProtobufFieldName: "id", - ProtobufType: "uint32", - ProtobufPos: 1, - }, - - &ColumnInfo{ - Index: 1, - Name: "unique_id", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(40)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 40, - GoFieldName: "UniqueID", - GoFieldType: "string", - JSONFieldName: "unique_id", - ProtobufFieldName: "unique_id", - ProtobufType: "string", - ProtobufPos: 2, - }, - - &ColumnInfo{ - Index: 2, - Name: "created_at", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "CreatedAt", - GoFieldType: "time.Time", - JSONFieldName: "created_at", - ProtobufFieldName: "created_at", - ProtobufType: "uint64", - ProtobufPos: 3, - }, - - &ColumnInfo{ - Index: 3, - Name: "updated_at", - Comment: ``, - Notes: ``, - 
Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "UpdatedAt", - GoFieldType: "time.Time", - JSONFieldName: "updated_at", - ProtobufFieldName: "updated_at", - ProtobufType: "uint64", - ProtobufPos: 4, - }, - - &ColumnInfo{ - Index: 4, - Name: "deleted_at", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "DeletedAt", - GoFieldType: "time.Time", - JSONFieldName: "deleted_at", - ProtobufFieldName: "deleted_at", - ProtobufType: "uint64", - ProtobufPos: 5, - }, - }, -} - -// TableName sets the insert table name for this struct type -func (f *Forbid) TableName() string { - return "forbid" -} - -// BeforeSave invoked before saving, return an error if field is not populated. -func (f *Forbid) BeforeSave() error { - return nil -} - -// Prepare invoked before saving, can be used to populate fields etc. -func (f *Forbid) Prepare() { -} - -// Validate invoked before performing action, return an error if field is not populated. 
-func (f *Forbid) Validate(action Action) error { - return nil -} - -// TableInfo return table meta data -func (f *Forbid) TableInfo() *TableInfo { - return forbidTableInfo -} diff --git a/examples/cfssl-model/model/model_base.go b/examples/cfssl-model/model/model_base.go deleted file mode 100644 index 2f79be8..0000000 --- a/examples/cfssl-model/model/model_base.go +++ /dev/null @@ -1,102 +0,0 @@ -package model - -import "fmt" - -// Action CRUD actions -type Action int32 - -var ( - // Create action when record is created - Create = Action(0) - - // RetrieveOne action when a record is retrieved from db - RetrieveOne = Action(1) - - // RetrieveMany action when record(s) are retrieved from db - RetrieveMany = Action(2) - - // Update action when record is updated in db - Update = Action(3) - - // Delete action when record is deleted in db - Delete = Action(4) - - // FetchDDL action when fetching ddl info from db - FetchDDL = Action(5) - - tables map[string]*TableInfo -) - -func init() { - tables = make(map[string]*TableInfo) - - tables["certificates"] = certificatesTableInfo - tables["forbid"] = forbidTableInfo - tables["ocsp_responses"] = ocsp_responsesTableInfo - tables["schema_migrations"] = schema_migrationsTableInfo - tables["self_keypair"] = self_keypairTableInfo -} - -// String describe the action -func (i Action) String() string { - switch i { - case Create: - return "Create" - case RetrieveOne: - return "RetrieveOne" - case RetrieveMany: - return "RetrieveMany" - case Update: - return "Update" - case Delete: - return "Delete" - case FetchDDL: - return "FetchDDL" - default: - return fmt.Sprintf("unknown action: %d", int(i)) - } -} - -// Model interface methods for database structs generated -type Model interface { - TableName() string - BeforeSave() error - Prepare() - Validate(action Action) error - TableInfo() *TableInfo -} - -// TableInfo describes a table in the database -type TableInfo struct { - Name string `json:"name"` - Columns []*ColumnInfo 
`json:"columns"` -} - -// ColumnInfo describes a column in the database table -type ColumnInfo struct { - Index int `json:"index"` - GoFieldName string `json:"go_field_name"` - GoFieldType string `json:"go_field_type"` - JSONFieldName string `json:"json_field_name"` - ProtobufFieldName string `json:"protobuf_field_name"` - ProtobufType string `json:"protobuf_field_type"` - ProtobufPos int `json:"protobuf_field_pos"` - Comment string `json:"comment"` - Notes string `json:"notes"` - Name string `json:"name"` - Nullable bool `json:"is_nullable"` - DatabaseTypeName string `json:"database_type_name"` - DatabaseTypePretty string `json:"database_type_pretty"` - IsPrimaryKey bool `json:"is_primary_key"` - IsAutoIncrement bool `json:"is_auto_increment"` - IsArray bool `json:"is_array"` - ColumnType string `json:"column_type"` - ColumnLength int64 `json:"column_length"` - DefaultValue string `json:"default_value"` -} - -// GetTableInfo retrieve TableInfo for a table -func GetTableInfo(name string) (*TableInfo, bool) { - val, ok := tables[name] - return val, ok -} diff --git a/examples/cfssl-model/model/ocsp_responses.go b/examples/cfssl-model/model/ocsp_responses.go deleted file mode 100644 index ff25d73..0000000 --- a/examples/cfssl-model/model/ocsp_responses.go +++ /dev/null @@ -1,163 +0,0 @@ -package model - -import ( - "database/sql" - "time" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = sql.LevelDefault - _ = null.Bool{} - _ = uuid.UUID{} -) - -/* -DB Table Details -------------------------------------- - - -CREATE TABLE `ocsp_responses` ( - `serial_number` varchar(128) NOT NULL, - `authority_key_identifier` varchar(128) NOT NULL, - `body` text NOT NULL, - `expiry` timestamp NULL DEFAULT NULL, - PRIMARY KEY (`serial_number`,`authority_key_identifier`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 - -JSON Sample -------------------------------------- -{ "serial_number": "DdgqIQHdVqgdnumwvwsfSjQPE", "authority_key_identifier": 
"ciTjUwiSPuHlGaiGbArdRBYZP", "body": "bwgShfAYjoqUhqaDvwBPNXWoX", "expiry": "2092-02-18T17:11:45.861789103+08:00"} - - - -*/ - -// OcspResponses struct is a row record of the ocsp_responses table in the cap database -type OcspResponses struct { - //[ 0] serial_number varchar(128) null: false primary: true isArray: false auto: false col: varchar len: 128 default: [] - SerialNumber string `gorm:"primary_key;column:serial_number;type:varchar;size:128;" json:"serial_number" db:"serial_number"` - //[ 1] authority_key_identifier varchar(128) null: false primary: true isArray: false auto: false col: varchar len: 128 default: [] - AuthorityKeyIdentifier string `gorm:"primary_key;column:authority_key_identifier;type:varchar;size:128;" json:"authority_key_identifier" db:"authority_key_identifier"` - //[ 2] body text(65535) null: false primary: false isArray: false auto: false col: text len: 65535 default: [] - Body string `gorm:"column:body;type:text;size:65535;" json:"body" db:"body"` - //[ 3] expiry timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - Expiry time.Time `gorm:"column:expiry;type:timestamp;" json:"expiry" db:"expiry"` -} - -var ocsp_responsesTableInfo = &TableInfo{ - Name: "ocsp_responses", - Columns: []*ColumnInfo{ - - &ColumnInfo{ - Index: 0, - Name: "serial_number", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(128)", - IsPrimaryKey: true, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 128, - GoFieldName: "SerialNumber", - GoFieldType: "string", - JSONFieldName: "serial_number", - ProtobufFieldName: "serial_number", - ProtobufType: "string", - ProtobufPos: 1, - }, - - &ColumnInfo{ - Index: 1, - Name: "authority_key_identifier", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(128)", - IsPrimaryKey: true, - IsAutoIncrement: false, - IsArray: false, - 
ColumnType: "varchar", - ColumnLength: 128, - GoFieldName: "AuthorityKeyIdentifier", - GoFieldType: "string", - JSONFieldName: "authority_key_identifier", - ProtobufFieldName: "authority_key_identifier", - ProtobufType: "string", - ProtobufPos: 2, - }, - - &ColumnInfo{ - Index: 2, - Name: "body", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "text", - DatabaseTypePretty: "text(65535)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "text", - ColumnLength: 65535, - GoFieldName: "Body", - GoFieldType: "string", - JSONFieldName: "body", - ProtobufFieldName: "body", - ProtobufType: "string", - ProtobufPos: 3, - }, - - &ColumnInfo{ - Index: 3, - Name: "expiry", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "Expiry", - GoFieldType: "time.Time", - JSONFieldName: "expiry", - ProtobufFieldName: "expiry", - ProtobufType: "uint64", - ProtobufPos: 4, - }, - }, -} - -// TableName sets the insert table name for this struct type -func (o *OcspResponses) TableName() string { - return "ocsp_responses" -} - -// BeforeSave invoked before saving, return an error if field is not populated. -func (o *OcspResponses) BeforeSave() error { - return nil -} - -// Prepare invoked before saving, can be used to populate fields etc. -func (o *OcspResponses) Prepare() { -} - -// Validate invoked before performing action, return an error if field is not populated. 
-func (o *OcspResponses) Validate(action Action) error { - return nil -} - -// TableInfo return table meta data -func (o *OcspResponses) TableInfo() *TableInfo { - return ocsp_responsesTableInfo -} diff --git a/examples/cfssl-model/model/schema_migrations.go b/examples/cfssl-model/model/schema_migrations.go deleted file mode 100644 index abe0497..0000000 --- a/examples/cfssl-model/model/schema_migrations.go +++ /dev/null @@ -1,115 +0,0 @@ -package model - -import ( - "database/sql" - "time" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = sql.LevelDefault - _ = null.Bool{} - _ = uuid.UUID{} -) - -/* -DB Table Details -------------------------------------- - - -CREATE TABLE `schema_migrations` ( - `version` bigint(20) NOT NULL, - `dirty` tinyint(1) NOT NULL, - PRIMARY KEY (`version`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 - -JSON Sample -------------------------------------- -{ "version": 14, "dirty": 61} - - - -*/ - -// SchemaMigrations struct is a row record of the schema_migrations table in the cap database -type SchemaMigrations struct { - //[ 0] version bigint null: false primary: true isArray: false auto: false col: bigint len: -1 default: [] - Version int64 `gorm:"primary_key;column:version;type:bigint;" json:"version" db:"version"` - //[ 1] dirty tinyint null: false primary: false isArray: false auto: false col: tinyint len: -1 default: [] - Dirty int32 `gorm:"column:dirty;type:tinyint;" json:"dirty" db:"dirty"` -} - -var schema_migrationsTableInfo = &TableInfo{ - Name: "schema_migrations", - Columns: []*ColumnInfo{ - - &ColumnInfo{ - Index: 0, - Name: "version", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "bigint", - DatabaseTypePretty: "bigint", - IsPrimaryKey: true, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "bigint", - ColumnLength: -1, - GoFieldName: "Version", - GoFieldType: "int64", - JSONFieldName: "version", - ProtobufFieldName: "version", - ProtobufType: "int64", 
- ProtobufPos: 1, - }, - - &ColumnInfo{ - Index: 1, - Name: "dirty", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "tinyint", - DatabaseTypePretty: "tinyint", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "tinyint", - ColumnLength: -1, - GoFieldName: "Dirty", - GoFieldType: "int32", - JSONFieldName: "dirty", - ProtobufFieldName: "dirty", - ProtobufType: "int32", - ProtobufPos: 2, - }, - }, -} - -// TableName sets the insert table name for this struct type -func (s *SchemaMigrations) TableName() string { - return "schema_migrations" -} - -// BeforeSave invoked before saving, return an error if field is not populated. -func (s *SchemaMigrations) BeforeSave() error { - return nil -} - -// Prepare invoked before saving, can be used to populate fields etc. -func (s *SchemaMigrations) Prepare() { -} - -// Validate invoked before performing action, return an error if field is not populated. -func (s *SchemaMigrations) Validate(action Action) error { - return nil -} - -// TableInfo return table meta data -func (s *SchemaMigrations) TableInfo() *TableInfo { - return schema_migrationsTableInfo -} diff --git a/examples/cfssl-model/model/self_keypair.go b/examples/cfssl-model/model/self_keypair.go deleted file mode 100644 index 9035052..0000000 --- a/examples/cfssl-model/model/self_keypair.go +++ /dev/null @@ -1,219 +0,0 @@ -package model - -import ( - "database/sql" - "time" - - "github.com/guregu/null" - "github.com/satori/go.uuid" -) - -var ( - _ = time.Second - _ = sql.LevelDefault - _ = null.Bool{} - _ = uuid.UUID{} -) - -/* -DB Table Details -------------------------------------- - - -CREATE TABLE `self_keypair` ( - `id` int(11) unsigned NOT NULL AUTO_INCREMENT, - `name` varchar(40) NOT NULL, - `private_key` text, - `certificate` text, - `created_at` timestamp NULL DEFAULT NULL, - `updated_at` timestamp NULL DEFAULT NULL, - UNIQUE KEY `id` (`id`), - KEY `name` (`name`) USING BTREE -) ENGINE=InnoDB AUTO_INCREMENT=7 
DEFAULT CHARSET=utf8mb4 - -JSON Sample -------------------------------------- -{ "name": "LrZSWvuAbMHHLxiDYKhSGWqZF", "private_key": "wUAOGjvBXIDoCrsYqDQpetUdT", "certificate": "QtxLIwyVDgjqgXkVmfmGWhyiC", "created_at": "2022-09-04T23:03:17.873061067+08:00", "updated_at": "2188-10-06T19:24:13.423938207+08:00", "id": 42} - - -Comments -------------------------------------- -[ 0] column is set for unsignedWarning table: self_keypair does not have a primary key defined, setting col position 1 id as primary key - - - - -*/ - -// SelfKeypair struct is a row record of the self_keypair table in the cap database -type SelfKeypair struct { - //[ 0] id uint null: false primary: true isArray: false auto: true col: uint len: -1 default: [] - ID uint32 `gorm:"primary_key;AUTO_INCREMENT;column:id;type:uint;" json:"id" db:"id"` - //[ 1] name varchar(40) null: false primary: false isArray: false auto: false col: varchar len: 40 default: [] - Name string `gorm:"column:name;type:varchar;size:40;" json:"name" db:"name"` - //[ 2] private_key text(65535) null: true primary: false isArray: false auto: false col: text len: 65535 default: [] - PrivateKey sql.NullString `gorm:"column:private_key;type:text;size:65535;" json:"private_key" db:"private_key"` - //[ 3] certificate text(65535) null: true primary: false isArray: false auto: false col: text len: 65535 default: [] - Certificate sql.NullString `gorm:"column:certificate;type:text;size:65535;" json:"certificate" db:"certificate"` - //[ 4] created_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - CreatedAt time.Time `gorm:"column:created_at;type:timestamp;" json:"created_at" db:"created_at"` - //[ 5] updated_at timestamp null: true primary: false isArray: false auto: false col: timestamp len: -1 default: [] - UpdatedAt time.Time `gorm:"column:updated_at;type:timestamp;" json:"updated_at" db:"updated_at"` -} - -var self_keypairTableInfo = &TableInfo{ - Name: "self_keypair", - Columns: 
[]*ColumnInfo{ - - &ColumnInfo{ - Index: 0, - Name: "id", - Comment: ``, - Notes: `column is set for unsignedWarning table: self_keypair does not have a primary key defined, setting col position 1 id as primary key -`, - Nullable: false, - DatabaseTypeName: "uint", - DatabaseTypePretty: "uint", - IsPrimaryKey: true, - IsAutoIncrement: true, - IsArray: false, - ColumnType: "uint", - ColumnLength: -1, - GoFieldName: "ID", - GoFieldType: "uint32", - JSONFieldName: "id", - ProtobufFieldName: "id", - ProtobufType: "uint32", - ProtobufPos: 1, - }, - - &ColumnInfo{ - Index: 1, - Name: "name", - Comment: ``, - Notes: ``, - Nullable: false, - DatabaseTypeName: "varchar", - DatabaseTypePretty: "varchar(40)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "varchar", - ColumnLength: 40, - GoFieldName: "Name", - GoFieldType: "string", - JSONFieldName: "name", - ProtobufFieldName: "name", - ProtobufType: "string", - ProtobufPos: 2, - }, - - &ColumnInfo{ - Index: 2, - Name: "private_key", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "text", - DatabaseTypePretty: "text(65535)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "text", - ColumnLength: 65535, - GoFieldName: "PrivateKey", - GoFieldType: "sql.NullString", - JSONFieldName: "private_key", - ProtobufFieldName: "private_key", - ProtobufType: "string", - ProtobufPos: 3, - }, - - &ColumnInfo{ - Index: 3, - Name: "certificate", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "text", - DatabaseTypePretty: "text(65535)", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "text", - ColumnLength: 65535, - GoFieldName: "Certificate", - GoFieldType: "sql.NullString", - JSONFieldName: "certificate", - ProtobufFieldName: "certificate", - ProtobufType: "string", - ProtobufPos: 4, - }, - - &ColumnInfo{ - Index: 4, - Name: "created_at", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: 
"timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "CreatedAt", - GoFieldType: "time.Time", - JSONFieldName: "created_at", - ProtobufFieldName: "created_at", - ProtobufType: "uint64", - ProtobufPos: 5, - }, - - &ColumnInfo{ - Index: 5, - Name: "updated_at", - Comment: ``, - Notes: ``, - Nullable: true, - DatabaseTypeName: "timestamp", - DatabaseTypePretty: "timestamp", - IsPrimaryKey: false, - IsAutoIncrement: false, - IsArray: false, - ColumnType: "timestamp", - ColumnLength: -1, - GoFieldName: "UpdatedAt", - GoFieldType: "time.Time", - JSONFieldName: "updated_at", - ProtobufFieldName: "updated_at", - ProtobufType: "uint64", - ProtobufPos: 6, - }, - }, -} - -// TableName sets the insert table name for this struct type -func (s *SelfKeypair) TableName() string { - return "self_keypair" -} - -// BeforeSave invoked before saving, return an error if field is not populated. -func (s *SelfKeypair) BeforeSave() error { - return nil -} - -// Prepare invoked before saving, can be used to populate fields etc. -func (s *SelfKeypair) Prepare() { -} - -// Validate invoked before performing action, return an error if field is not populated. 
-func (s *SelfKeypair) Validate(action Action) error { - return nil -} - -// TableInfo return table meta data -func (s *SelfKeypair) TableInfo() *TableInfo { - return self_keypairTableInfo -} diff --git a/go.mod b/go.mod index ab8b4f4..dde0901 100644 --- a/go.mod +++ b/go.mod @@ -1,78 +1,56 @@ -module gitlab.oneitfarm.com/bifrost/capitalizone +module github.com/ztalab/ZACA go 1.16 require ( - github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 github.com/araddon/dateparse v0.0.0-20210207001429-0eec95c9db7e github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a - github.com/davecgh/go-spew v1.1.1 - github.com/deckarep/golang-set v1.7.1 + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/garyburd/redigo v1.6.3 github.com/gin-contrib/pprof v1.3.0 github.com/gin-gonic/gin v1.6.3 - github.com/go-logr/logr v0.2.1 // indirect github.com/go-openapi/jsonreference v0.19.3 // indirect github.com/go-openapi/spec v0.19.3 // indirect github.com/go-resty/resty/v2 v2.6.0 github.com/go-sql-driver/mysql v1.6.0 - github.com/gogo/protobuf v1.3.2 github.com/golang-migrate/migrate/v4 v4.14.1 github.com/google/certificate-transparency-go v1.1.2-0.20210728111105-5f7e9ba4be3d // indirect - github.com/google/gofuzz v1.2.0 github.com/gorilla/mux v1.8.0 github.com/guregu/null v4.0.0+incompatible - github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0 - github.com/hashicorp/go-multierror v1.1.0 github.com/hashicorp/vault/api v1.1.0 - github.com/hyperledger/fabric-ca v1.5.0 github.com/jinzhu/gorm v1.9.16 github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 github.com/joho/godotenv v1.3.0 github.com/json-iterator/go v1.1.11 + github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46 // indirect github.com/klauspost/compress v1.10.10 // indirect - github.com/labstack/echo/v4 v4.1.11 github.com/lib/pq v1.9.0 // indirect github.com/mailru/easyjson v0.7.0 // indirect + github.com/mattn/go-sqlite3 v1.14.5 // indirect 
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mayocream/pki v0.0.0-20210826155834-685adbcfbc3b - github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.4.0 // indirect github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect - github.com/nxadm/tail v1.4.5 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/satori/go.uuid v1.2.0 - github.com/shopspring/decimal v1.2.0 github.com/smartystreets/assertions v1.1.1 // indirect + github.com/spf13/afero v1.2.2 // indirect github.com/spf13/cast v1.3.1 - github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.1 github.com/spiffe/go-spiffe/v2 v2.0.0-beta.4 - github.com/stretchr/testify v1.7.0 github.com/swaggo/files v0.0.0-20190704085106-630677cd5c14 github.com/swaggo/gin-swagger v1.3.0 github.com/swaggo/swag v1.5.1 github.com/tal-tech/go-zero v1.1.4 - github.com/tidwall/gjson v1.6.8 github.com/urfave/cli v1.22.5 github.com/valyala/fasthttp v1.15.1 - gitlab.oneitfarm.com/bifrost/cfssl v1.5.2 - gitlab.oneitfarm.com/bifrost/cilog v0.1.11-0.20210328092732-32c048345c0a - gitlab.oneitfarm.com/bifrost/go-netstat v0.0.0-20210714072428-58eb4d3c7c06 - gitlab.oneitfarm.com/bifrost/go-toolbox v0.1.1 - gitlab.oneitfarm.com/bifrost/influxdata v0.0.0-20201231101639-c65e9be7f18a + github.com/ztalab/cfssl v0.0.2 go.uber.org/multierr v1.6.0 go.uber.org/zap v1.17.0 golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9 - golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gorm.io/driver/mysql v1.0.3 gorm.io/gorm v1.20.8 - k8s.io/api v0.19.3 - k8s.io/apimachinery v0.19.3 - k8s.io/client-go v0.19.3 - k8s.io/klog/v2 v2.3.0 - k8s.io/kube-openapi v0.0.0-20200831175022-64514a1d5d59 - sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index f7bf0a5..1c7d6a1 100644 --- a/go.sum +++ 
b/go.sum @@ -12,7 +12,6 @@ cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -70,46 +69,11 @@ github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuE github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v44.0.0+incompatible h1:e82Yv2HNpS0kuyeCrV29OPKvEiqfs2/uJHic3/3iKdg= -github.com/Azure/azure-sdk-for-go v44.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= -github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest v0.11.0 h1:tnO41Uo+/0sxTMFY/U7aKg2abek3JOnnXcuSuba74jI= -github.com/Azure/go-autorest/autorest v0.11.0/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0 h1:SigMbuFNuKgc1xcGhaeapbh+8fgsu+GxgDRFyg7f5lM= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.0 h1:nSMjYIe24eBYasAIxt859TxyXef/IqoH+8/g4+LmcVs= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.0/go.mod h1:QRTvSZQpxqm8mSErhnbI+tANIBAKP7B+UIE2z4ypUO0= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.0 h1:Ml+UCrnlKD+cJmSzrZ/RDcDw86NjkRUpnFh7V5JUhzU= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.0/go.mod h1:JljT387FplPzBA31vUcvsetLKF3pec5bdAxjVU4kI2s= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= -github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks 
v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/autorest/mocks v0.4.0 h1:z20OWOSG5aCye0HEkDp6TPmP17ZcfeMxPi6HnSALa8c= -github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.3.0 h1:3I9AAI63HfcLtphd9g39ruUwRI+Ca+z/f36KHPFRUss= -github.com/Azure/go-autorest/autorest/validation v0.3.0/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/logger v0.2.0 h1:e4RVHVZKC5p6UANLJHkM4OfR1UKZPj8Wt8Pcx+3oqrE= -github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= -github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -130,22 +94,17 @@ github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuN github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod 
h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/StackExchange/wmi v0.0.0-20170410192909-ea383cf3ba6e/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= -github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod 
h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= @@ -193,9 +152,7 @@ github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.41/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.37.0 h1:GzFnhOIsrGyQ69s7VgqtrG2BG8v7X7vwB3Xpbd/DBBk= github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= @@ -239,7 +196,6 @@ github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a h1:8d1CEOF1xldesKds5tRG3tExBsMOgWYownMHNCsev54= github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= -github.com/cloudflare/cfssl v1.4.1/go.mod h1:KManx/OJPb5QY+y0+o/898AMcM128sF0bURvoVUSjTo= github.com/cloudflare/go-metrics v0.0.0-20151117154305-6a9aea36fb41/go.mod h1:eaZPlJWD+G9wseg1BuRXlHnjntPMrywMsyxf+LTOdP4= github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= github.com/cloudflare/redoctober v0.0.0-20171127175943-746a508df14c/go.mod h1:6Se34jNoqrd8bTxrmJB2Bg2aoZ2CdSXonils9NsiNgo= @@ -295,25 +251,15 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dchest/siphash v1.2.1/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBlVnOnt4= -github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= -github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/denisenkom/go-mssqldb v0.0.0-20200620013148-b91950f658ec h1:NfhRXXFDPxcF5Cwo06DzeIaE7uuJtAUhsDwH3LNsjos= github.com/denisenkom/go-mssqldb v0.0.0-20200620013148-b91950f658ec/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= -github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.3 h1:DBuH/9GFaWbDRa42qsut/hbQu+srAQ0rPWnUoiGX7CA= github.com/dhui/dktest v0.3.3/go.mod h1:EML9sP4sqJELHn4jV7B0TY8oF6077nk83/tz7M56jcQ= -github.com/digitalocean/godo v1.7.5 h1:JOQbAO6QT1GGjor0doT0mXefX2FgUDPOpYh2RaXA+ko= -github.com/digitalocean/godo v1.7.5/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= -github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= -github.com/dnaeon/go-vcr v1.0.1 
h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible h1:iWPIG7pWIsCwT6ZtHnTUpoVMnete7O/pzd9HFE3+tn8= @@ -322,8 +268,6 @@ github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKoh github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -332,8 +276,6 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod 
h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/proto v1.9.0/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= @@ -352,15 +294,11 @@ github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4 github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 h1:Yzb9+7DPaBjB8zlTR87/ElzFsnQfuHnVUVqpZZIcV5Y= github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= -github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c= @@ -380,12 +318,11 @@ github.com/fullstorydev/grpcurl 
v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZU github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw= github.com/fullstorydev/grpcurl v1.8.2 h1:2II5e++aFnctnPJir3GL6cPSwF69Ord1u/9O+fv1vrI= github.com/fullstorydev/grpcurl v1.8.2/go.mod h1:YvWNT3xRp2KIRuvCphFodG0fKkMXwaxA9CJgKCcyzUQ= -github.com/garyburd/redigo v1.6.2 h1:yE/pwKCrbLpLpQICzYTeZ7JsTA/C53wFTJHaEtRqniM= -github.com/garyburd/redigo v1.6.2/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/garyburd/redigo v1.6.3 h1:HCeeRluvAgMusMomi1+6Y5dmFOdYV/JzoRrrbFlkGIc= +github.com/garyburd/redigo v1.6.3/go.mod h1:rTb6epsqigu3kYKBnaF028A7Tf/Aw5s0cqA47doKKqw= github.com/getsentry/raven-go v0.0.0-20180121060056-563b81fc02b7/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/gzip v0.0.1 h1:ezvKOL6jH+jlzdHNE4h9h8q8uMpDQjyl0NN0Jd7jozc= github.com/gin-contrib/gzip v0.0.1/go.mod h1:fGBJBCdt6qCZuCAOwWuFhBB4OOq9EFqlo5dEaFhhu5w= @@ -408,7 +345,6 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-kit/kit v0.7.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= @@ -417,27 +353,19 @@ github.com/go-ldap/ldap/v3 v3.1.3/go.mod h1:3rbOH3jRS2u6jg2rJnKAMLE/xQyCKIveG2Sa github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.2.1 h1:fV3MLmabKIZ383XifUjFSwcoGee0v9qgPp8wy5svibE= -github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference 
v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= @@ -457,7 +385,6 @@ github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-resty/resty/v2 v2.6.0 h1:joIR5PNLM2EFqqESUjCMGXrWmXNHEU9CEiK813oKYS4= github.com/go-resty/resty/v2 v2.6.0/go.mod h1:PwvJS6hvaPkjtjNg9ph+VrSD92bi5Zq73w/BIH7cC3Q= -github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= @@ -504,7 +431,6 @@ github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -557,15 +483,10 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M= -github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gops v0.3.7/go.mod h1:bj0cwMmX1X4XIJFTjR99R5sCxNssNJ8HebFNvoQlmgY= github.com/google/licenseclassifier v0.0.0-20210325184830-bb04aff29e72/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -601,18 +522,9 @@ github.com/google/uuid v1.1.1/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= -github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/gophercloud/gophercloud v0.1.0 h1:P/nh25+rzXouhytV2pUHBb65fnds26Ghl8/391+sT5o= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= @@ -620,7 +532,6 @@ github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXG github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= 
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -631,8 +542,6 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grantae/certinfo v0.0.0-20170412194111-59d56a35515b/go.mod h1:zT/uzhdQGTqlwTq7Lpbj3JoJQWfPfIJ1tE0OidAmih8= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= @@ -662,8 +571,6 @@ github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brv github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0 h1:UgODETBAoROFMSSVgg0v8vVpD9Tol8FtYcAeomcWJtY= 
-github.com/hashicorp/go-discover v0.0.0-20201029210230-738cb3105cd0/go.mod h1:D4eo8/CN92vm9/9UDG+ldX1/fMFa4kpl8qzyTolus8o= github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd/go.mod h1:9bjs9uLqI8l75knNv3lV1kA55veR+WUPSiKIWcQHudI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-hclog v0.12.0 h1:d4QkX8FRTYaKaCZBoXYY8zJX2BXjWxurN/GA2tkrmZM= @@ -698,34 +605,22 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/mdns v1.0.1 h1:XFSOubp8KWB+Jd2PDyaX5xUd5bhSP/+pTDZVDMzZJM8= -github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/vault/api v1.1.0 h1:QcxC7FuqEl0sZaIjcXB/kNEeBa0DH5z57qbWBvZwLC4= github.com/hashicorp/vault/api v1.1.0/go.mod h1:R3Umvhlxi2TN7Ex2hzOowyeNb+SfbVWI973N+ctaFMk= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267 h1:e1ok06zGrWJW91rzRroyl5nRNqraaBe4d5hiKcVZuHM= github.com/hashicorp/vault/sdk v0.1.14-0.20200519221838-e0cfd64bc267/go.mod h1:WX57W2PwkrOPQ6rVQk+dy5/htHIaB4aBM70EwKThu10= -github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 h1:O/pT5C1Q3mVXMyuqg7yuAWUg/jMZR1/0QTzTRdNR6Uw= -github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443/go.mod h1:bEpDU35nTu0ey1EXjwNwPjI9xErAsoOCmcMb9GKvyxo= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 
github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/hyperledger/fabric v1.4.11/go.mod h1:tGFAOCT696D3rG0Vofd2dyWYLySHlh0aQjf7Q1HAju0= -github.com/hyperledger/fabric-amcl v0.0.0-20200424173818-327c9e2cf77a/go.mod h1:X+DIyUsaTmalOpmpQfIvFZjKHQedrURQ5t4YqquX7lE= -github.com/hyperledger/fabric-ca v1.5.0 h1:perByHPnR8rlTDnw1BIlCGaR4H62Dl06hCLgNzdQDIQ= -github.com/hyperledger/fabric-ca v1.5.0/go.mod h1:PTEvOLa48oZMCWecObv1g8Pu8u0t0PJ+3vmmNm7adbY= -github.com/hyperledger/fabric-lib-go v1.0.0/go.mod h1:H362nMlunurmHwkYqR5uHL2UDWbQdbfz74n8kbCFsqc= github.com/iancoleman/strcase v0.1.2/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -754,8 +649,6 @@ github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9 github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod 
h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= -github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= -github.com/jarcoal/httpmock v1.0.5 h1:cHtVEcTxRSX4J0je7mWPfc9BpDpqzXSJ5HbymZmyHck= github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -772,13 +665,10 @@ github.com/jinzhu/now v1.1.1 h1:g39TucaRWyV3dwDO++eEc6qf8TVIQ/Da48WmqjZ3i7E= github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4= github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo= -github.com/jmoiron/sqlx v0.0.0-20180124204410-05cef0741ade/go.mod h1:IiEW3SEiiErVyFdH8NTuWjSifiEQKUoyK3LNqr2kCHU= github.com/jmoiron/sqlx v1.2.0/go.mod 
h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 h1:lrdPtrORjGv1HbbEvKWDUAy97mPpFm4B8hp77tcCUJY= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= @@ -788,8 +678,6 @@ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22 github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62 h1:JHCT6xuyPUrbbgAPE/3dqlvUKzRHMNuTBKKUb6OeR/k= -github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -842,15 +730,10 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/ktrysmt/go-bitbucket v0.6.4/go.mod h1:9u0v3hsd2rqCHRIpbir1oP7F58uo5dq19sBYvuMoyQ4= github.com/kylelemons/go-gypsy v0.0.0-20160905020020-08cad365cd28/go.mod h1:T/T7jsxVqf9k/zYOqbgNAsANsjxTd1Yq3htjDhQ1H0c= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/labstack/echo/v4 v4.1.11 h1:z0BZoArY4FqdpUEl+wlHp4hnr/oSR6MTmQmv8OHSoww= -github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= -github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/leodido/go-urn v1.1.0/go.mod 
h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v0.0.0-20180201184707-88edab080323/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= @@ -861,14 +744,11 @@ github.com/lib/pq v1.9.0 h1:L8nSXQQzAYByakOFMTwpjRoHsMJklur4Gi59b6VivR8= github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/linode/linodego v0.7.1 h1:4WZmMpSA2NRwlPZcc0+4Gyn7rr99Evk9bnr0B3gXRKE= -github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZinAbj2sY= github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -912,7 +792,6 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/mayocream/pki v0.0.0-20210826155834-685adbcfbc3b h1:cn2p9b5eKJOumTAjCSRTSaLQAog6jZ/T0pNFoERtWGg= github.com/mayocream/pki v0.0.0-20210826155834-685adbcfbc3b/go.mod h1:KTrK0VHn1EJ8+WBgA4yfJqBJpQeFjFzB6v9tQu9mXvs= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= -github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= @@ -930,7 +809,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.0 h1:7ks8ZkOP5/ujthUsT07rNv+nkLXCQWKNHuwzOAesEks= github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -945,13 +823,11 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwd github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= 
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mutecomm/go-sqlcipher/v4 v4.4.0/go.mod h1:PyN04SaWalavxRGH9E8ZftG6Ju7rsPrGmQRjrEaVpiY= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= @@ -962,16 +838,11 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= -github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= -github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= 
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.5 h1:obHEce3upls1IBn1gTw/o7bCv7OJb6Ib/o7wNO+4eKw= -github.com/nxadm/tail v1.4.5/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -981,24 +852,14 @@ github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2f github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod 
h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= -github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= @@ -1017,8 +878,6 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= -github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= -github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= 
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1027,7 +886,6 @@ github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtb github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pierrec/lz4 v2.5.1+incompatible h1:Yq0up0149Hh5Ekhm/91lgkZuD1ZDnXNM26bycpTzYBM= @@ -1038,7 +896,6 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= -github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1051,7 +908,6 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.3.0/go.mod 
h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -1070,8 +926,6 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -1091,8 +945,6 @@ github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 
h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= -github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1118,7 +970,6 @@ github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1: github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -1126,10 +977,7 @@ github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNX github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus 
v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= @@ -1147,8 +995,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= github.com/snowflakedb/gosnowflake v1.3.5/go.mod h1:13Ky+lxzIm3VqNDZJdyvu9MCGy+WgRdYFdXp96UcLZU= -github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= -github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= @@ -1171,7 +1017,6 @@ github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 
@@ -1186,7 +1031,6 @@ github.com/spiffe/go-spiffe/v2 v2.0.0-beta.4/go.mod h1:TEfgrEcyFhuSuvqohJt6IxENU github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v1.0.0/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -1208,18 +1052,9 @@ github.com/swaggo/gin-swagger v1.3.0 h1:eOmp7r57oUgZPw2dJOjcGNMse9cvXcI4tTqBcnZt github.com/swaggo/gin-swagger v1.3.0/go.mod h1:oy1BRA6WvgtCp848lhxce7BnWH4C8Bxa0m5SkWx+cS0= github.com/swaggo/swag v1.5.1 h1:2Agm8I4K5qb00620mHq0VJ05/KT4FtmALPIcQR9lEZM= github.com/swaggo/swag v1.5.1/go.mod h1:1Bl9F/ZBpVWh22nY0zmYyASPO1lI/zIwRDrpZU+tv8Y= -github.com/sykesm/zap-logfmt v0.0.4/go.mod h1:AuBd9xQjAe3URrWT1BBDk2v2onAZHkZkWRMiYZXiZWA= github.com/tal-tech/go-zero v1.1.4 h1:lwndCg9cYUV3TgmkDuYUGF2v/2wSqvjZAthrXTGVvc8= github.com/tal-tech/go-zero v1.1.4/go.mod h1:LuYkWF2BE2O/TB9IS+zC86oE1hhS6Ty4yGSBh+JKPaY= -github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible h1:8uRvJleFpqLsO77WaAh2UrasMOzd8MxXrNj20e7El+Q= -github.com/tencentcloud/tencentcloud-sdk-go v3.0.83+incompatible/go.mod h1:0PfYow01SHPMhKY31xa+EFz2RStxIqj6JFAJS+IkCi4= -github.com/tidwall/gjson v1.6.8 h1:CTmXMClGYPAmln7652e69B7OLXfTi5ABcPPwjIWUv7w= -github.com/tidwall/gjson v1.6.8/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= -github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= -github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= 
github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU= -github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= @@ -1252,15 +1087,9 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.15.1 h1:eRb5jzWhbCn/cGu3gNJMcOfPUfXgXCcQIOHjh9ajAS8= github.com/valyala/fasthttp v1.15.1/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA= -github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= -github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ= -github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= -github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= -github.com/weppos/publicsuffix-go v0.5.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= github.com/weppos/publicsuffix-go v0.13.0 h1:0Tu1uzLBd1jPn4k6OnMmOPZH/l/9bj9kUOMMkoRs6Gg= github.com/weppos/publicsuffix-go v0.13.0/go.mod 
h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= @@ -1286,29 +1115,14 @@ github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxt github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= -github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= github.com/zmap/zcrypto v0.0.0-20200513165325-16679db567ff/go.mod h1:TxpejqcVKQjQaVVmMGfzx5HnmFMdIU+vLtaCyPBfGI4= github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21 h1:PIpcdSOg3pMdFJUBg5yR9xxcj5rm/SGAyaWT/wK6Kco= github.com/zmap/zcrypto v0.0.0-20200911161511-43ff0ea04f21/go.mod h1:TxpejqcVKQjQaVVmMGfzx5HnmFMdIU+vLtaCyPBfGI4= -github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb h1:vxqkjztXSaPVDc8FQCdHTaejm2x747f6yPbnu1h2xkg= -github.com/zmap/zlint v0.0.0-20190806154020-fd021b4cfbeb/go.mod h1:29UiAJNsiVdvTBFCJW8e3q6dcDbOoPkhMgttOSCIMMY= github.com/zmap/zlint/v2 v2.2.1 h1:b2kI/ToXX16h2wjV2c6Da65eT6aTMtkLHKetXuM9EtI= github.com/zmap/zlint/v2 v2.2.1/go.mod h1:ixPWsdq8qLxYRpNUTbcKig3R7WgmspsHGLhCCs6rFAM= +github.com/ztalab/cfssl v0.0.2 h1:Lx1CpDSo2ifJ4vqZcTXIPyRBSGuAQBCNiszuEqGnY6w= +github.com/ztalab/cfssl v0.0.2/go.mod h1:9DFxjua+z0uIA0kzWwrJL//weYRUb+tiSvQoeRbfZ7Y= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -gitlab.oneitfarm.com/bifrost/cfssl v1.5.2 h1:j/747GF8NWirBJf5TcRFF9hm8K3EDwPGbGTiyj4OrLs= -gitlab.oneitfarm.com/bifrost/cfssl v1.5.2/go.mod h1:XAP4/ld8mU9W3HYVdNy3kTsfRRFcSGI83utlAMvc1ME= -gitlab.oneitfarm.com/bifrost/cilog v0.0.5/go.mod h1:54KVGp85GlnQPigc0Oim2NtE599A5YQHi+JEdse0jgw= 
-gitlab.oneitfarm.com/bifrost/cilog v0.1.5-0.20210114110313-04a4e6e6bb39/go.mod h1:HDXsXRYII5brSk0RxIbIuvTpNLY92fE7jrl1+kgm670= -gitlab.oneitfarm.com/bifrost/cilog v0.1.11-0.20210328092732-32c048345c0a h1:h1Aqz137zJlaA8cbjZejPPM/4A7uKb6ZM6ZCWMGwfto= -gitlab.oneitfarm.com/bifrost/cilog v0.1.11-0.20210328092732-32c048345c0a/go.mod h1:HDXsXRYII5brSk0RxIbIuvTpNLY92fE7jrl1+kgm670= -gitlab.oneitfarm.com/bifrost/go-netstat v0.0.0-20210714072428-58eb4d3c7c06 h1:jBQ7h8ystm3tRhXX3cblOJxrfhYY//xADbdEa7pRqBc= -gitlab.oneitfarm.com/bifrost/go-netstat v0.0.0-20210714072428-58eb4d3c7c06/go.mod h1:6VbR77JWMWasXJKycOqTp/uyH1LrQPT+L6MHy9TM6Hw= -gitlab.oneitfarm.com/bifrost/go-toolbox v0.1.1 h1:7xb9Ao51wqiRJDF/5G3Fex0Gpzu7JWM8Nc7BwRR2DvM= -gitlab.oneitfarm.com/bifrost/go-toolbox v0.1.1/go.mod h1:5PTNPw68digIgpEcA8yQdOfNJjvKtXFJUvLYqB9m6PU= -gitlab.oneitfarm.com/bifrost/influxdata v0.0.0-20201231101639-c65e9be7f18a h1:/uVPMg3TM46mCM1wodnjhZm3LKwhWa0Uu+tkENnI90I= -gitlab.oneitfarm.com/bifrost/influxdata v0.0.0-20201231101639-c65e9be7f18a/go.mod h1:kZ/+MxmIJ6LiERFsfEAIKWUR/2I6KC+RhWyd9rQs5PM= -gitlab.oneitfarm.com/bifrost/logrus-redis-hook v0.0.2-0.20201203023123-29afb561fa48 h1:hkNy6HmKEAmtHWYNXQXljlrY1ehotGvDkJdOrbtMbJk= -gitlab.oneitfarm.com/bifrost/logrus-redis-hook v0.0.2-0.20201203023123-29afb561fa48/go.mod h1:OPUFWASjDWBiNKJUtfL3PFOLy3GX84d5XUDdm6EruBo= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1361,7 +1175,6 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= -go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= @@ -1404,7 +1217,6 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= @@ -1415,7 +1227,6 @@ golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1431,9 +1242,7 @@ golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191205180655-e7c4368fe9dd/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200124225646-8b5121be2f68/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1479,7 +1288,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1510,7 +1318,6 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1523,7 +1330,6 @@ golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -1531,7 +1337,6 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201029221708-28c70e62bb1d/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1580,7 +1385,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20171017063910-8dbc5d05d6ed/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1595,7 +1399,6 @@ golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181228144115-9a3f9b0469bb/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190129075346-302c3dd5f1cc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1613,12 +1416,10 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1639,11 +1440,9 @@ golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1652,7 +1451,6 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201029080932-201ba4db2418/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1676,9 +1474,7 @@ golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term 
v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1699,7 +1495,6 @@ golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -1819,7 +1614,6 @@ google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4yl google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.49.0/go.mod h1:BECiH72wsfwUvOVn3+btPD5WHi0LzavZReBndi42L18= -google.golang.org/api v0.50.0 
h1:LX7NFCFYOHzr7WHaYiRUpeipZe9o5L8T+2F4Z798VDw= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= @@ -1952,9 +1746,7 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1966,20 +1758,16 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod 
h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w= @@ -1987,7 +1775,6 @@ gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= @@ -2019,33 +1806,6 @@ honnef.co/go/tools 
v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/api v0.19.3 h1:GN6ntFnv44Vptj/b+OnMW7FmzkpDoIDLZRvKX3XH9aU= -k8s.io/api v0.19.3/go.mod h1:VF+5FT1B74Pw3KxMdKyinLo+zynBaMBiAfGMuldcNDs= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.19.3 h1:bpIQXlKjB4cB/oNpnNnV+BybGPR7iP5oYpsOTEJ4hgc= -k8s.io/apimachinery v0.19.3/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/client-go v0.19.3 h1:ctqR1nQ52NUs6LpI0w+a5U+xjYwflFwA13OJKcicMxg= -k8s.io/client-go v0.19.3/go.mod h1:+eEMktZM+MG0KO+PTkci8xnbCZHvj9TqR6Q1XDUIJOM= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco= -k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi 
v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20200831175022-64514a1d5d59 h1:hlbT1c/UQK1Zf9lsxemrM7C/WnIPwGHgFUgpkVraHcs= -k8s.io/kube-openapi v0.0.0-20200831175022-64514a1d5d59/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= -k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= @@ -2063,10 +1823,6 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8 rsc.io/goversion v1.0.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/initer/client.go b/initer/client.go index 
5887375..24462e0 100644 --- a/initer/client.go +++ b/initer/client.go @@ -3,43 +3,20 @@ package initer import ( "crypto/tls" "fmt" - "gitlab.oneitfarm.com/bifrost/go-toolbox/rediscluster" "net/http" "time" vaultAPI "github.com/hashicorp/vault/api" "github.com/pkg/errors" - influx_client "gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client/v2" + influx_client "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/v2" mysqlDriver "gorm.io/driver/mysql" "gorm.io/gorm" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" - "gitlab.oneitfarm.com/bifrost/capitalizone/util" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql" + "github.com/ztalab/ZACA/pkg/influxdb" ) -func kubeDialer(config *core.Config, logger *core.Logger) (kubeCli *kubernetes.Clientset, err error) { - err = util.RetryWithTimeout(func() error { - kubeConfig, err := rest.InClusterConfig() - if err != nil { - return fmt.Errorf("failed to create in-cluster kubernetes client configuration: %v", err) - } - logger.Infof("kubeconfig: %v", kubeConfig) - - kubeCli, err = kubernetes.NewForConfig(kubeConfig) - if err != nil { - return fmt.Errorf("failed to create kubernetes client: %v", err) - } - return nil - }, time.Second, 1*time.Minute, logger.SugaredLogger) - - logger.Info("kubernetes client inited.") - return -} - func mysqlDialer(config *core.Config, logger *core.Logger) (*gorm.DB, error) { db, err := gorm.Open(mysqlDriver.Open(config.Mysql.Dsn), &gorm.Config{ DisableForeignKeyConstraintWhenMigrating: true, @@ -59,7 +36,7 @@ func mysqlDialer(config *core.Config, logger *core.Logger) (*gorm.DB, error) { func influxdbDialer(config *core.Config, logger *core.Logger) { if !config.Influxdb.Enabled { - logger.Warn("Influxdb 功能禁用") + logger.Warn("Influxdb Function disabled") return } tick := time.NewTicker(10 * 
time.Second) @@ -126,32 +103,3 @@ func vaultDialer(config *core.Config, logger *core.Logger) (*vaultAPI.Client, er return cli, nil } - -func redisDialer(config *core.Config, logger *core.Logger) (cluster *rediscluster.Cluster, err error) { - fmt.Printf("redis nodes: %s", config.Redis.Nodes) - if len(config.Redis.Nodes) == 0 { - logger.Warn("Redis Nodes未配置") - return nil, nil - } - cluster, err = rediscluster.NewCluster( - &rediscluster.Options{ - StartNodes: config.Redis.Nodes, - ConnTimeout: 500 * time.Millisecond, - ReadTimeout: 500 * time.Millisecond, - WriteTimeout: 500 * time.Millisecond, - KeepAlive: 16, - AliveTime: 60 * time.Second, - }) - - if err != nil { - return nil, err - } - - resp, err := cluster.Do("ping", "") - if err != nil { - return nil, err - } - logger.Infof("redis ping: %v", resp) - - return -} diff --git a/initer/election.go b/initer/election.go deleted file mode 100644 index 7f0c5a0..0000000 --- a/initer/election.go +++ /dev/null @@ -1,48 +0,0 @@ -package initer - -import ( - "k8s.io/client-go/tools/leaderelection" - - "gitlab.oneitfarm.com/bifrost/capitalizone/core" -) - -type elector struct { - elector *leaderelection.LeaderElector -} - -func (ele *elector) IsLeader() bool { - if core.Is.Config.Election.AlwaysLeader { - return true - } - if core.Is.Config.Election.Enabled && ele.elector != nil { - return ele.elector.IsLeader() - } - return false -} - -//func elect(conf *core.Config, logger *core.Logger, kubeCli *kubernetes.Clientset) (*elector, error) { -// ele := elector{} -// if conf.Election.Enabled { -// e := election.NewElector(election.Config{ -// Client: kubeCli, -// ElectionID: conf.Election.ID, -// Callbacks: leaderelection.LeaderCallbacks{ -// OnStartedLeading: func(ctx context.Context) { -// logger.Info("started leading..") -// }, -// OnNewLeader: func(identity string) { -// logger.Infof("new leader: %v", identity) -// }, -// OnStoppedLeading: func() { -// logger.Info("stopped leading.") -// }, -// }, -// Logger: 
logger.Named("elector"), -// }) -// ele.elector = e.Elector -// go func() { -// e.Run(context.Background()) -// }() -// } -// return &ele, nil -//} diff --git a/initer/flags.go b/initer/flags.go index dd221cd..2f1ed55 100644 --- a/initer/flags.go +++ b/initer/flags.go @@ -9,19 +9,18 @@ import ( "github.com/joho/godotenv" "github.com/spf13/viper" - cfssl_config "gitlab.oneitfarm.com/bifrost/cfssl/config" + cfssl_config "github.com/ztalab/cfssl/config" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/core/config" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/core/config" + "github.com/ztalab/ZACA/pkg/influxdb" ) const ( - G_ = "IS" - ConfName = "conf" - CmdlineEnvDefault = "default" - CmdlineEnvTest = "test" - CmdlineEnvProd = "prod" + G_ = "IS" + ConfName = "conf" + CmdlineEnvTest = "test" + CmdlineEnvProd = "prod" ) var ( @@ -33,10 +32,9 @@ var ( func parseConfigs(c *cli.Context) (core.Config, error) { // Cmdline flags flag.Parse() - // ENV 读取 godotenv.Load(*flagEnvfile) // Default config - viper.SetConfigName(fmt.Sprintf("%v.%v", ConfName, CmdlineEnvDefault)) + viper.SetConfigName(fmt.Sprintf("%v.%v", ConfName, CmdlineEnvTest)) viper.AddConfigPath(".") if err := viper.ReadInConfig(); err != nil { return core.Config{}, err @@ -79,13 +77,6 @@ func parseConfigs(c *cli.Context) (core.Config, error) { Key: viper.GetString("log.log-proxy.key"), }, }, - Registry: config.Registry{ - SelfName: viper.GetString("registry.self-name") + "." 
+ hs, - Command: c.Args().First(), - }, - Redis: config.Redis{ - Nodes: viper.GetStringSlice("redis.nodes"), - }, Keymanager: config.Keymanager{ UpperCa: viper.GetStringSlice("keymanager.upper-ca"), SelfSign: viper.GetBool("keymanager.self-sign"), @@ -104,16 +95,6 @@ func parseConfigs(c *cli.Context) (core.Config, error) { Singleca: config.Singleca{ ConfigPath: viper.GetString("singleca.config-path"), }, - Election: config.Election{ - Enabled: viper.GetBool("election.enabled"), - ID: viper.GetString("election.id"), - Baseon: viper.GetString("election.baseon"), - AlwaysLeader: viper.GetBool("election.always-leader"), - }, - GatewayNervs: config.GatewayNervs{ - Enabled: viper.GetBool("gateway-nervs.enabled"), - Endpoint: viper.GetString("gateway-nervs.endpoint"), - }, OCSPHost: viper.GetString("ocsp-host"), HTTP: config.HTTP{ OcspListen: viper.GetString("http.ocsp-listen"), @@ -124,11 +105,10 @@ func parseConfigs(c *cli.Context) (core.Config, error) { Dsn: viper.GetString("mysql.dsn"), }, Vault: config.Vault{ - Enabled: viper.GetBool("vault.enabled"), - Addr: viper.GetString("vault.addr"), - Token: viper.GetString("vault.token"), - Prefix: viper.GetString("vault.prefix"), - Discover: viper.GetString("vault.discover"), + Enabled: viper.GetBool("vault.enabled"), + Addr: viper.GetString("vault.addr"), + Token: viper.GetString("vault.token"), + Prefix: viper.GetString("vault.prefix"), }, Influxdb: influxdb.CustomConfig{ Enabled: viper.GetBool("influxdb.enabled"), @@ -147,17 +127,10 @@ func parseConfigs(c *cli.Context) (core.Config, error) { FlushSize: viper.GetInt("influxdb.flush-size"), FlushTime: viper.GetInt("influxdb.flush-time"), }, - Mesh: config.Mesh{ - MSPPortalAPI: viper.GetString("mesh.msp-portal-api"), - }, SwaggerEnabled: viper.GetBool("swagger-enabled"), Debug: viper.GetBool("debug"), Version: viper.GetString("version"), Hostname: hostname, - Metrics: config.Metrics{ - CpuLimit: viper.GetFloat64("metrics.cpu-limit"), - MemLimit: 
viper.GetFloat64("metrics.mem-limit"), - }, Ocsp: config.Ocsp{ CacheTime: viper.GetInt("ocsp.cache-time"), }, @@ -170,7 +143,7 @@ func parseConfigs(c *cli.Context) (core.Config, error) { cfg, err := cfssl_config.LoadFile(conf.Singleca.ConfigPath) if err != nil { - return conf, fmt.Errorf("cfssl 配置文件 %s 错误: %s", conf.Singleca.ConfigPath, err) + return conf, fmt.Errorf("cfssl configuration file %s Error: %s", conf.Singleca.ConfigPath, err) } cfg.Signing.Default.OCSP = conf.OCSPHost diff --git a/initer/initer.go b/initer/initer.go index 1f121c5..5784ef2 100644 --- a/initer/initer.go +++ b/initer/initer.go @@ -2,25 +2,20 @@ package initer import ( "github.com/urfave/cli" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/certmanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/datastore" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/event" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/ca/datastore" + "github.com/ztalab/ZACA/ca/keymanager" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/vaultsecret" + "github.com/ztalab/cfssl/hook" "log" "os" - "runtime" - - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultinit" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/vaultsecret" // ... 
- _ "gitlab.oneitfarm.com/bifrost/capitalizone/util" + _ "github.com/ztalab/ZACA/util" ) -// Init 初始化 +// Init Initialization func Init(c *cli.Context) error { conf, err := parseConfigs(c) if err != nil { @@ -37,21 +32,14 @@ func Init(c *cli.Context) error { if err != nil { logger.Fatal(err) } - // Redis Connect - redisClient, err := redisDialer(&conf, l) - if err != nil { - logger.Fatal(err) - } - i := &core.I{ - Config: &conf, - Logger: l, - Db: db, - RedisClusterClient: redisClient, + Config: &conf, + Logger: l, + Db: db, } if hook.EnableVaultStorage { - logger.Info("启用 Vault 加密储存引擎") + logger.Info("Enable vault encrypted storage engine") vaultClient, err := vaultDialer(&conf, l) if err != nil { logger.Fatal(err) @@ -62,23 +50,9 @@ func Init(c *cli.Context) error { } core.Is = i - // 初始化influxdb + // Initialize incluxdb go influxdbDialer(&conf, l) - if core.Is.Config.Vault.Enabled { - vaultinit.Init() - } else { - go vaultinit.Init() - } - - // 资源监控 - if runtime.GOOS == "linux" && redisClient != nil { - // 初始化推送事件客户端 - event.InitEventClient(redisClient) - InitMonitor() - } - - // TODO 迁移 if os.Getenv("IS_MIGRATION") == "true" { datastore.RunMigration() os.Exit(1) @@ -87,9 +61,6 @@ func Init(c *cli.Context) error { if err := keymanager.InitKeeper(); err != nil { return err } - // Certs - _ = certmanager.NewCertCleaner() - // go cc.AutoGC() logger.Info("success started.") return nil diff --git a/initer/logger.go b/initer/logger.go index 24ee73e..6eb80ef 100644 --- a/initer/logger.go +++ b/initer/logger.go @@ -3,17 +3,16 @@ package initer import ( "log" - "gitlab.oneitfarm.com/bifrost/cilog" - "gitlab.oneitfarm.com/bifrost/cilog/redis_hook" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/logger/redis_hook" "go.uber.org/zap/zapcore" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + "github.com/ztalab/ZACA/core" ) func initLogger(config *core.Config) { conf := &logger.Conf{ - AppInfo: 
&cilog.ConfigAppData{ + AppInfo: &logger.ConfigAppData{ AppVersion: config.Version, Language: "zh-cn", }, diff --git a/initer/metrics.go b/initer/metrics.go deleted file mode 100644 index cee68e3..0000000 --- a/initer/metrics.go +++ /dev/null @@ -1,310 +0,0 @@ -package initer - -import ( - "fmt" - "github.com/shopspring/decimal" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/event" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/influxdb" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource/file_source" - utils "gitlab.oneitfarm.com/bifrost/capitalizone/util" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "sync" - "time" -) - -const ( - alertTypeService = "service" - resourceTypeMemory = "memory" - resourceTypeCPU = "cpu" - resourceTypeBoth = "both" - - RESOURCE_CPU_TYPE = 1 - RESOURCE_MEM_TYPE = 2 - RESOURCE_BOTH_TYPE = 3 -) - -// 属性资源结构体池 -var resourceDataPool sync.Pool - -type MonitorAttributes struct { - PodIp string `json:"pod_ip"` // 当前pod ip - UniqueId string `json:"unique_id"` // 服务识别号 - Hostname string `json:"hostname"` - ServiceName string `json:"service_name"` // 服务可读名称 - CpuLimit float64 `json:"cpu_limit"` // Cpu阈值 - MemLimit float64 `json:"mem_limit"` // 内存阈值 - - DiskRead int64 `json:"disk_read"` // 磁盘读取速率,bytes/sec - DiskWrite int64 `json:"disk_write"` // 磁盘写入速率,bytes/sec - - Cpu float64 `json:"cpu"` // 使用的cpu,单位:% - TotalMemory int64 `json:"total_memory"` // 总内存,单位:mb - Memory float64 `json:"memory"` // 使用的内存,单位:% - MemoryMB int64 `json:"memory_mb"` // 使用的内存,单位:mb - MemoryMBAV int64 `json:"memory_mb_av"` // 剩余的内存,单位:mb - CpuCoreCount float64 `json:"cpu_core_count"` // cpu核心 -} - -// 资源监控 -func InitMonitor() { - utils.GoWithRecover(func() { - InitMonitorHandle() - }, func(r interface{}) { - InitMonitor() - }) -} - -// InitMonitorHandle 监控 -func InitMonitorHandle() { - // 
初始化池 - attr := new(MonitorAttributes) - cpu := make(chan float64) - disk := make(chan resource.DiskStat) - attr.PodIp = schema.GetLocalIpLabel() - attr.UniqueId = "capitalizone_" + core.Is.Config.Registry.Command - attr.Hostname = core.Is.Config.Hostname - attr.ServiceName = "CA中心" - - attr.CpuLimit = core.Is.Config.Metrics.CpuLimit - attr.MemLimit = core.Is.Config.Metrics.MemLimit - - cpuTimes := time.Millisecond * 250 - - resourceHd := file_source.NewFileSource() - ticker := time.NewTicker(10 * time.Second) - defer ticker.Stop() - // 初始化池 - resourceDataPool.New = func() interface{} { - return &resourceData{} - } - // cpu阈值计算 - for range ticker.C { - - if resourceHd.InitData() { - attr.CpuCoreCount = resourceHd.GetCpuCount() - } - - // 自身容器资源情况 - scRsd := resourceDataPool.Get().(*resourceData) - scResource := scRsd.getResource(resourceHd, cpu, cpuTimes, disk) - - // 容器资源使用情况 - attr.TotalMemory = scResource.ContainerTotalMemoryMB - attr.MemoryMB = scResource.ContainerUsedMemoryMB - attr.Memory = scResource.UsedMemoryRatio - attr.MemoryMBAV = scResource.ContainerTotalMemoryMB - scResource.ContainerUsedMemoryMB // 业务剩余内存 - attr.Cpu = scResource.ContainerCpuUsedRatio - attr.DiskRead = int64(scResource.ContainerDiskRead) - attr.DiskWrite = int64(scResource.ContainerDiskWrite) - - // report to influxdb - if core.Is.Config.Influxdb.Enabled { - AddMetricsPoint(attr) - } - // 告警 - AlertError(attr) - - // 回收 - scRsd.reset() - resourceDataPool.Put(scRsd) - } -} - -// 告警 -func AlertError(attr *MonitorAttributes) { - var serviceCpuLimit float64 - if attr.CpuCoreCount > 0 { - serviceCpuLimit = attr.CpuCoreCount * attr.CpuLimit - } - // 服务资源预警 - if serviceCpuLimit > 0 { - if attr.Memory > attr.MemLimit || attr.Cpu > serviceCpuLimit { - resourceType := 0 - if attr.Memory > attr.MemLimit && attr.Cpu > serviceCpuLimit { - // cpu + 内存 预警 - resourceType = RESOURCE_BOTH_TYPE - } else if attr.Memory > attr.MemLimit { - // 内存预警 - resourceType = RESOURCE_MEM_TYPE - } else if attr.Cpu > 
serviceCpuLimit { - // cpu预警 - resourceType = RESOURCE_CPU_TYPE - } - data := MSPReportResource{ - UniqueId: attr.UniqueId, - AlertType: 4, - ResourceType: resourceType, - Cpu: attr.Cpu, - CpuCoreCount: attr.CpuCoreCount, - CpuThreshold: serviceCpuLimit, - Mem: attr.Memory, - MemoryMB: attr.MemoryMB, - MemThreshold: attr.MemLimit, - HostName: attr.Hostname, - ServiceName: attr.ServiceName, - // cpu内存 request - //ServiceMemRequests: attr.ServiceMemRequests, - //ServiceMemLimits: attr.ServiceMemLimits, - //ServiceCPURequests: attr.ServiceCPURequests, - //ServiceCPULimits: attr.ServiceCPULimits, - EventTime: time.Now().UnixNano() / 1e6, - } - event.Client().Report(event.EVENT_TYPE_RESOURCE, data) - recordResourceWarning(data) - } - } -} - -var ( - _metricsFields = make(map[string]interface{}) - _tagFields = make(map[string]string) -) - -// 时序日志 -func AddMetricsPoint(attr *MonitorAttributes) { - if !core.Is.Config.Influxdb.Enabled { - return - } - - _metricsFields["cpu"] = attr.Cpu - _metricsFields["cpu_core_count"] = attr.CpuCoreCount - _metricsFields["disk_read"] = attr.DiskRead - _metricsFields["disk_write"] = attr.DiskWrite - _metricsFields["total_memory"] = attr.TotalMemory - _metricsFields["memory"] = attr.Memory - _metricsFields["memory_mb"] = attr.MemoryMB - _metricsFields["memory_mb_av"] = attr.MemoryMBAV - - _tagFields["pod_ip"] = attr.PodIp - _tagFields["unique_id"] = attr.UniqueId - _tagFields["hostname"] = attr.Hostname - _tagFields["service_name"] = attr.ServiceName - - core.Is.Metrics.AddPoint(&influxdb.MetricsData{ - Measurement: schema.MetricsCaCpuMem, - Fields: _metricsFields, - Tags: _tagFields, - }) -} - -type resourceData struct { - ContainerTotalMemoryMB int64 // 容器总内存,单位:mb - ContainerUsedMemoryMB int64 // 容器使用的内存,单位:mb - ProcessUsedMemoryMB int64 // 当前进程所使用的内存 - UsedMemoryRatio float64 // 已经使用内存的比例 - ContainerCpuUsedRatio float64 // 容器内cpu使用率,单位:% - ContainerDiskRead uint64 // 容器内磁盘读取,单位bytes/sec - ContainerDiskWrite uint64 // 
容器内磁盘写入,单位bytes/sec - ContainerNetFlowRX uint64 // pod内网卡接收流量,单位bytes/sec - ContainerNetFlowTX uint64 // pod内网卡输出流量,单位bytes/sec - Netstat map[string]int // TCP 连接详情 -} - -// isRemote 则不获取进程内存 -func (res *resourceData) getResource(rs resource.Resource, cpu chan float64, cpuTimes time.Duration, disk chan resource.DiskStat) *resourceData { - if !rs.InitSuccess() { - return res - } - var err error - res.ContainerTotalMemoryMB, res.ContainerUsedMemoryMB = utils.GetContainerMemory(rs) - if res.ContainerTotalMemoryMB == 0 || res.ContainerUsedMemoryMB == 0 { - return res - } - - res.ProcessUsedMemoryMB, err = rs.GetRss() - if err != nil { - logger.Warnf("获取服务进程内存失败", err) - } - - used := decimal.NewFromInt(res.ContainerUsedMemoryMB) - total := decimal.NewFromInt(res.ContainerTotalMemoryMB) - res.UsedMemoryRatio, _ = used.DivRound(total, 2).Mul(decimal.NewFromInt(100)).Float64() - // disk - go utils.GetContainerDisk(rs, disk, time.Second) - diskChan := <-disk - res.ContainerDiskRead = diskChan.Read - res.ContainerDiskWrite = diskChan.Write - // cpu - go utils.GetContainerCpu(rs, cpu, cpuTimes) - res.ContainerCpuUsedRatio, _ = decimal.NewFromFloat(<-cpu).Round(2).Float64() - return res -} - -func (r *resourceData) reset() { - r.ContainerCpuUsedRatio = 0 - r.ContainerTotalMemoryMB = 0 // 容器总内存,单位:mb - r.ContainerUsedMemoryMB = 0 // 容器使用的内存,单位:mb - r.ProcessUsedMemoryMB = 0 // 当前进程所使用的内存 - r.UsedMemoryRatio = 0 - r.ContainerCpuUsedRatio = 0 // 容器内cpu使用率,单位:% - r.ContainerDiskRead = 0 // 容器内磁盘读取,单位bytes/sec - r.ContainerDiskWrite = 0 // 容器内磁盘写入,单位bytes/sec -} - -// 上报到redis msp -// -type MSPReportResource struct { - UniqueId string `json:"unique_id"` - AlertType int `json:"alert_type"` // 1注册中心兜底2注册中心自定义3sidecar资源兜底4服务资源兜底 - ResourceType int `json:"resource_type"` // 资源类型1cpu2内存 3所有 - Cpu float64 `json:"cpu"` // cpu百分比 - CpuCoreCount float64 `json:"cpu_core_count"` // cpu核数 - CpuThreshold float64 `json:"cpu_threshold"` // cpu阈值 - Mem float64 `json:"mem"` // 百分比 - MemoryMB int64 
`json:"memory_mb"` - MemThreshold float64 `json:"mem_threshold"` // 内存阈值 - HostName string `json:"hostname"` - ServiceName string `json:"service_name"` // 服务可读名称 - ServiceMemRequests int `json:"service_mem_requests"` // 服务内存request - ServiceMemLimits int `json:"service_mem_limits"` // 服务内存limit - ServiceCPURequests int `json:"service_cpu_requests"` // 服务cpu request - ServiceCPULimits int `json:"service_cpu_limits"` // 服务cpu limit - EventTime int64 `json:"event_time"` -} - -// 写入资源预警日志 -func recordResourceWarning(alert MSPReportResource) { - cpu := utils.FloatToString(alert.Cpu) + "%" - cpuThreshold := utils.FloatToString(alert.CpuThreshold) + "%" - mem := utils.FloatToString(alert.Mem) + "%" - memThreshold := utils.FloatToString(alert.MemThreshold) + "%" - - // 发送告警 - var eventMsg, rule, alertType, resourceType string - eventMsg = "微服务资源兜底预警" - alertType = alertTypeService - - if alert.Cpu >= alert.CpuThreshold { - alert.ResourceType = RESOURCE_CPU_TYPE - resourceType = resourceTypeCPU - rule = fmt.Sprintf("当前CPU: %s(核), 达到阈值: %s(核)", cpu, cpuThreshold) - } - if alert.Mem >= alert.MemThreshold { - alert.ResourceType = RESOURCE_MEM_TYPE - resourceType = resourceTypeMemory - rule = fmt.Sprintf("当前内存: %s, 达到阈值: %s", mem, memThreshold) - } - if alert.Cpu >= alert.CpuThreshold && alert.Mem >= alert.MemThreshold { - alert.ResourceType = RESOURCE_BOTH_TYPE - resourceType = resourceTypeBoth - rule = fmt.Sprintf("当前CPU: %s(核), 达到阈值: %s(核); 当前内存: %s, 达到阈值: %s", - cpu, - cpuThreshold, - mem, - memThreshold, - ) - } - // 日志写入 - textLog := fmt.Sprintf(`发生时间:%s;事件:%s;服务名称:%s;服务识别号:%s;HOSTNAME:%s;触发规则:%s`, - time.Unix(0, alert.EventTime*int64(time.Millisecond)).Format("2006-01-02 15:04:05.000"), - eventMsg, - alert.ServiceName, - alert.UniqueId, - alert.HostName, - rule, - ) - logger.With("customLog1", "resource", "customLog2", alertType, "customLog3", resourceType).Error(textLog) -} diff --git a/initer/registry.go b/initer/registry.go deleted file mode 100644 index 
c215cfc..0000000 --- a/initer/registry.go +++ /dev/null @@ -1,3 +0,0 @@ -package initer - -// TODO 注册自身 diff --git a/logic/ca/base.go b/logic/ca/base.go index 58a6c8c..8993751 100644 --- a/logic/ca/base.go +++ b/logic/ca/base.go @@ -1,11 +1,11 @@ package ca import ( - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + "github.com/ztalab/ZACA/core" ) type Logic struct { @@ -16,7 +16,7 @@ type Logic struct { func NewLogic() *Logic { return &Logic{ db: core.Is.Db, - logger: v2log.Named("logic").SugaredLogger, + logger: logger.Named("logic").SugaredLogger, } } diff --git a/logic/ca/config.go b/logic/ca/config.go index 9d4f728..2f1973d 100644 --- a/logic/ca/config.go +++ b/logic/ca/config.go @@ -1,4 +1,4 @@ -// Package ca 配置类展示 +// Package ca Configuration class display package ca import ( @@ -6,9 +6,9 @@ import ( "time" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/config" + "github.com/ztalab/cfssl/config" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + "github.com/ztalab/ZACA/core" ) type RoleProfile struct { @@ -20,13 +20,13 @@ type RoleProfile struct { IsCa bool `json:"is_ca"` } -// RoleProfiles 展示环境隔离状态 -// 不需要参数 +// RoleProfiles Show environmental isolation status +// No parameters are required func (l *Logic) RoleProfiles() ([]RoleProfile, error) { cfg := core.Is.Config.Singleca.CfsslConfig if cfg == nil { - l.logger.Error("cfssl config 为空") - return nil, errors.New("cfssl config 为空") + l.logger.Error("cfssl config Empty") + return nil, errors.New("cfssl config Empty") } roles := make([]RoleProfile, 0, len(cfg.Signing.Profiles)+1) diff --git a/logic/ca/intermediate.go b/logic/ca/intermediate.go index 3966881..bfe1370 100644 --- a/logic/ca/intermediate.go +++ b/logic/ca/intermediate.go @@ -7,14 +7,14 @@ import ( "github.com/go-resty/resty/v2" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" - 
"gitlab.oneitfarm.com/bifrost/cfssl/helpers" + "github.com/ztalab/cfssl/helpers" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/upperca" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" + "github.com/ztalab/ZACA/ca/upperca" + "github.com/ztalab/ZACA/core" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" + "github.com/ztalab/ZACA/logic/schema" + "github.com/ztalab/ZACA/pkg/caclient" ) const ( @@ -36,7 +36,7 @@ type IntermediateObject struct { Current bool `json:"current"` } -// IntermediateTopology 获取自身签发的子集群证书 +// IntermediateTopology Obtain the sub cluster certificate issued by itself func (l *Logic) IntermediateTopology() ([]*IntermediateObject, error) { db := l.db.Session(&gorm.Session{}) db = db.Where("ca_label = ?", caclient.RoleIntermediate) @@ -53,18 +53,18 @@ func (l *Logic) IntermediateTopology() ([]*IntermediateObject, error) { ) list, _, err := dao.GetAllCertificates(db, 1, 100, "issued_at desc") if err != nil { - return nil, errors.Wrap(err, "数据库查询错误") + return nil, errors.Wrap(err, "Database query error") } - l.logger.Debugf("查询结果数量: %v", len(list)) + l.logger.Debugf("Number of query results: %v", len(list)) intermediateMap := make(map[string]*IntermediateObject, 0) for _, row := range list { rawCert, err := helpers.ParseCertificatePEM([]byte(row.Pem)) if err != nil { - l.logger.With("row", row).Errorf("CA 证书解析错误: %s", err) + l.logger.With("row", row).Errorf("CA Certificate parsing error: %s", err) continue } if len(rawCert.Subject.OrganizationalUnit) == 0 || len(rawCert.Subject.Organization) == 0 { - l.logger.With("row", row).Warn("CA 证书缺少 O/OU 字段") + l.logger.With("row", row).Warn("CA Certificate missing O/OU Field") continue } ou := rawCert.Subject.OrganizationalUnit[0] @@ -84,7 +84,7 @@ func (l *Logic) IntermediateTopology() 
([]*IntermediateObject, error) { return result, nil } -// UpperCaIntermediateTopology 获取上级 CA 的 +// UpperCaIntermediateTopology Get parent CA's func (l *Logic) UpperCaIntermediateTopology() ([]*IntermediateObject, error) { if core.Is.Config.Keymanager.SelfSign { return l.IntermediateTopology() @@ -94,14 +94,14 @@ func (l *Logic) UpperCaIntermediateTopology() ([]*IntermediateObject, error) { err := upperca.ProxyRequest(func(host string) error { res, err := httpClient.R().Get(host + UpperCaApiIntermediateTopology) if err != nil { - l.logger.With("upperca", host).Errorf("UpperCA 请求错误: %s", err) + l.logger.With("upperca", host).Errorf("UpperCA Request error: %s", err) return err } resp = res return nil }) if err != nil { - l.logger.Errorf("UpperCA 子CA拓扑获取失败: %s", err) + l.logger.Errorf("UpperCA Sub CA topology acquisition failed: %s", err) return nil, err } @@ -110,8 +110,8 @@ func (l *Logic) UpperCaIntermediateTopology() ([]*IntermediateObject, error) { Data []*IntermediateObject `json:"data"` } if err := jsoniter.Unmarshal(body, &response); err != nil { - l.logger.With("body", string(body)).Errorf("json 解析错误: %s", err) - return nil, errors.Wrap(err, "json 解析错误") + l.logger.With("body", string(body)).Errorf("json Parsing error: %s", err) + return nil, errors.Wrap(err, "json Parsing error") } return response.Data, nil diff --git a/logic/ca/workload.go b/logic/ca/workload.go index d1138bc..c324927 100644 --- a/logic/ca/workload.go +++ b/logic/ca/workload.go @@ -1,4 +1,4 @@ -// Package ca workload 相关 +// Package ca workload Relevant package ca import ( @@ -9,21 +9,21 @@ import ( "github.com/tal-tech/go-zero/core/fx" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/util" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" + 
"github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/util" ) const AllCertsCacheKey = "all_certs_cache" -// WorkloadUnit 以 UniqueID 划分的 Workload 单元 +// WorkloadUnit UniqueID Divided workload unit type WorkloadUnit struct { Role caclient.Role `json:"role"` - ValidNum int `json:"valid_num"` // 有效证书数量 - FirstIssuedAt time.Time `json:"first_issued_at"` // 首次签发证书日期 + ValidNum int `json:"valid_num"` // Number of valid certificates + FirstIssuedAt time.Time `json:"first_issued_at"` // Date of first issuance of certificate UniqueId string `json:"unique_id"` - Forbidden bool `json:"forbidden"` // 是否被禁止 + Forbidden bool `json:"forbidden"` // Is it prohibited } type WorkloadUnitsParams struct { @@ -31,11 +31,11 @@ type WorkloadUnitsParams struct { UniqueId string } -// WorkloadUnits CA 下 Units -// 返回目前活跃的 Units 及概要 +// WorkloadUnits CA Units +// Return to currently active units and summary func (l *Logic) WorkloadUnits(params *WorkloadUnitsParams) ([]*WorkloadUnit, int64, error) { db := l.db.Session(&gorm.Session{}) - // 默认筛选没有过期的 + // The default filter has no expired db = db.Where("expiry > ?", time.Now()). 
Where("status", "good") db = db.Select( @@ -52,7 +52,7 @@ func (l *Logic) WorkloadUnits(params *WorkloadUnitsParams) ([]*WorkloadUnit, int certs, err := getCerts(db) if err != nil { - return make([]*WorkloadUnit, 0), 0, errors.Wrap(err, "数据库查询错误") + return make([]*WorkloadUnit, 0), 0, errors.Wrap(err, "Database query error") } var i int @@ -113,7 +113,7 @@ func getCerts(db *gorm.DB) ([]*model.Certificates, error) { if !ok { certs, _, err = dao.GetAllCertificates(db, 1, 10000, "issued_at desc") if err != nil { - return nil, errors.Wrap(err, "数据库查询错误") + return nil, errors.Wrap(err, "Database query error") } util.MapCache.SetDefault(AllCertsCacheKey, certs) } diff --git a/logic/certleaf/base.go b/logic/certleaf/base.go index 1370e27..8c59cfc 100644 --- a/logic/certleaf/base.go +++ b/logic/certleaf/base.go @@ -1,11 +1,11 @@ package certleaf import ( - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + "github.com/ztalab/ZACA/core" ) type Logic struct { @@ -16,7 +16,7 @@ type Logic struct { func NewLogic() *Logic { return &Logic{ db: core.Is.Db, - logger: v2log.Named("logic").SugaredLogger, + logger: logger.Named("logic").SugaredLogger, } } diff --git a/logic/certleaf/certleaf.go b/logic/certleaf/certleaf.go index cc30ca6..1bd0672 100644 --- a/logic/certleaf/certleaf.go +++ b/logic/certleaf/certleaf.go @@ -5,12 +5,12 @@ import ( "encoding/hex" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" + "github.com/ztalab/cfssl/helpers" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" + "github.com/ztalab/ZACA/ca/keymanager" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/schema" ) type LeafCert struct { @@ -19,17 +19,16 @@ type 
LeafCert struct { } type CertChainParams struct { - SelfCert bool `form:"self_cert"` // 展示自己的证书 + SelfCert bool `form:"self_cert"` // Show your certificate SN string `form:"sn"` AKI string `form:"aki"` } -// CertChain 证书链 +// CertChain func (l *Logic) CertChain(params *CertChainParams) (*LeafCert, error) { var cert *x509.Certificate var err error if params.SelfCert { - // Ca 自身证书 _, cert, err = keymanager.GetKeeper().GetCachedSelfKeyPair() if err != nil { return nil, err @@ -41,11 +40,11 @@ func (l *Logic) CertChain(params *CertChainParams) (*LeafCert, error) { SerialNumber: params.SN, AuthorityKeyIdentifier: params.AKI, }).First(&row).Error; err != nil { - return nil, errors.Wrap(err, "数据库查询错误") + return nil, errors.Wrap(err, "Database query error") } parsedCert, err := helpers.ParseCertificatePEM([]byte(row.Pem)) if err != nil { - l.logger.Errorf("证书解析错误: %s", err) + l.logger.Errorf("Certificate parsing error: %s", err) return nil, err } cert = parsedCert diff --git a/logic/events/logger.go b/logic/events/logger.go index ac10c20..dcdbb40 100644 --- a/logic/events/logger.go +++ b/logic/events/logger.go @@ -4,7 +4,7 @@ import ( "fmt" jsoniter "github.com/json-iterator/go" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" ) const ( @@ -13,11 +13,11 @@ const ( ) var CategoriesStrings = map[string]string{ - CategoryWorkloadLifecycle: "Workload生命周期", + CategoryWorkloadLifecycle: "Workload life cycle", } const ( - OperatorMSP = "MSP平台" + OperatorMSP = "MSP platform" OperatorSDK = "SDK" ) @@ -27,18 +27,18 @@ type CertOp struct { AKI string `json:"aki"` } -// Op 操作记录 +// Op Operation record type Op struct { - Operator string `json:"operator"` // 操作人 - Category string `json:"category"` // 分类 - Type string `json:"type"` // 操作类型 - Obj interface{} `json:"obj"` // 操作对象 + Operator string `json:"operator"` // Operator + Category string `json:"category"` // Classification + Type string `json:"type"` // Operation type + Obj interface{} 
`json:"obj"` // Operation object } func (o *Op) Log() { objStr, _ := jsoniter.MarshalToString(o.Obj) - v2log.Named(LoggerName). - With(v2log.DynFieldCustomLog1, fmt.Sprintf("%s.%s", o.Category, o.Type)). - With(v2log.DynFieldCustomLog3, o.Obj). - Infof("分类: %s, 操作: %s, 操作者: %s, 操作对象: %v", CategoriesStrings[o.Category], o.Type, o.Operator, objStr) + logger.Named(LoggerName). + With("flag", fmt.Sprintf("%s.%s", o.Category, o.Type)). + With("data", o.Obj). + Infof("Classification: %s, Operation: %s, Operator: %s, Operation object: %v", CategoriesStrings[o.Category], o.Type, o.Operator, objStr) } diff --git a/logic/schema/cert.go b/logic/schema/cert.go index 64633f0..eb21b24 100644 --- a/logic/schema/cert.go +++ b/logic/schema/cert.go @@ -4,13 +4,13 @@ import ( "crypto/x509" "time" - "gitlab.oneitfarm.com/bifrost/cfssl/certinfo" + "github.com/ztalab/cfssl/certinfo" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" ) -// SampleCert 证书列表 cert +// SampleCert Certificate list cert type SampleCert struct { SN string `mapstructure:"sn,omitempty" json:"sn"` AKI string `mapstructure:"aki,omitempty" json:"aki"` @@ -40,10 +40,10 @@ type Certificate struct { RawPEM string `mapstructure:"-" json:"-"` } -// 证书详情 cert +// Certificate details cert type FullCert struct { SampleCert - CertStr string `mapstructure:"cert_str,omitempty" json:"cert_str"` // 展示证书的详细信息 + CertStr string `mapstructure:"cert_str,omitempty" json:"cert_str"` // Show certificate details CertInfo *Certificate `mapstructure:"cert_info,omitempty" json:"cert_info,omitempty"` RawCert *x509.Certificate `mapstructure:"-" json:"-"` } diff --git a/logic/schema/getter.go b/logic/schema/getter.go index b0de455..2b92d17 100644 --- a/logic/schema/getter.go +++ b/logic/schema/getter.go @@ -7,12 +7,12 @@ import ( "github.com/mayocream/pki/pkg/x509util" "github.com/pkg/errors" - 
cfCertInfo "gitlab.oneitfarm.com/bifrost/cfssl/certinfo" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" + cfCertInfo "github.com/ztalab/cfssl/certinfo" + "github.com/ztalab/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" ) func GetFullCertByX509Cert(cert *x509.Certificate) *FullCert { @@ -26,7 +26,7 @@ func GetFullCertByX509Cert(cert *x509.Certificate) *FullCert { SN: cert.SerialNumber.String(), AKI: hex.EncodeToString(cert.SubjectKeyId), CN: cert.Subject.CommonName, - // TODO 加入证书标识获取 role + // TODO Join certificate ID acquisition role NotBefore: cert.NotBefore, Expiry: cert.NotAfter, }, diff --git a/logic/schema/influx.go b/logic/schema/influx.go index d9b1348..358d0b5 100644 --- a/logic/schema/influx.go +++ b/logic/schema/influx.go @@ -4,7 +4,7 @@ import ( "net" "net/url" - "gitlab.oneitfarm.com/bifrost/capitalizone/core/config" + "github.com/ztalab/ZACA/core/config" ) const ( @@ -35,7 +35,7 @@ func GetLocalIpLabel() string { var internetIP = getInternetIP() func getInternetIP() (IP string) { - // 查找本机IP + // Find native IP addrs, err := net.InterfaceAddrs() if err != nil { return "" @@ -44,7 +44,7 @@ func getInternetIP() (IP string) { if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { if ip4 := ipnet.IP.To4(); ip4 != nil { if ip4[0] == 10 { - // 赋值新的IP + // Assign new IP IP = ip4.String() } } diff --git a/logic/workload/base.go b/logic/workload/base.go index f62e86f..48c102a 100644 --- a/logic/workload/base.go +++ b/logic/workload/base.go @@ -1,11 +1,11 @@ package workload import ( - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" + 
"github.com/ztalab/ZACA/core" ) type Logic struct { @@ -16,6 +16,6 @@ type Logic struct { func NewLogic() *Logic { return &Logic{ db: core.Is.Db, - logger: v2log.Named("logic").SugaredLogger, + logger: logger.Named("logic").SugaredLogger, } } diff --git a/logic/workload/display.go b/logic/workload/display.go index 8b71635..1543b13 100644 --- a/logic/workload/display.go +++ b/logic/workload/display.go @@ -1,4 +1,4 @@ -// Package workload 展示层 +// Package workload Display layer package workload import ( @@ -8,17 +8,17 @@ import ( "github.com/araddon/dateparse" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/schema" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/schema" "gorm.io/gorm" ) type CertListParams struct { - // 查询条件 + // query criteria CertSN string Role, UniqueID string - // 分页条件 + // Paging condition Page, PageSize int Status string Order string @@ -30,7 +30,7 @@ type CertListResult struct { Total int64 } -// CertList 获取证书列表 +// CertList Get certificate list func (l *Logic) CertList(params *CertListParams) (*CertListResult, error) { query := l.db.Session(&gorm.Session{}) if params.CertSN != "" { @@ -60,14 +60,14 @@ func (l *Logic) CertList(params *CertListParams) (*CertListResult, error) { if params.ExpiryStartTime != "" { date, err := dateparse.ParseAny(params.ExpiryStartTime) if err != nil { - return nil, errors.Wrap(err, "过期时间错误") + return nil, errors.Wrap(err, "Expiration time error") } query = query.Where("expiry > ?", date) } if params.ExpiryEndTime != "" { date, err := dateparse.ParseAny(params.ExpiryEndTime) if err != nil { - return nil, errors.Wrap(err, "过期时间错误") + return nil, errors.Wrap(err, "Expiration time error") } query = query.Where("expiry < ?", date) } @@ 
-87,7 +87,7 @@ func (l *Logic) CertList(params *CertListParams) (*CertListResult, error) { list, total, err := dao.GetAllCertificates(query, params.Page, params.PageSize, params.Order) if err != nil { - return nil, errors.Wrap(err, "数据库查询错误") + return nil, errors.Wrap(err, "Database query error") } var result CertListResult result.CertList = make([]*schema.FullCert, 0, len(list)) @@ -114,7 +114,7 @@ func (l *Logic) CertDetail(params *CertDetailParams) (*schema.FullCert, error) { SerialNumber: params.SN, AuthorityKeyIdentifier: params.AKI, }).First(&row).Error; err != nil { - return nil, errors.Wrap(err, "数据库查询错误") + return nil, errors.Wrap(err, "Database query error") } cert, err := schema.GetFullCertByModelCert(row) if err != nil { diff --git a/logic/workload/lifecycle.go b/logic/workload/lifecycle.go index 3a12523..2914522 100644 --- a/logic/workload/lifecycle.go +++ b/logic/workload/lifecycle.go @@ -1,16 +1,16 @@ -// Package workload 证书生命周期管理 +// Package workload Certificate lifecycle management package workload import ( "time" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/ocsp" + "github.com/ztalab/cfssl/ocsp" "gorm.io/gorm" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/logic/events" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/model" + "github.com/ztalab/ZACA/logic/events" ) type RevokeCertsParams struct { @@ -19,11 +19,11 @@ type RevokeCertsParams struct { UniqueId string `json:"unique_id"` } -// RevokeCerts 吊销证书 -// 1. 通过 SN/AKI 吊销证书 -// 2. 通过 UniqueId 统一吊销证书 +// RevokeCerts Revocation of certificate +// 1. Revoke certificate through snaki +// 2. Unified revocation of certificates through uniqueID func (l *Logic) RevokeCerts(params *RevokeCertsParams) error { - // 1. 通过标识找到证书 + // 1. 
Certificate found by identity db := l.db.Session(&gorm.Session{}) db = db.Where("status = ?", "good"). @@ -34,20 +34,20 @@ func (l *Logic) RevokeCerts(params *RevokeCertsParams) error { } else if params.AKI != "" && params.SN != "" { db = db.Where("serial_number = ? AND authority_key_identifier = ?", params.SN, params.AKI) } else { - return errors.New("参数错误") + return errors.New("Parameter error") } certs, _, err := dao.GetAllCertificates(db, 1, 1000, "issued_at desc") if err != nil { - l.logger.With("params", params).Errorf("数据库查询错误: %s", err) - return errors.Wrap(err, "数据库查询错误") + l.logger.With("params", params).Errorf("Database query error: %s", err) + return errors.Wrap(err, "Database query error") } if len(certs) == 0 { - return errors.New("未找到证书") + return errors.New("Certificate not found") } - // 2. 批量吊销证书 + // 2. Batch revocation certificate reason, _ := ocsp.ReasonStringToCode("cacompromise") err = l.db.Transaction(func(tx *gorm.DB) error { for _, cert := range certs { @@ -64,11 +64,11 @@ func (l *Logic) RevokeCerts(params *RevokeCertsParams) error { return nil }) if err != nil { - l.logger.Errorf("批量吊销证书错误: %s", err) - return errors.Wrap(err, "批量吊销证书错误") + l.logger.Errorf("Batch revocation certificate error: %s", err) + return errors.Wrap(err, "Batch revocation certificate error") } - // 3. 记录操作日志 + // 3. Record operation log for _, cert := range certs { events.NewWorkloadLifeCycle("revoke", events.OperatorMSP, events.CertOp{ UniqueId: cert.CommonName.String, @@ -86,11 +86,11 @@ type RecoverCertsParams struct { UniqueId string `json:"unique_id"` } -// RecoverCerts 恢复证书 -// 1. 通过 SN/AKI 恢复证书 -// 2. 通过 UniqueId 统一恢复证书 +// RecoverCerts Restore certificate +// 1. Recover certificate through snaki +// 2. Unified certificate recovery through uniqueID func (l *Logic) RecoverCerts(params *RecoverCertsParams) error { - // 1. 通过标识找到证书 + // 1. Certificate found by identity db := l.db.Session(&gorm.Session{}) db = db.Where("status = ?", "revoked"). 
@@ -102,20 +102,20 @@ func (l *Logic) RecoverCerts(params *RecoverCertsParams) error { case params.AKI != "" && params.SN != "": db = db.Where("serial_number = ? AND authority_key_identifier = ?", params.SN, params.AKI) default: - return errors.New("参数错误") + return errors.New("Parameter error") } certs, _, err := dao.GetAllCertificates(db, 1, 1000, "issued_at desc") if err != nil { - l.logger.With("params", params).Errorf("数据库查询错误: %s", err) - return errors.Wrap(err, "数据库查询错误") + l.logger.With("params", params).Errorf("Database query error: %s", err) + return errors.Wrap(err, "Database query error") } if len(certs) == 0 { - return errors.New("未找到证书") + return errors.New("Certificate not found") } - // 2. 批量恢复证书 + // 2. Batch recovery certificate err = l.db.Transaction(func(tx *gorm.DB) error { for _, cert := range certs { err := tx.Model(&model.Certificates{}).Where(&model.Certificates{ @@ -134,7 +134,7 @@ func (l *Logic) RecoverCerts(params *RecoverCertsParams) error { return err } - // 3. 记录操作日志 + // 3. Record operation log for _, cert := range certs { events.NewWorkloadLifeCycle("recover", events.OperatorMSP, events.CertOp{ UniqueId: cert.CommonName.String, @@ -150,9 +150,9 @@ type ForbidNewCertsParams struct { UniqueIds []string `json:"unique_ids"` } -// ForbidNewCerts 禁止某个 UniqueID 申请证书 -// 1. 禁止 UniqueId 申请新证书 -// 2. 日志记录 +// ForbidNewCerts Prohibit a uniqueID from requesting a certificate +// 1.UniqueID is not allowed to apply for a new certificate +// 2. 
Logging func (l *Logic) ForbidNewCerts(params *ForbidNewCertsParams) error { err := l.db.Transaction(func(tx *gorm.DB) error { for _, uid := range params.UniqueIds { @@ -163,18 +163,18 @@ func (l *Logic) ForbidNewCerts(params *ForbidNewCertsParams) error { } _, _, err := dao.AddForbid(tx, &record) if err != nil { - l.logger.With("record", record).Errorf("数据库插入错误: %s", err) + l.logger.With("record", record).Errorf("Database insert error: %s", err) return err } } return nil }) if err != nil { - l.logger.Errorf("数据库插入错误: %s", err) + l.logger.Errorf("Database insert error: %s", err) return err } - // 日志记录 + // Logging for _, uid := range params.UniqueIds { events.NewWorkloadLifeCycle("forbid", events.OperatorMSP, events.CertOp{ UniqueId: uid, @@ -184,8 +184,8 @@ func (l *Logic) ForbidNewCerts(params *ForbidNewCertsParams) error { return nil } -// RecoverForbidNewCerts 恢复允许某个 UniqueID 申请证书 -// 1. 允许 UniqueId 申请新证书 +// RecoverForbidNewCerts Recovery allows a uniqueID to request a certificate +// 1. Allow uniqueID to request a new certificate func (l *Logic) RecoverForbidNewCerts(params *ForbidNewCertsParams) error { err := l.db.Transaction(func(tx *gorm.DB) error { for _, uid := range params.UniqueIds { @@ -193,18 +193,18 @@ func (l *Logic) RecoverForbidNewCerts(params *ForbidNewCertsParams) error { Where("deleted_at IS NULL"). 
Update("deleted_at", time.Now()).Error if err != nil { - l.logger.With("unique_id", uid).Errorf("数据库更新错误: %s", err) + l.logger.With("unique_id", uid).Errorf("Database update error: %s", err) return err } } return nil }) if err != nil { - l.logger.Errorf("数据库更新错误: %s", err) + l.logger.Errorf("Database update error: %s", err) return err } - // 日志记录 + // Logging for _, uid := range params.UniqueIds { events.NewWorkloadLifeCycle("recover-forbid", events.OperatorMSP, events.CertOp{ UniqueId: uid, diff --git a/logic/workload/unit.go b/logic/workload/unit.go index 6106103..e7d2cad 100644 --- a/logic/workload/unit.go +++ b/logic/workload/unit.go @@ -1,7 +1,7 @@ package workload import ( - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/dao" + "github.com/ztalab/ZACA/database/mysql/cfssl-model/dao" ) type UnitsForbidQueryParams struct { @@ -17,20 +17,20 @@ type UnitsForbidQueryResult struct { Status map[string]UnitForbidQueryItem `json:"status"` } -// UnitsForbidQuery 查询 unique_id 是否被禁止申请证书 +// UnitsForbidQuery Query unique_id Is it forbidden to apply for certificate func (l *Logic) UnitsForbidQuery(params *UnitsForbidQueryParams) (*UnitsForbidQueryResult, error) { db := l.db.Where("unique_id IN ?", params.UniqueIds). 
Where("deleted_at IS NULL") list, _, err := dao.GetAllForbid(db, 1, 1000, "id desc") if err != nil { - l.logger.Errorf("数据库查询错误: %s", err) + l.logger.Errorf("Database query error: %s", err) return nil, err } result := UnitsForbidQueryResult{ Status: make(map[string]UnitForbidQueryItem), } - l.logger.Debugf("查询结果: %v", list) + l.logger.Debugf("Query results: %v", list) for _, uid := range params.UniqueIds { result.Status[uid] = UnitForbidQueryItem{ diff --git a/main.go b/main.go index caa846e..4ec093d 100644 --- a/main.go +++ b/main.go @@ -3,9 +3,9 @@ package main import ( "context" "github.com/urfave/cli" - "gitlab.oneitfarm.com/bifrost/capitalizone/cmd" - "gitlab.oneitfarm.com/bifrost/capitalizone/initer" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/cmd" + "github.com/ztalab/ZACA/initer" + "github.com/ztalab/ZACA/pkg/logger" "os" ) @@ -17,7 +17,7 @@ func main() { app.Version = "1.0.0" app.Usage = "capitalizone" app.Commands = []cli.Command{ - newHttpCmd(ctx), + newApiCmd(ctx), newTlsCmd(ctx), newOcspCmd(ctx), } @@ -28,33 +28,33 @@ func main() { } } -// newHttpCmd 运行http服务 -func newHttpCmd(ctx context.Context) cli.Command { +// newApiCmd Running API services +func newApiCmd(ctx context.Context) cli.Command { return cli.Command{ - Name: "http", - Usage: "运行http服务", + Name: "api", + Usage: "Running API service", Action: func(c *cli.Context) error { return cmd.RunHttp(ctx) }, } } -// newTlsCmd 运行tls服务 +// newTlsCmd Running TLS service func newTlsCmd(ctx context.Context) cli.Command { return cli.Command{ Name: "tls", - Usage: "运行tls服务", + Usage: "Running TLS service", Action: func(c *cli.Context) error { return cmd.RunTls(ctx) }, } } -// newOcspCmd 运行tls服务 +// newOcspCmd Running OCSP service func newOcspCmd(ctx context.Context) cli.Command { return cli.Command{ Name: "ocsp", - Usage: "运行ocsp服务", + Usage: "Run OCSP service", Action: func(c *cli.Context) error { return cmd.RunOcsp(ctx) }, diff --git a/mtls-server.Dockerfile 
b/mtls-server.Dockerfile deleted file mode 100644 index 80db989..0000000 --- a/mtls-server.Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM golang:1.15 AS build -WORKDIR /capitalizone -ADD . . -CMD ["go", "run", "pkg/caclient/examples/server/server.go"] \ No newline at end of file diff --git a/pkg/attrmgr/attrmgr_test.go b/pkg/attrmgr/attrmgr_test.go deleted file mode 100644 index 577efe9..0000000 --- a/pkg/attrmgr/attrmgr_test.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright IBM Corp. All Rights Reserved. - -SPDX-License-Identifier: Apache-2.0 -*/ - -package attrmgr_test - -import ( - "crypto/x509" - "testing" - - "github.com/hyperledger/fabric-ca/lib/attrmgr" - "github.com/stretchr/testify/assert" -) - -// TestAttrs tests attributes -func TestAttrs(t *testing.T) { - mgr := attrmgr.New() - attrs := []attrmgr.Attribute{ - &Attribute{Name: "attr1", Value: "val1"}, - &Attribute{Name: "attr2", Value: "val2"}, - &Attribute{Name: "attr3", Value: "val3"}, - &Attribute{Name: "boolAttr", Value: "true"}, - } - reqs := []attrmgr.AttributeRequest{ - &AttributeRequest{Name: "attr1", Require: false}, - &AttributeRequest{Name: "attr2", Require: true}, - &AttributeRequest{Name: "boolAttr", Require: true}, - &AttributeRequest{Name: "noattr1", Require: false}, - } - cert := &x509.Certificate{} - - // Verify that the certificate has no attributes - at, err := mgr.GetAttributesFromCert(cert) - if err != nil { - t.Fatalf("Failed to GetAttributesFromCert: %s", err) - } - numAttrs := len(at.Names()) - assert.True(t, numAttrs == 0, "expecting 0 attributes but found %d", numAttrs) - - // Add attributes to certificate - err = mgr.ProcessAttributeRequestsForCert(reqs, attrs, cert) - if err != nil { - t.Fatalf("Failed to ProcessAttributeRequestsForCert: %s", err) - } - - // Get attributes from the certificate and verify the count is correct - at, err = mgr.GetAttributesFromCert(cert) - if err != nil { - t.Fatalf("Failed to GetAttributesFromCert: %s", err) - } - numAttrs = len(at.Names()) - 
assert.True(t, numAttrs == 3, "expecting 3 attributes but found %d", numAttrs) - - // Check individual attributes - checkAttr(t, "attr1", "val1", at) - checkAttr(t, "attr2", "val2", at) - checkAttr(t, "attr3", "", at) - checkAttr(t, "noattr1", "", at) - assert.NoError(t, at.True("boolAttr")) - - // Negative test case: add required attributes which don't exist - reqs = []attrmgr.AttributeRequest{ - &AttributeRequest{Name: "noattr1", Require: true}, - } - err = mgr.ProcessAttributeRequestsForCert(reqs, attrs, cert) - assert.Error(t, err) -} - -func checkAttr(t *testing.T, name, val string, attrs *attrmgr.Attributes) { - v, ok, err := attrs.Value(name) - assert.NoError(t, err) - if val == "" { - assert.False(t, attrs.Contains(name), "contains attribute '%s'", name) - assert.False(t, ok, "attribute '%s' was found", name) - } else { - assert.True(t, attrs.Contains(name), "does not contain attribute '%s'", name) - assert.True(t, ok, "attribute '%s' was not found", name) - assert.True(t, v == val, "incorrect value for '%s'; expected '%s' but found '%s'", name, val, v) - } -} - -type Attribute struct { - Name, Value string -} - -func (a *Attribute) GetName() string { - return a.Name -} - -func (a *Attribute) GetValue() string { - return a.Value -} - -type AttributeRequest struct { - Name string - Require bool -} - -func (ar *AttributeRequest) GetName() string { - return ar.Name -} - -func (ar *AttributeRequest) IsRequired() bool { - return ar.Require -} diff --git a/pkg/caclient/cai.go b/pkg/caclient/cai.go index 975ae59..5558413 100644 --- a/pkg/caclient/cai.go +++ b/pkg/caclient/cai.go @@ -1,13 +1,12 @@ package caclient import ( + "github.com/ztalab/ZACA/pkg/logger" "time" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - "gitlab.oneitfarm.com/bifrost/cfssl/csr" - cflog "gitlab.oneitfarm.com/bifrost/cfssl/log" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/core" - v2 "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/keygen" + 
"github.com/ztalab/cfssl/csr" + "github.com/ztalab/cfssl/transport/core" "go.uber.org/zap" ) @@ -15,18 +14,10 @@ import ( type Role string const ( - // RoleSidecar ... - RoleSidecar Role = "sidecar" - // RoleGateway ... - RoleGateway Role = "gateway" - // RoleStandAlone ... - RoleStandAlone Role = "standalone" + // RoleDefault ... + RoleDefault Role = "default" // RoleIntermediate ... RoleIntermediate Role = "intermediate" - // RoleIDGRegistry 中台注册中心 - RoleIDGRegistry Role = "idg-registry" - // RoleGatekeeper 哨兵 - RoleGatekeeper Role = "gatekeeper" ) // Conf ... @@ -50,7 +41,7 @@ func NewCAI(opts ...OptionFunc) *CAInstance { opt(conf) } conf.Logger.Sugar().Debugf("cai conf: %v", conf) - cflog.Logger = conf.Logger.Named("cfssl") + //cflog.Logger = conf.Logger.Named("cfssl") return &CAInstance{ Conf: *conf, } @@ -125,5 +116,5 @@ var defaultConf = Conf{ }, }, RotateAfter: 5 * time.Minute, - Logger: v2.N().Named("cai"), + Logger: logger.N().Named("cai"), } diff --git a/pkg/caclient/cert_manager.go b/pkg/caclient/cert_manager.go index 4ec965d..cd04a3b 100644 --- a/pkg/caclient/cert_manager.go +++ b/pkg/caclient/cert_manager.go @@ -7,20 +7,20 @@ import ( "encoding/hex" "net/http" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" + "github.com/ztalab/cfssl/hook" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/cfssl/info" + "github.com/ztalab/cfssl/helpers" + "github.com/ztalab/cfssl/info" jsoniter "github.com/json-iterator/go" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/api/client" - "gitlab.oneitfarm.com/bifrost/cfssl/auth" - "gitlab.oneitfarm.com/bifrost/cfssl/signer" + "github.com/ztalab/cfssl/api/client" + "github.com/ztalab/cfssl/auth" + "github.com/ztalab/cfssl/signer" "go.uber.org/zap" ) -// CertManager 证书管理器 +// CertManager Certificate manager type CertManager struct { logger *zap.SugaredLogger apiClient *client.AuthRemote @@ -28,15 +28,15 @@ type CertManager struct { caAddr string authKey string ocspFetcher OcspClient - 
// TODO 证书储存 + // TODO Certificate storage caCertTmp *x509.Certificate } -// NewCertManager 创建证书管理 Instance +// NewCertManager Create certificate management Instance func (cai *CAInstance) NewCertManager() (*CertManager, error) { ap, err := auth.New(cai.Conf.CFIdentity.Profiles["cfssl"]["auth-key"], nil) if err != nil { - return nil, errors.Wrap(err, "Auth key 配置错误") + return nil, errors.Wrap(err, "Auth key Configuration error") } caAddr := cai.CaAddr ocspAddr := cai.OcspAddr @@ -95,11 +95,11 @@ func (cm *CertManager) SignPEM(csrPEM []byte, uniqueID string) ([]byte, error) { return nil, err } - cm.logger.With("req", signReq).Debug("请求签发证书") + cm.logger.With("req", signReq).Debug("Request for certificate") certPEM, err := cm.apiClient.Sign(signReqBytes) if err != nil { - cm.logger.Errorf("请求签发证书失败: %s", err) + cm.logger.Errorf("Request to issue certificate failed: %s", err) return nil, err } @@ -108,10 +108,6 @@ func (cm *CertManager) SignPEM(csrPEM []byte, uniqueID string) ([]byte, error) { // RevokeIDGRegistryCert ... 
func (cm *CertManager) RevokeIDGRegistryCert(certPEM []byte) error { - if cm.profile != string(RoleIDGRegistry) { - return errors.New("Profile not valid") - } - cert, err := helpers.ParseCertificatePEM(certPEM) if err != nil { return err @@ -120,7 +116,7 @@ func (cm *CertManager) RevokeIDGRegistryCert(certPEM []byte) error { req := &RevokeRequest{ Serial: cert.SerialNumber.String(), AKI: hex.EncodeToString(cert.AuthorityKeyId), - Reason: "", // 默认为 0 + Reason: "", // Default to 0 AuthKey: cm.authKey, Profile: cm.profile, } @@ -131,13 +127,13 @@ func (cm *CertManager) RevokeIDGRegistryCert(certPEM []byte) error { resp, err := httpClient.Post(cm.caAddr+revokePath, "application/json", buf) if err != nil { - return errors.Wrap(err, "请求错误") + return errors.Wrap(err, "Request error") } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - cm.logger.With("status", resp.StatusCode).Errorf("请求错误") - return errors.New("请求错误") + cm.logger.With("status", resp.StatusCode).Errorf("Request error") + return errors.New("Request error") } return nil @@ -170,7 +166,7 @@ func (cm *CertManager) VerifyCertDefaultIssuer(leafPEM []byte) error { return err } if !ok { - return errors.New("证书被吊销") + return errors.New("Certificate revoked") } return nil } diff --git a/pkg/caclient/examples/certmanager/server.go b/pkg/caclient/examples/certmanager/server.go deleted file mode 100644 index 2b3b415..0000000 --- a/pkg/caclient/examples/certmanager/server.go +++ /dev/null @@ -1,103 +0,0 @@ -package main - -import ( - "crypto/x509/pkix" - "encoding/asn1" - "fmt" - "os" - - "github.com/spf13/pflag" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "go.uber.org/zap/zapcore" - - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" -) - -var ( - caAddr = pflag.String("ca", "https://127.0.0.1:8081", "CA Server") - ocspAddr = pflag.String("ocsp", "http://127.0.0.1:8082", "Ocsp 
Server") - authKey = pflag.String("auth-key", "ea62fa7c27307017694689f0adff09f63186cadfe92fb802133f980b75858fc6", "Auth Key") -) - -func init() { - _ = logger.GlobalConfig(logger.Conf{ - Debug: true, - Level: zapcore.DebugLevel, - }) -} - -func main() { - pflag.Parse() - err := NewIDGRegistry() - if err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } -} - -// NewIDGRegistry 注册中心测试示例 -func NewIDGRegistry() error { - cai := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleIDGRegistry, *caAddr), - caclient.WithAuthKey(*authKey), - caclient.WithOcspAddr(*ocspAddr), - ) - cm, err := cai.NewCertManager() - if err != nil { - logger.Errorf("cert manager 创建错误: %s", err) - return err - } - - _, keyPEM, _ := keygen.GenKey(keygen.RsaSigAlg) - logger.Info("生成 RSA 私钥") - - csrBytes, err := keygen.GenCustomExtendCSR(keyPEM, &spiffe.IDGIdentity{ - SiteID: "test_site", - ClusterID: "test_cluster", - UniqueID: "idg_registy_0001", - }, &keygen.CertOptions{ - CN: "test", - }, []pkix.Extension{ - { - Id: asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 7, 8, 1}, - Critical: true, - Value: []byte("fake data"), - }, - { - Id: asn1.ObjectIdentifier{1, 2, 3, 4, 5, 6, 7, 8, 2}, - Critical: true, - Value: []byte("fake data"), - }, - }) - if err != nil { - return err - } - logger.Infof("生成自定义 CSR: \n%s", string(csrBytes)) - - // 申请证书 - certBytes, err := cm.SignPEM(csrBytes, "test") - if err != nil { - logger.Errorf("申请证书失败: %s", err) - return err - } - - logger.Infof("从 CA 申请证书: \n%s", string(certBytes)) - - // 验证证书 - if err := cm.VerifyCertDefaultIssuer(certBytes); err != nil { - logger.Errorf("验证证书失败: %s", err) - return err - } - logger.Infof("验证证书成功, 证书有效") - - // 吊销证书 - if err := cm.RevokeIDGRegistryCert(certBytes); err != nil { - logger.Errorf("吊销证书失败: %s", err) - return err - } - logger.Infof("吊销证书成功") - - return nil -} diff --git a/pkg/caclient/examples/certrotator/server.go b/pkg/caclient/examples/certrotator/server.go deleted file mode 100644 index bd047b5..0000000 --- 
a/pkg/caclient/examples/certrotator/server.go +++ /dev/null @@ -1,93 +0,0 @@ -package main - -import ( - "crypto/tls" - "flag" - "github.com/pkg/errors" - "github.com/valyala/fasthttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient/examples/util" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "go.uber.org/zap/zapcore" - "net" - "time" -) - -var ( - caAddr = flag.String("ca", "", "CA Server") -) - -func main() { - logger.GlobalConfig(logger.Conf{ - Debug: true, - Level: zapcore.DebugLevel, - }) - - flag.Parse() - err := NewSidecarMTLSServer() - if err != nil { - logger.Fatal(err) - } - select {} -} - -// mTLS Server 使用示例 -func NewSidecarMTLSServer() error { - c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, *caAddr), - caclient.WithRotateAfter(10*time.Second), - caclient.WithAuthKey("0739a645a7d6601d9d45f6b237c4edeadad904f2fce53625dfdd541ec4fc8134"), - ) - ex, err := c.NewExchanger(&spiffe.IDGIdentity{ - SiteID: "test_site", - ClusterID: "cluster_test", - UniqueID: "server1", - }) - if err != nil { - return errors.Wrap(err, "Exchanger 初始化失败") - } - tlsCfg, err := ex.ServerTLSConfig() - if err != nil { - panic(err) - } - go func() { - httpsServer(tlsCfg.TLSConfig()) - }() - // 启动证书轮换 - go ex.RotateController().Run() - go func() { - <-time.After(15 * time.Second) - logger.Infof("手动触发证书失效") - ex.Transport.ManualRevoke() - ex.Transport.RefreshKeys() - logger.Infof("证书轮换完成") - // ex.RevokeItSelf() - }() - util.ExtractCertFromExchanger(ex) - return nil -} - -func httpsServer(cfg *tls.Config) { - ln, err := net.Listen("tcp4", "0.0.0.0:8082") - if err != nil { - panic(err) - } - - defer ln.Close() - - lnTls := tls.NewListener(ln, cfg) - - if err := fasthttp.Serve(lnTls, func(ctx *fasthttp.RequestCtx) { - str := ctx.Request.String() - logger.Info("Recv: ", str) - ctx.SetStatusCode(200) - ctx.SetBody([]byte("Hello")) - }); err 
!= nil { - panic(err) - } -} - -func revokeCert(ex *caclient.Exchanger) { - ex.Transport.RefreshKeys() -} diff --git a/pkg/caclient/examples/client/client.go b/pkg/caclient/examples/client/client.go index b3c8c10..90a605a 100644 --- a/pkg/caclient/examples/client/client.go +++ b/pkg/caclient/examples/client/client.go @@ -5,9 +5,9 @@ import ( "flag" "fmt" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/spiffe" "go.uber.org/zap/zapcore" "io/ioutil" "net/http" @@ -30,7 +30,7 @@ func init() { func main() { flag.Parse() - client, err := NewSidecarMTLSClient() + client, err := NewMTLSClient() if err != nil { logger.Fatalf("Client init error: %v", err) } @@ -44,18 +44,18 @@ func main() { continue } body, _ := ioutil.ReadAll(resp.Body) - logger.Infof("请求结果: %v, %s", resp.StatusCode, body) + logger.Infof("Request result: %v, %s", resp.StatusCode, body) } } -// mTLS Client 使用示例 -func NewSidecarMTLSClient() (*http.Client, error) { +// mTLS Client Use example +func NewMTLSClient() (*http.Client, error) { l, _ := logger.NewZapLogger(&logger.Conf{ // Level: 2, Level: -1, }) c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, *caAddr), + caclient.WithCAServer(caclient.RoleDefault, *caAddr), caclient.WithAuthKey(authKey), caclient.WithOcspAddr(*ocspAddr), caclient.WithLogger(l), @@ -66,7 +66,7 @@ func NewSidecarMTLSClient() (*http.Client, error) { UniqueID: "client1", }) if err != nil { - return nil, errors.Wrap(err, "Exchanger 初始化失败") + return nil, errors.Wrap(err, "Exchanger initialization failed") } cfger, err := ex.ClientTLSConfig("supreme") if err != nil { @@ -79,11 +79,10 @@ func NewSidecarMTLSClient() (*http.Client, error) { tlsCfg := cfger.TLSConfig() //tlsCfg.VerifyConnection = func(state tls.ConnectionState) 
error { // cert := state.PeerCertificates[0] - // fmt.Println("服务器证书生成时间: ", cert.NotBefore.String()) + // fmt.Println("Server certificate generation time: ", cert.NotBefore.String()) // return nil //} client := httpClient(tlsCfg) - // 启动证书轮换 go ex.RotateController().Run() // util.ExtractCertFromExchanger(ex) @@ -92,7 +91,7 @@ func NewSidecarMTLSClient() (*http.Client, error) { panic(err) } - fmt.Println("baidu 测试: ", resp.StatusCode) + fmt.Println("baidu test: ", resp.StatusCode) return client, nil } diff --git a/pkg/caclient/examples/fakeclient/client.go b/pkg/caclient/examples/fakeclient/client.go deleted file mode 100644 index fa000a9..0000000 --- a/pkg/caclient/examples/fakeclient/client.go +++ /dev/null @@ -1,102 +0,0 @@ -package main - -import ( - "crypto/tls" - "flag" - "fmt" - "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "io/ioutil" - "net/http" - "os" - "time" -) - -func main() { - flag.Parse() - Start(func() { - client, err := NewFakeMTLSClient() - if err != nil { - logger.Fatalf("Client init error: %v", err) - } - ticker := time.Tick(time.Second) - for i := 0; i < 100; i++ { - <-ticker - - resp, err := client.Get("http://127.0.0.1:8082") - if err != nil { - logger.With("resp", resp).Error(err) - continue - } - body, _ := ioutil.ReadAll(resp.Body) - logger.Infof("请求结果: %v, %s", resp.StatusCode, body) - } - }) -} - -// mTLS Client 使用示例 -func NewFakeMTLSClient() (*http.Client, error) { - c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, "https://127.0.0.1:8081"), - caclient.WithOcspAddr("http://127.0.0.1:8082")) - ex, err := c.NewExchanger(&spiffe.IDGIdentity{ - SiteID: "test_site", - ClusterID: "cluster_test", - UniqueID: "client1", - }) - if err != nil { - return nil, errors.Wrap(err, "Exchanger 初始化失败") - } - cfgg, err := ex.ClientTLSConfig("") - 
tlsCfg := cfgg.TLSConfig() - tlsCfg.InsecureSkipVerify = true - tlsCfg.VerifyPeerCertificate = nil - tlsCfg.VerifyConnection = func(state tls.ConnectionState) error { - cert := state.PeerCertificates[0] - fmt.Println("服务器证书生成时间: ", cert.NotBefore.String()) - return nil - } - client := httpClient(tlsCfg) - // 启动证书轮换 - go ex.RotateController().Run() - // util.ExtractCertFromExchanger(ex) - return client, nil -} - -func httpClient(cfg *tls.Config) *http.Client { - client := http.Client{ - Transport: &http.Transport{ - TLSClientConfig: cfg, - MaxIdleConns: 50, - MaxIdleConnsPerHost: 50, - }, - } - return &client -} - -func Start(f func()) { - hook.ClientInsecureSkipVerify = true - os.Chdir("./../../../../") - os.Setenv("IS_ENV", "test") - //cli.Start(func(i *core.I) error { - // // CA Start - // go func() { - // err := singleca.Server() - // if err != nil { - // i.Logger.Fatal(err) - // } - // }() - // return nil - //}, func(i *core.I) error { - // time.Sleep(2 * time.Second) - // - // f() - // - // os.Exit(0) - // return nil - //}) - -} diff --git a/pkg/caclient/examples/gatekeeper/main.go b/pkg/caclient/examples/gatekeeper/main.go deleted file mode 100644 index 73717cf..0000000 --- a/pkg/caclient/examples/gatekeeper/main.go +++ /dev/null @@ -1,159 +0,0 @@ -package main - -import ( - "crypto" - "crypto/x509/pkix" - "flag" - "fmt" - "github.com/davecgh/go-spew/spew" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/attrmgr" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "go.uber.org/zap/zapcore" - "golang.org/x/crypto/ocsp" -) - -var ( - caAddr = flag.String("ca", "https://127.0.0.1:8081", "CA Server") - ocspAddr = flag.String("ocsp", "http://capitalizone-tls.msp:8082", "Ocsp Server") -) - -var ( - keyPEM []byte - certPEM []byte -) - -func init() { - 
logger.GlobalConfig(logger.Conf{ - Debug: true, - Level: zapcore.DebugLevel, - }) -} - -func main() { - - flag.Parse() - generateCert() - getCertAttr() - //httpServer() -} - -func generateCert() { - cai := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleGatekeeper, *caAddr), - caclient.WithOcspAddr(*ocspAddr), - caclient.WithAuthKey("1fb4d8144367a1cdc59500a2e81f7902a4cd5da4a1f1b2211eff42202b5b70e8"), - caclient.WithLogger(logger.N())) - cm, err := cai.NewCertManager() - if err != nil { - panic(err) - } - _, keyPEM, _ = keygen.GenKey(keygen.EcdsaSigAlg) - fmt.Println("keyPEM:\n", string(keyPEM)) - // attr - mgr := attrmgr.New() - ext, _ := mgr.ToPkixExtension(&attrmgr.Attributes{ - Attrs: map[string]interface{}{ - "allow_site": []string{"*"}, - "inbound_port": 5092, - "socks5": []string{"*"}, - "tunnel": map[string]string{}, - "websocket": []string{"*"}, - "websocket_port": 5091, - "type": "server", - - //"type": "client", - //"site@test.zsnb.xyz:443": map[string]map[int][]string{ - // "websocket": map[int][]string{ - // 48080: []string{"*"}, - // }, - //}, - }, - }) - - // gen csr - csrPEM, _ := keygen.GenCustomExtendCSR(keyPEM, &spiffe.IDGIdentity{ - SiteID: "site", - ClusterID: "cluster", - UniqueID: "gatekeeper", - }, &keygen.CertOptions{ - CN: "gatekeeper", - //Host: "test.zsnb.xyz", - Host: "site@test.zsnb.xyz:443", - }, []pkix.Extension{ext}) - - fmt.Println("CSR:\n", string(csrPEM)) - - // get cert - certPEM, err = cm.SignPEM(csrPEM, "gatekeeper") - if err != nil { - panic(err) - } - fmt.Println("CERT:\n", string(certPEM)) - - caCert, err := cm.CACertsPEM() - if err != nil { - panic(err) - } - fmt.Println("caCert:\n", string(caCert)) -} - -func getCertAttr() { - mgr := attrmgr.New() - cert, err := helpers.ParseCertificatePEM(certPEM) - if err != nil { - panic(err) - } - attr, err := mgr.GetAttributesFromCert(cert) - if err != nil { - panic(err) - } - - spew.Dump(attr) -} - -func httpServer() { - fmt.Println("get ca client...") - cai := caclient.NewCAI( 
- caclient.WithCAServer(caclient.RoleGatekeeper, *caAddr), - caclient.WithOcspAddr(*ocspAddr), - caclient.WithAuthKey("1fb4d8144367a1cdc59500a2e81f7902a4cd5da4a1f1b2211eff42202b5b70e8"), - caclient.WithLogger(logger.N())) - fmt.Println("get keypair ...") - ex, err := cai.NewExchangerWithKeypair(&spiffe.IDGIdentity{}, keyPEM, certPEM) - if err != nil { - panic(err) - } - fmt.Println("get cert...") - tlsCert, err := ex.Transport.GetCertificate() - if err != nil { - panic(err) - } - cm, err := cai.NewCertManager() - if err != nil { - panic(err) - } - caCert, err := cm.CACert() - if err != nil { - panic(err) - } - fmt.Println("ocsp validate...") - ocspReq, _ := ocsp.CreateRequest(tlsCert.Leaf, caCert, &ocsp.RequestOptions{ - Hash: crypto.SHA1, - }) - ocspResp, err := caclient.SendOcspRequest("http://ocspv2.gw108.oneitfarm.com", ocspReq, tlsCert.Leaf, caCert) - if err != nil { - panic(err) - } - fmt.Println("OCSP Status: ", ocspResp.Status) - fmt.Println("get tls config...") - tlsCfger, err := ex.ServerTLSConfig() - if err != nil { - panic(err) - } - tlsCfg := tlsCfger.TLSConfig() - fmt.Printf("tls config: %#v", tlsCfg) -} diff --git a/pkg/caclient/examples/logger/logger.go b/pkg/caclient/examples/logger/logger.go deleted file mode 100644 index f6a073e..0000000 --- a/pkg/caclient/examples/logger/logger.go +++ /dev/null @@ -1,10 +0,0 @@ -package main - -import ( - _ "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "log" -) - -func main() { - log.Println("test2") -} diff --git a/pkg/caclient/examples/sentry-build/main.go b/pkg/caclient/examples/sentry-build/main.go deleted file mode 100644 index 1e73a67..0000000 --- a/pkg/caclient/examples/sentry-build/main.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import ( - "crypto/x509/pkix" - "fmt" - - "github.com/spf13/pflag" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/attrmgr" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - 
"gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" -) - -var ( - caURL = pflag.String("ca-url", "https://127.0.0.1:8081", "CA URL") - token = pflag.String("token", "1fb4d8144367a1cdc59500a2e81f7902a4cd5da4a1f1b2211eff42202b5b70e8", "Auth Token") -) - -// PEM -var ( - caPEM string - certPEM string - keyPEM string -) - -func init() { - pflag.Parse() -} - -func main() { - sign() -} - -func sign() error { - cai := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleGatekeeper /*哨兵*/, *caURL), - caclient.WithAuthKey(*token), - ) - - mgr, err := cai.NewCertManager() - if err != nil { - return err - } - - // CA PEM - caPEMBytes, err := mgr.CACertsPEM() - if err != nil { - return err - } - caPEM = string(caPEMBytes) - - fmt.Println("certs pem: \n", caPEM) - - // KEY PEM - _, keyPEMBytes, _ := keygen.GenKey(keygen.EcdsaSigAlg) - - // 证书扩展字段 - attr := attrmgr.New() - ext, _ := attr.ToPkixExtension(&attrmgr.Attributes{ - // 注入参数 Map[string]interface{} - Attrs: map[string]interface{}{ - "k1": "v1", - "k2": "v2", - }, - }) - - // gen csr - csrPEM, _ := keygen.GenCustomExtendCSR(keyPEMBytes, &spiffe.IDGIdentity{ - SiteID: "site", /* Site 标识 */ - ClusterID: "cluster", - UniqueID: "gatekeeper", - }, &keygen.CertOptions{ /* 通常为固定值 */ - CN: "msp.sentry", - Host: "msp.sentry,127.0.0.1", - }, []pkix.Extension{ext} /* 注入扩展字段 */) - - fmt.Println("CSR PEM:\n", string(csrPEM)) - - // get cert - certPEMBytes, err := mgr.SignPEM(csrPEM, "sentry") - if err != nil { - panic(err) - } - certPEM = string(certPEMBytes) - fmt.Println("CERT:\n", certPEM) - - return nil -} diff --git a/pkg/caclient/examples/server/server.go b/pkg/caclient/examples/server/server.go index 28cbffe..aac1c1b 100644 --- a/pkg/caclient/examples/server/server.go +++ b/pkg/caclient/examples/server/server.go @@ -4,21 +4,21 @@ import ( "crypto/tls" "flag" "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient/examples/util" + "github.com/ztalab/ZACA/pkg/caclient/examples/util" "net" "github.com/pkg/errors" 
"github.com/valyala/fasthttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/keygen" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/spiffe" "go.uber.org/zap/zapcore" ) var ( - caAddr = flag.String("ca", "https://192.168.2.80:8381", "CA Server") - ocspAddr = flag.String("ocsp", "http://192.168.2.80:8382", "Ocsp Server") + caAddr = flag.String("ca", "https://127.0.0.1:8081", "CA Server") + ocspAddr = flag.String("ocsp", "http://127.0.0.1:8382", "Ocsp Server") addr = flag.String("addr", ":6066", "") authKey = "0739a645a7d6601d9d45f6b237c4edeadad904f2fce53625dfdd541ec4fc8134" ) @@ -32,21 +32,21 @@ func init() { func main() { flag.Parse() - err := NewSidecarMTLSServer() + err := NewMTLSServer() if err != nil { logger.Fatal(err) } select {} } -// NewSidecarMTLSServer mTLS Server 使用示例 -func NewSidecarMTLSServer() error { +// NewMTLSServer mTLS Server Use example +func NewMTLSServer() error { l, _ := logger.NewZapLogger(&logger.Conf{ // Level: 2, Level: 0, }) c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, *caAddr), + caclient.WithCAServer(caclient.RoleDefault, *caAddr), caclient.WithOcspAddr(*ocspAddr), caclient.WithAuthKey(authKey), caclient.WithLogger(l), @@ -61,10 +61,10 @@ func NewSidecarMTLSServer() error { UniqueID: "server1", }) if err != nil { - return errors.Wrap(err, "Exchanger 初始化失败") + return errors.Wrap(err, "Exchanger initialization failed") } - // 启动证书轮换 + // Start certificate rotation go ex.RotateController().Run() cfger, err := ex.ServerTLSConfig() diff --git a/pkg/caclient/examples/util/cert_parser.go b/pkg/caclient/examples/util/cert_parser.go index 8b61c44..e48a69f 100644 --- a/pkg/caclient/examples/util/cert_parser.go +++ b/pkg/caclient/examples/util/cert_parser.go @@ 
-4,22 +4,22 @@ import ( "crypto/x509" "encoding/pem" "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/helpers" ) func ExtractCertFromExchanger(ex *caclient.Exchanger) { - logger := v2log.Named("keypair-exporter") + logger := logger.Named("keypair-exporter") tlsCert, err := ex.Transport.GetCertificate() if err != nil { - logger.Errorf("TLS 证书获取失败: %v", err) + logger.Errorf("TLS Certificate acquisition failed: %v", err) return } cert := helpers.EncodeCertificatePEM(tlsCert.Leaf) keyBytes, err := x509.MarshalPKCS8PrivateKey(tlsCert.PrivateKey) if err != nil { - logger.Errorf("TLS 证书 Private Key 获取失败: %v", err) + logger.Errorf("TLS certificate private key acquisition failed: %v", err) return } @@ -31,19 +31,19 @@ func ExtractCertFromExchanger(ex *caclient.Exchanger) { trustCerts := ex.Transport.TrustStore.Certificates() caCerts := make([][]byte, 0, len(trustCerts)) - fmt.Println("--- CA 证书 Stared ---") + fmt.Println("--- CA Certificate Stared ---") for _, caCert := range trustCerts { caCertBytes := helpers.EncodeCertificatePEM(caCert) caCerts = append(caCerts, caCertBytes) fmt.Println("---\n", string(caCertBytes), "\n---") } - fmt.Println("--- CA 证书 End ---") + fmt.Println("--- CA Certificate End ---") fmt.Println() fmt.Println() fmt.Println() fmt.Println() fmt.Println() - fmt.Println("--- 私钥 Stared ---\n", string(key), "\n--- 私钥 End ---") - fmt.Println("--- 证书 Stared ---\n", string(cert), "\n--- 证书 End ---") + fmt.Println("--- Private key Stared ---\n", string(key), "\n--- Private key End ---") + fmt.Println("--- Certificate Stared ---\n", string(cert), "\n--- Certificate End ---") } diff --git a/pkg/caclient/exchanger.go b/pkg/caclient/exchanger.go index bda0a0f..f4c34df 100644 --- a/pkg/caclient/exchanger.go +++ b/pkg/caclient/exchanger.go @@ -3,18 +3,18 
@@ package caclient import ( "github.com/cloudflare/backoff" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keyprovider" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - "gitlab.oneitfarm.com/bifrost/cfssl/transport" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/roots" + "github.com/ztalab/ZACA/pkg/keyprovider" + "github.com/ztalab/ZACA/pkg/spiffe" + "github.com/ztalab/cfssl/hook" + "github.com/ztalab/cfssl/transport" + "github.com/ztalab/cfssl/transport/roots" "go.uber.org/zap" "net/url" "reflect" ) const ( - // CertRefreshDurationRate 证书轮回时间率 + // CertRefreshDurationRate Certificate cycle time rate CertRefreshDurationRate int = 2 ) @@ -31,7 +31,7 @@ type Exchanger struct { } func init() { - // CFSSL API Client 连接 API Server 不进行证书验证 (单向 TLS) + // Cfssl API client connects to API server without certificate verification (one-way TLS) hook.ClientInsecureSkipVerify = true } @@ -81,10 +81,10 @@ func (cai *CAInstance) NewExchanger(id *spiffe.IDGIdentity) (*Exchanger, error) func (cai *CAInstance) NewTransport(id *spiffe.IDGIdentity, keyPEM []byte, certPEM []byte) (*Transport, error) { l := cai.Logger.Sugar() - l.Debug("NewTransport 开始") + l.Debug("NewTransport Start") if _, err := url.Parse(cai.CaAddr); err != nil { - return nil, errors.Wrap(err, "CA ADDR 错误") + return nil, errors.Wrap(err, "CA ADDR Error") } var tr = &Transport{ @@ -94,18 +94,17 @@ func (cai *CAInstance) NewTransport(id *spiffe.IDGIdentity, keyPEM []byte, certP logger: l.Named("ca"), } - l.Debugf("[NEW]: 证书轮换率: %v", tr.CertRefreshDurationRate) + l.Debugf("[NEW]: Certificate rotation rate: %v", tr.CertRefreshDurationRate) - l.Debug("roots 初始化") + l.Debug("roots Initialization") store, err := roots.New(cai.CFIdentity.Roots) if err != nil { return nil, err } tr.TrustStore = store - l.Debug("client roots 初始化") + l.Debug("client roots Initialization") if len(cai.CFIdentity.ClientRoots) > 0 { - // 如果 cai.CFIdentity.Roots 
与cai.CFIdentity.ClientRoots 相同,则不重复请求 if !reflect.DeepEqual(cai.CFIdentity.Roots, cai.CFIdentity.ClientRoots) { store, err = roots.New(cai.CFIdentity.ClientRoots) if err != nil { @@ -116,7 +115,7 @@ func (cai *CAInstance) NewTransport(id *spiffe.IDGIdentity, keyPEM []byte, certP tr.ClientTrustStore = store } - l.Debug("xkeyProvider 初始化") + l.Debug("xkeyProvider Initialization") xkey, err := keyprovider.NewXKeyProvider(id) if err != nil { return nil, err @@ -124,18 +123,18 @@ func (cai *CAInstance) NewTransport(id *spiffe.IDGIdentity, keyPEM []byte, certP xkey.CSRConf = cai.CSRConf if keyPEM != nil && certPEM != nil { - l.Debug("xkeyProvider 设置 keyPEM") + l.Debug("xkeyProvider set up keyPEM") if err := xkey.SetPrivateKeyPEM(keyPEM); err != nil { return nil, err } - l.Debug("xkeyProvider 设置 certPEM") + l.Debug("xkeyProvider set up certPEM") if err := xkey.SetCertificatePEM(certPEM); err != nil { return nil, err } } tr.Provider = xkey - l.Debug("CA 初始化") + l.Debug("CA Initialization") tr.CA, err = transport.NewCA(cai.CFIdentity) if err != nil { return nil, err diff --git a/pkg/caclient/http.go b/pkg/caclient/http.go index 3b7c09a..47ab37e 100644 --- a/pkg/caclient/http.go +++ b/pkg/caclient/http.go @@ -14,5 +14,5 @@ var httpClient = http.Client{ MaxIdleConns: 50, MaxIdleConnsPerHost: 50, }, - Timeout: 1 * time.Second, // 超时时间 + Timeout: 1 * time.Second, } diff --git a/pkg/caclient/logger.go b/pkg/caclient/logger.go index ad42a13..4d48e5d 100644 --- a/pkg/caclient/logger.go +++ b/pkg/caclient/logger.go @@ -3,12 +3,12 @@ package caclient import ( "log" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" "go.uber.org/zap" ) func init() { - f := zap.RedirectStdLog(v2log.S().Desugar()) + f := zap.RedirectStdLog(logger.S().Desugar()) f() log.SetFlags(log.LstdFlags) } diff --git a/pkg/caclient/ocsp_impl.go b/pkg/caclient/ocsp_impl.go index a10081e..74263d8 100644 --- a/pkg/caclient/ocsp_impl.go +++ b/pkg/caclient/ocsp_impl.go @@ -13,7 +13,7 
@@ import ( "golang.org/x/sync/singleflight" "github.com/pkg/errors" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" ) var ocspBlockSign int64 = 0 @@ -44,8 +44,8 @@ func SendOcspRequest(server string, req []byte, leaf, issuer *x509.Certificate) defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - v2log.With("url", server, v2log.DynFieldErrCode, resp.Status, "body", string(body)). - Warnf("请求错误") + logger.With("url", server, resp.Status, "body", string(body)). + Warnf("Request error") return nil, fmt.Errorf("ocsp response err: %v", resp.Status) } @@ -64,19 +64,19 @@ func SendOcspRequest(server string, req []byte, leaf, issuer *x509.Certificate) parsedOcspResp, err := ocsp.ParseResponseForCert(body, leaf, issuer) if err != nil { - v2log.With("body", string(body)).Errorf("ocsp 解析错误: %v", err) - return nil, errors.Wrap(err, "ocsp 解析错误") + logger.With("body", string(body)).Errorf("ocsp Parsing error: %v", err) + return nil, errors.Wrap(err, "ocsp Parsing error") } return parsedOcspResp, nil } -// BlockOcspRequests 阻止 Ocsp 请求, 会导致 mTLS 握手失败 +// BlockOcspRequests Blocking OCSP requests will cause the MTLs handshake to fail func BlockOcspRequests() { atomic.StoreInt64(&ocspBlockSign, 1) } -// AllowOcspRequests 允许 Ocsp 请求 +// AllowOcspRequests func AllowOcspRequests() { atomic.StoreInt64(&ocspBlockSign, 0) } diff --git a/pkg/caclient/ocsp_interface.go b/pkg/caclient/ocsp_interface.go index e149a79..c46ea9b 100644 --- a/pkg/caclient/ocsp_interface.go +++ b/pkg/caclient/ocsp_interface.go @@ -2,7 +2,7 @@ package caclient import "crypto/x509" -// OcspClient Ocsp 客户端 +// OcspClient Ocsp Client type OcspClient interface { Validate(leaf, issuer *x509.Certificate) (bool, error) Reset() diff --git a/pkg/caclient/ocsp_mem_cache.go b/pkg/caclient/ocsp_mem_cache.go index 0cf2cba..0265093 100644 --- a/pkg/caclient/ocsp_mem_cache.go +++ b/pkg/caclient/ocsp_mem_cache.go @@ -10,7 +10,7 @@ import ( "github.com/pkg/errors" 
"golang.org/x/crypto/ocsp" - "gitlab.oneitfarm.com/bifrost/go-toolbox/memorycacher" + "github.com/ztalab/ZACA/pkg/memorycacher" "go.uber.org/zap" ) @@ -35,42 +35,42 @@ func NewOcspMemCache(logger *zap.SugaredLogger, ocspAddr string) (OcspClient, er // Validate ... func (of *ocspMemCache) Validate(leaf, issuer *x509.Certificate) (bool, error) { if atomic.LoadInt64(&ocspBlockSign) == 1 { - return false, errors.New("ocsp 请求被禁用") + return false, errors.New("ocsp Request disabled") } if leaf == nil || issuer == nil { - return false, errors.New("leaf/issuer 参数缺失") + return false, errors.New("leaf/issuer Missing parameter") } lo := of.logger.With("sn", leaf.SerialNumber.String(), "aki", hex.EncodeToString(leaf.AuthorityKeyId), "id", leaf.URIs[0]) - // 缓存获取 + // Cache fetch if _, ok := of.cache.Get(leaf.SerialNumber.String()); ok { return true, nil } ocspRequest, err := ocsp.CreateRequest(leaf, issuer, &ocspOpts) if err != nil { lo.Errorf("ocsp req create err: %s", err) - return false, errors.Wrap(err, "ocsp req 创建失败") + return false, errors.Wrap(err, "ocsp req Creation failed") } getOcspFunc := func() (interface{}, error) { return SendOcspRequest(of.ocspURL, ocspRequest, leaf, issuer) } sgValue, err, _ := sg.Do("ocsp"+leaf.SerialNumber.String(), getOcspFunc) if err != nil { - lo.Errorf("ocsp 请求错误: %v", err) - // 这里因为 Ca Server 原因导致验证失败, 允许请求, 下一次再重试 - return true, errors.Wrap(err, "ocsp 请求错误") + lo.Errorf("ocsp Request error: %v", err) + // Here, the authentication fails due to CA server. The request is allowed. 
Try again next time + return true, errors.Wrap(err, "ocsp Request error") } ocspResp, ok := sgValue.(*ocsp.Response) if !ok { - lo.Error("single flight 解析错误") - return false, errors.New("single flight 解析错误") + lo.Error("single flight Parsing error") + return false, errors.New("single flight Parsing error") } - lo.Debugf("验证 OCSP, 结果: %v", ocspResp.Status) + lo.Debugf("Verify OCSP and the results: %v", ocspResp.Status) if ocspResp.Status == int(ocsp.Success) { of.cache.SetDefault(leaf.SerialNumber.String(), true) return true, nil } - lo.Warnf("证书 OCSP 验证失效") - return false, errors.New("ocsp 验证失败, 证书被吊销") + lo.Warnf("Certificate OCSP validation invalid") + return false, errors.New("ocsp Authentication failed and the certificate was revoked") } func (of *ocspMemCache) Reset() { diff --git a/pkg/caclient/revoke.go b/pkg/caclient/revoke.go index ec8522f..3711ca0 100644 --- a/pkg/caclient/revoke.go +++ b/pkg/caclient/revoke.go @@ -10,7 +10,7 @@ import ( "github.com/pkg/errors" jsoniter "github.com/json-iterator/go" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/signature" + "github.com/ztalab/ZACA/pkg/signature" ) var revokePath = "/api/v1/cfssl/revoke" @@ -26,7 +26,7 @@ type RevokeRequest struct { Profile string `json:"profile"` } -// RevokeItSelf 吊销自身证书 +// RevokeItSelf Revoke one's own certificate func (ex *Exchanger) RevokeItSelf() error { tlsCert, err := ex.Transport.GetCertificate() if err != nil { @@ -38,7 +38,7 @@ func (ex *Exchanger) RevokeItSelf() error { if err := revokeCert(ex.caAddr, priv, cert); err != nil { return err } - ex.logger.With("sn", cert.SerialNumber.String()).Info("服务下线吊销自身证书") + ex.logger.With("sn", cert.SerialNumber.String()).Info("Service offline revoking its own certificate") return nil } @@ -60,7 +60,7 @@ func revokeCert(caAddr string, priv crypto.PublicKey, cert *x509.Certificate) er req := &RevokeRequest{ Serial: cert.SerialNumber.String(), AKI: hex.EncodeToString(cert.AuthorityKeyId), - Reason: "", // 默认为 0 + Reason: "", Nonce: nonce, 
Sign: sign, } @@ -71,12 +71,12 @@ func revokeCert(caAddr string, priv crypto.PublicKey, cert *x509.Certificate) er resp, err := httpClient.Post(caAddr+revokePath, "application/json", buf) if err != nil { - return errors.Wrap(err, "请求错误") + return errors.Wrap(err, "Request error") } defer resp.Body.Close() if resp.StatusCode != http.StatusOK { - return errors.New("请求错误") + return errors.New("Request error") } return nil diff --git a/pkg/caclient/rorate_controller.go b/pkg/caclient/rorate_controller.go index 152a92c..334ce1f 100644 --- a/pkg/caclient/rorate_controller.go +++ b/pkg/caclient/rorate_controller.go @@ -3,8 +3,7 @@ package caclient import ( "time" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/kutil/wait" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/roots" + "github.com/ztalab/cfssl/transport/roots" "go.uber.org/zap" ) @@ -16,7 +15,6 @@ type RotateController struct { } // Run ... -// TODO CA 证书定时更换、CFSSL Info 接口返回值 RootCerts 更改为 Map[string]string、SPIFFE 认证结构调整 func (rc *RotateController) Run() { log := rc.logger ticker := time.NewTicker(60 * time.Minute) @@ -26,10 +24,10 @@ func (rc *RotateController) Run() { for { select { case <-ticker.C: - // 自动更新证书 + // Automatically update certificates err := rc.transport.AutoUpdate() if err != nil { - log.Errorf("证书轮换失败: %v", err) + log.Errorf("Certificate rotation failed: %v", err) } rc.AddCert() } @@ -38,27 +36,20 @@ func (rc *RotateController) Run() { func (rc *RotateController) AddCert() { log := rc.logger - _ = wait.ExponentialBackoff(wait.Backoff{ - Steps: 5, - Duration: 1 * time.Second, - Factor: 3, - Jitter: 0.1, - }, func() (done bool, err error) { - store, err := roots.New(rc.transport.Identity.Roots) - if err != nil { - log.Errorf("获取 roots 失败: %v", err) - return false, nil - } - rc.transport.TrustStore.AddCerts(store.Certificates()) + store, err := roots.New(rc.transport.Identity.Roots) + if err != nil { + log.Errorf("Failed to get roots: %v", err) + return + } + 
rc.transport.TrustStore.AddCerts(store.Certificates()) - if len(rc.transport.Identity.ClientRoots) > 0 { - store, err = roots.New(rc.transport.Identity.ClientRoots) - if err != nil { - log.Errorf("获取 client roots 失败: %v", err) - return false, nil - } - rc.transport.ClientTrustStore.AddCerts(store.Certificates()) + if len(rc.transport.Identity.ClientRoots) > 0 { + store, err = roots.New(rc.transport.Identity.ClientRoots) + if err != nil { + log.Errorf("Failed to get client roots: %v", err) + return } - return true, nil - }) + rc.transport.ClientTrustStore.AddCerts(store.Certificates()) + } + return } diff --git a/pkg/caclient/test/bench_test.go b/pkg/caclient/test/bench_test.go index d8ca9fc..eeb4729 100644 --- a/pkg/caclient/test/bench_test.go +++ b/pkg/caclient/test/bench_test.go @@ -3,9 +3,9 @@ package test import ( "fmt" "github.com/valyala/fasthttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - cflog "gitlab.oneitfarm.com/bifrost/cfssl/log" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" + cflog "github.com/ztalab/cfssl/log" "net" "testing" "time" @@ -26,7 +26,7 @@ func BenchmarkNormalHTTP(b *testing.B) { func BenchmarkHTTPS(b *testing.B) { cflog.Level = cflog.LevelDebug c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, "https://127.0.0.1:8081"), + caclient.WithCAServer(caclient.RoleDefault, "https://127.0.0.1:8081"), caclient.WithOcspAddr("http://127.0.0.1:8082")) serverEx, err := c.NewExchanger(&spiffe.IDGIdentity{ SiteID: "test_site", @@ -39,12 +39,12 @@ func BenchmarkHTTPS(b *testing.B) { UniqueID: "client1", }) if err != nil { - b.Error("transport 错误: ", err) + b.Error("transport Error: ", err) } serverTls, err := serverEx.ServerTLSConfig() if err != nil { - b.Error("服务器 tls 获取错误: ", err) + b.Error("Server TLS get error: ", err) } clientTls, err := clientEx.ClientTLSConfig("127.0.0.1") if err != nil { @@ -71,7 +71,7 @@ func httpServer() { 
if err := fasthttp.Serve(ln, func(ctx *fasthttp.RequestCtx) { str := ctx.Request.String() - fmt.Println("服务器接收: ", str) + fmt.Println("Server reception: ", str) ctx.SetStatusCode(200) ctx.SetBody([]byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")) }); err != nil { diff --git a/pkg/caclient/test/cert_test.go b/pkg/caclient/test/cert_test.go index 95a3e6d..379084b 100644 --- a/pkg/caclient/test/cert_test.go +++ b/pkg/caclient/test/cert_test.go @@ -2,11 +2,11 @@ package test import ( "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/cfssl/hook" - cflog "gitlab.oneitfarm.com/bifrost/cfssl/log" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" + "github.com/ztalab/cfssl/helpers" + "github.com/ztalab/cfssl/hook" + cflog "github.com/ztalab/cfssl/log" "testing" ) @@ -14,7 +14,7 @@ func TestCert(t *testing.T) { hook.ClientInsecureSkipVerify = true cflog.Level = -1 c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, "https://127.0.0.1:8081"), + caclient.WithCAServer(caclient.RoleDefault, "https://127.0.0.1:8081"), caclient.WithOcspAddr("http://127.0.0.1:8082")) ex, err := c.NewExchanger(&spiffe.IDGIdentity{ SiteID: "test_site", diff --git a/pkg/caclient/test/main_test.go b/pkg/caclient/test/main_test.go index 1c6bc29..bf63840 100644 --- a/pkg/caclient/test/main_test.go +++ b/pkg/caclient/test/main_test.go @@ -1,7 +1,7 @@ package test import ( - "gitlab.oneitfarm.com/bifrost/cfssl/hook" + "github.com/ztalab/cfssl/hook" "os" "testing" ) @@ -28,4 +28,4 @@ func TestMain(m *testing.M) { // os.Exit(0) // return nil //}) -} \ No newline at end of file +} diff --git a/pkg/caclient/test/mtls_test.go b/pkg/caclient/test/mtls_test.go index e1dc9da..1ec2493 100644 --- a/pkg/caclient/test/mtls_test.go +++ b/pkg/caclient/test/mtls_test.go @@ -4,10 +4,10 @@ import ( "crypto/tls" "fmt" 
"github.com/valyala/fasthttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - cflog "gitlab.oneitfarm.com/bifrost/cfssl/log" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" + "github.com/ztalab/cfssl/helpers" + cflog "github.com/ztalab/cfssl/log" "net" "net/http" "os" @@ -18,7 +18,7 @@ import ( func TestMTls(t *testing.T) { cflog.Level = cflog.LevelDebug c := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, "https://127.0.0.1:8081"), + caclient.WithCAServer(caclient.RoleDefault, "https://127.0.0.1:8081"), caclient.WithOcspAddr("http://127.0.0.1:8082")) serverEx, err := c.NewExchanger(&spiffe.IDGIdentity{ SiteID: "test_site", @@ -31,24 +31,24 @@ func TestMTls(t *testing.T) { UniqueID: "client1", }) if err != nil { - t.Error("transport 错误: ", err) + t.Error("transport Error: ", err) } serverTls, err := serverEx.ServerTLSConfig() if err != nil { - t.Error("服务器 tls 获取错误: ", err) + t.Error("Server TLS get error: ", err) } - fmt.Println("------------- 服务器信任证书 --------------") + fmt.Println("------------- Server trust certificate --------------") fmt.Println(string(helpers.EncodeCertificatesPEM(serverEx.Transport.ClientTrustStore.Certificates()))) - fmt.Println("------------- END 服务器信任证书 --------------") + fmt.Println("------------- END Server trust certificate --------------") clientTls, err := clientEx.ClientTLSConfig("") if err != nil { t.Error("client tls config get error: ", err) } - fmt.Println("------------- 客户端信任证书 --------------") + fmt.Println("------------- Client trust certificate --------------") fmt.Println(string(helpers.EncodeCertificatesPEM(clientEx.Transport.TrustStore.Certificates()))) - fmt.Println("------------- END 客户端信任证书 --------------") + fmt.Println("------------- END Client trust certificate --------------") go func() { httpsServer(serverTls.TLSConfig()) @@ -60,10 +60,10 @@ func 
TestMTls(t *testing.T) { for range messages { resp, err := client.Get("https://127.0.0.1:8082/test111111") if err != nil { - fmt.Fprint(os.Stderr, "请求失败: ", err) + fmt.Fprint(os.Stderr, "request was aborted: ", err) } - fmt.Println("请求成功: ", resp.Status) + fmt.Println("Request succeeded: ", resp.Status) } } @@ -90,7 +90,7 @@ func httpsServer(cfg *tls.Config) { if err := fasthttp.Serve(lnTls, func(ctx *fasthttp.RequestCtx) { str := ctx.Request.String() - fmt.Println("服务器接收: ", str) + fmt.Println("Server reception: ", str) ctx.SetStatusCode(200) ctx.SetBody([]byte("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA")) }); err != nil { diff --git a/pkg/caclient/tls.go b/pkg/caclient/tls.go index 5eea389..a14eaaa 100644 --- a/pkg/caclient/tls.go +++ b/pkg/caclient/tls.go @@ -6,9 +6,9 @@ import ( "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/core" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/spiffe" + "github.com/ztalab/cfssl/transport/core" ) // TLSGenerator ... 
@@ -21,13 +21,13 @@ func NewTLSGenerator(cfg *tls.Config) *TLSGenerator { return &TLSGenerator{Cfg: cfg} } -// ExtraValidator 自定义验证函数, 在验证证书成功后执行 +// ExtraValidator User defined verification function, which is executed after the certificate is verified successfully type ExtraValidator func(identity *spiffe.IDGIdentity) error -// BindExtraValidator 注册自定义验证函数 +// BindExtraValidator Register custom validation function func (tg *TLSGenerator) BindExtraValidator(validator ExtraValidator) { vc := func(state tls.ConnectionState) error { - // 若没有证书, 会在上一阶段被阻断 + // If there is no certificate, it will be blocked in the previous stage if len(state.PeerCertificates) == 0 { return nil } @@ -40,7 +40,7 @@ func (tg *TLSGenerator) BindExtraValidator(validator ExtraValidator) { } getServerTls := tg.Cfg.GetConfigForClient if getServerTls != nil { - // 服务端动态获取 + // Server dynamic acquisition tg.Cfg.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) { tlsCfg, err := getServerTls(info) if err != nil { @@ -54,7 +54,7 @@ func (tg *TLSGenerator) BindExtraValidator(validator ExtraValidator) { } } -// TLSConfig 获取 Golang 原生 TLS Config +// TLSConfig Get golang native TLS config func (tg *TLSGenerator) TLSConfig() *tls.Config { return tg.Cfg } @@ -64,7 +64,7 @@ func (ex *Exchanger) ClientTLSConfig(host string) (*TLSGenerator, error) { lo := ex.logger lo.Debug("client tls started.") if _, err := ex.Transport.GetCertificate(); err != nil { - return nil, errors.Wrap(err, "客户端证书获取错误") + return nil, errors.Wrap(err, "Client certificate acquisition error") } c, err := ex.Transport.TLSClientAuthClientConfig(host) if err != nil { @@ -74,7 +74,7 @@ func (ex *Exchanger) ClientTLSConfig(host string) (*TLSGenerator, error) { if len(rawCerts) > 0 && len(verifiedChains) > 0 { leaf, err := x509.ParseCertificate(rawCerts[0]) if err != nil { - lo.Errorf("leaf 证书解析错误: %v", err) + lo.Errorf("leaf Certificate parsing error: %v", err) return err } if ok, err := ex.OcspFetcher.Validate(leaf, 
verifiedChains[0][1]); !ok { @@ -91,7 +91,7 @@ func (ex *Exchanger) ServerHTTPSConfig() (*TLSGenerator, error) { lo := ex.logger lo.Debug("server tls started.") if _, err := ex.Transport.GetCertificate(); err != nil { - return nil, errors.Wrap(err, "服务器证书获取错误") + return nil, errors.Wrap(err, "Server certificate acquisition error") } c, err := ex.Transport.TLSClientAuthServerConfig() if err != nil { @@ -102,7 +102,7 @@ func (ex *Exchanger) ServerHTTPSConfig() (*TLSGenerator, error) { GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { cert, err := ex.Transport.GetCertificate() if err != nil { - logger.Named("transport").Errorf("服务器证书获取错误: %v", err) + logger.Named("transport").Errorf("Server certificate acquisition error: %v", err) return nil, err } return cert, nil @@ -121,7 +121,7 @@ func (ex *Exchanger) ServerTLSConfig() (*TLSGenerator, error) { lo := ex.logger lo.Debug("server tls started.") if _, err := ex.Transport.GetCertificate(); err != nil { - return nil, errors.Wrap(err, "服务器证书获取错误") + return nil, errors.Wrap(err, "Server certificate acquisition error") } c, err := ex.Transport.TLSClientAuthServerConfig() if err != nil { @@ -132,7 +132,7 @@ func (ex *Exchanger) ServerTLSConfig() (*TLSGenerator, error) { GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { cert, err := ex.Transport.GetCertificate() if err != nil { - logger.Named("transport").Errorf("服务器证书获取错误: %v", err) + logger.Named("transport").Errorf("Server certificate acquisition error: %v", err) return nil, err } return cert, nil @@ -143,7 +143,7 @@ func (ex *Exchanger) ServerTLSConfig() (*TLSGenerator, error) { if len(rawCerts) > 0 && len(verifiedChains) > 0 { leaf, err := x509.ParseCertificate(rawCerts[0]) if err != nil { - lo.Errorf("leaf 证书解析错误: %v", err) + lo.Errorf("leaf Certificate parsing error: %v", err) return err } if ok, err := ex.OcspFetcher.Validate(leaf, verifiedChains[0][1]); !ok { diff --git a/pkg/caclient/transport.go 
b/pkg/caclient/transport.go index 66b05c0..79f1d11 100644 --- a/pkg/caclient/transport.go +++ b/pkg/caclient/transport.go @@ -8,18 +8,16 @@ import ( "go.uber.org/zap" "github.com/cloudflare/backoff" - "gitlab.oneitfarm.com/bifrost/cfssl/csr" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/ca" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/core" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/kp" - "gitlab.oneitfarm.com/bifrost/cfssl/transport/roots" + "github.com/ztalab/cfssl/csr" + "github.com/ztalab/cfssl/transport/ca" + "github.com/ztalab/cfssl/transport/core" + "github.com/ztalab/cfssl/transport/kp" + "github.com/ztalab/cfssl/transport/roots" ) // A Transport is capable of providing transport-layer security using // TLS. type Transport struct { - // 证书过期比率 - // 总时间 / rate = 剩余时间 CertRefreshDurationRate int // Provider contains a key management provider. @@ -51,19 +49,18 @@ type Transport struct { // error. RevokeSoftFail bool - // 实验性, 测试使用 manualRevoke bool logger *zap.SugaredLogger } -// TLSClientAuthClientConfig 客户端 TLS 配置,动态更改证书 +// TLSClientAuthClientConfig Client TLS configuration, changing certificate dynamically func (tr *Transport) TLSClientAuthClientConfig(host string) (*tls.Config, error) { return &tls.Config{ GetClientCertificate: func(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { cert, err := tr.GetCertificate() if err != nil { - tr.logger.Errorf("客户端证书获取错误: %v", err) + tr.logger.Errorf("Client certificate acquisition error: %v", err) return nil, err } return cert, nil @@ -75,16 +72,16 @@ func (tr *Transport) TLSClientAuthClientConfig(host string) (*tls.Config, error) }, nil } -// TLSClientAuthServerConfig 服务器 TLS 配置,需要动态更改证书 +// TLSClientAuthServerConfig The server TLS configuration needs to be changed dynamically func (tr *Transport) TLSClientAuthServerConfig() (*tls.Config, error) { return &tls.Config{ - // 动态获取配置 + // Get configuration dynamically GetConfigForClient: func(info *tls.ClientHelloInfo) (*tls.Config, error) { 
tlsConfig := &tls.Config{ GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { cert, err := tr.GetCertificate() if err != nil { - tr.logger.Errorf("服务器证书获取错误: %v", err) + tr.logger.Errorf("Server certificate acquisition error: %v", err) return nil, err } return cert, nil @@ -107,7 +104,7 @@ func (tr *Transport) TLSServerConfig() (*tls.Config, error) { GetCertificate: func(info *tls.ClientHelloInfo) (*tls.Certificate, error) { cert, err := tr.GetCertificate() if err != nil { - tr.logger.Errorf("服务器证书获取错误: %v", err) + tr.logger.Errorf("Server certificate acquisition error: %v", err) return nil, err } return cert, nil @@ -120,8 +117,8 @@ func (tr *Transport) TLSServerConfig() (*tls.Config, error) { }, nil } -// Lifespan 返回一个证书剩余更换时间, 小于等于 0 则必须更换证书 -// remain 证书剩余总时长, ava 更新时间 +// Lifespan Returns the remaining replacement time of a certificate. If it is less than or equal to 0, the certificate must be replaced +// remain Total remaining time of certificate, ava update time func (tr *Transport) Lifespan() (remain time.Duration, ava time.Duration) { cert := tr.Provider.Certificate() if cert == nil { @@ -162,13 +159,13 @@ func (tr *Transport) RefreshKeys() (err error) { select { case err := <-ch: return err - case <-time.After(5 * time.Second): // 5秒超时 + case <-time.After(5 * time.Second): // 5 seconds timeout return errors.New("RefreshKeys timeout") } } -// AsyncRefreshKeys 超时处理 +// AsyncRefreshKeys timeout handler func (tr *Transport) AsyncRefreshKeys() error { if !tr.Provider.Ready() { tr.logger.Debug("key and certificate aren't ready, loading") @@ -180,22 +177,22 @@ func (tr *Transport) AsyncRefreshKeys() error { kr = csr.NewKeyRequest() } - // 创建新的私钥 - tr.logger.Debug("创建新私钥") + // Create a new private key + tr.logger.Debug("Create a new private key") err = tr.Provider.Generate(kr.Algo(), kr.Size()) if err != nil { tr.logger.Debugf("failed to generate key: %v", err) return err } - tr.logger.Debug("创建成功") + tr.logger.Debug("Created 
successfully") } } - // 证书有效期 + // Certificate validity remain, lifespan := tr.Lifespan() if remain < lifespan || lifespan <= 0 { - // 从填写的 request 结构体读取 CSR 配置 - tr.logger.Debug("创建 csr") + // Read the CSR configuration from the filled in request structure + tr.logger.Debug("Create csr") req, err := tr.Provider.CertificateRequest(tr.Identity.Request) if err != nil { tr.logger.Debugf("couldn't get a CSR: %v", err) @@ -204,10 +201,9 @@ func (tr *Transport) AsyncRefreshKeys() error { } return err } - tr.logger.Debug("创建 csr 完成") + tr.logger.Debug("Create CSR complete") tr.logger.Debug("requesting certificate from CA") - // 调用 CA cert, err := tr.CA.SignCSR(req) if err != nil { if tr.Provider.SignalFailure(err) { @@ -276,8 +272,8 @@ func (tr *Transport) AutoUpdate() error { }() remain, nextUpdateAt := tr.Lifespan() tr.logger.Debugf("attempting to refresh keypair") - if remain > nextUpdateAt { // 未到达轮换时间:轮换时间为证书有效期1/2 - tr.logger.Debugf("未到达轮换时间 %v %v", remain, nextUpdateAt) + if remain > nextUpdateAt { // Failure to arrive at the rotation time: the rotation time is the certificate validity period of 1/2 + tr.logger.Debugf("Rotation time not reached %v %v", remain, nextUpdateAt) return nil } err := tr.RefreshKeys() diff --git a/pkg/caclient/transport_test.go b/pkg/caclient/transport_test.go index f411288..12a9d00 100644 --- a/pkg/caclient/transport_test.go +++ b/pkg/caclient/transport_test.go @@ -26,8 +26,8 @@ func testLifeSpan(notBefore, notAfter time.Time, rate int) (remain time.Duration certLong := notAfter.Sub(notBefore) ava = certLong / time.Duration(rate) - fmt.Println("剩余 hours: ", remain.Hours()) - fmt.Println("下次更换 hours: ", ava.Hours()) + fmt.Println("Surplus hours: ", remain.Hours()) + fmt.Println("Next replacement hours: ", ava.Hours()) return remain, ava } diff --git a/pkg/certinfo/certinfo.go b/pkg/certinfo/certinfo.go deleted file mode 100644 index 2aea8ca..0000000 --- a/pkg/certinfo/certinfo.go +++ /dev/null @@ -1,585 +0,0 @@ -package certinfo - -import 
( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "fmt" - "math/big" - "net" - "time" -) - -// Extra ASN1 OIDs that we may need to handle -var ( - oidEmailAddress = []int{1, 2, 840, 113549, 1, 9, 1} - oidExtensionAuthorityInfoAccess = []int{1, 3, 6, 1, 5, 5, 7, 1, 1} - oidNSComment = []int{2, 16, 840, 1, 113730, 1, 13} -) - -// validity allows unmarshaling the certificate validity date range -type validity struct { - NotBefore, NotAfter time.Time -} - -// publicKeyInfo allows unmarshaling the public key -type publicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - PublicKey asn1.BitString -} - -// tbsCertificate allows unmarshaling of the "To-Be-Signed" principle portion -// of the certificate -type tbsCertificate struct { - Version int `asn1:"optional,explicit,default:1,tag:0"` - SerialNumber *big.Int - SignatureAlgorithm pkix.AlgorithmIdentifier - Issuer asn1.RawValue - Validity validity - Subject asn1.RawValue - PublicKey publicKeyInfo - UniqueID asn1.BitString `asn1:"optional,tag:1"` - SubjectUniqueID asn1.BitString `asn1:"optional,tag:2"` - Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"` -} - -// certUniqueIDs extracts the subject and issuer unique IDs which are -// byte strings. These are not common but may be present in x509v2 certificates -// or later under tags 1 and 2 (before x509v3 extensions). -func certUniqueIDs(tbsAsnData []byte) (issuerUniqueID, subjectUniqueID []byte, err error) { - var tbs tbsCertificate - rest, err := asn1.Unmarshal(tbsAsnData, &tbs) - if err != nil { - return nil, nil, err - } - if len(rest) > 0 { - return nil, nil, asn1.SyntaxError{Msg: "trailing data"} - } - iuid := tbs.UniqueID.RightAlign() - suid := tbs.SubjectUniqueID.RightAlign() - return iuid, suid, err -} - -// printName prints the fields of a distinguished name, which include such -// things as its common name and locality. 
-func printName(names []pkix.AttributeTypeAndValue, buf *bytes.Buffer) []string { - values := []string{} - for _, name := range names { - oid := name.Type - if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 { - switch oid[3] { - case 3: - values = append(values, fmt.Sprintf("CN=%s", name.Value)) - case 6: - values = append(values, fmt.Sprintf("C=%s", name.Value)) - case 8: - values = append(values, fmt.Sprintf("ST=%s", name.Value)) - case 10: - values = append(values, fmt.Sprintf("O=%s", name.Value)) - case 11: - values = append(values, fmt.Sprintf("OU=%s", name.Value)) - default: - values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String())) - } - } else if oid.Equal(oidEmailAddress) { - values = append(values, fmt.Sprintf("emailAddress=%s", name.Value)) - } else { - values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String())) - } - } - if len(values) > 0 { - buf.WriteString(values[0]) - for i := 1; i < len(values); i++ { - buf.WriteString("," + values[i]) - } - buf.WriteString("\n") - } - return values -} - -// dsaKeyPrinter formats the Y, P, Q, or G components of a DSA public key. 
-func dsaKeyPrinter(name string, val *big.Int, buf *bytes.Buffer) { - buf.WriteString(fmt.Sprintf("%16s%s:", "", name)) - for i, b := range val.Bytes() { - if (i % 15) == 0 { - buf.WriteString(fmt.Sprintf("\n%20s", "")) - } - buf.WriteString(fmt.Sprintf("%02x", b)) - if i != len(val.Bytes())-1 { - buf.WriteString(":") - } - } - buf.WriteString("\n") -} - -func printVersion(version int, buf *bytes.Buffer) { - hexVersion := version - 1 - if hexVersion < 0 { - hexVersion = 0 - } - buf.WriteString(fmt.Sprintf("%8sVersion: %d (%#x)\n", "", version, hexVersion)) -} - -func printSubjectInformation(subj *pkix.Name, pkAlgo x509.PublicKeyAlgorithm, pk interface{}, buf *bytes.Buffer) error { - buf.WriteString(fmt.Sprintf("%8sSubject: ", "")) - printName(subj.Names, buf) - buf.WriteString(fmt.Sprintf("%8sSubject Public Key Info:\n%12sPublic Key Algorithm: ", "", "")) - switch pkAlgo { - case x509.RSA: - buf.WriteString(fmt.Sprintf("RSA\n")) - if rsaKey, ok := pk.(*rsa.PublicKey); ok { - buf.WriteString(fmt.Sprintf("%16sPublic-Key: (%d bit)\n", "", rsaKey.N.BitLen())) - // Some implementations (notably OpenSSL) prepend 0x00 to the modulus - // if its most-significant bit is set. There is no need to do that here - // because the modulus is always unsigned and the extra byte can be - // confusing given the bit length. 
- buf.WriteString(fmt.Sprintf("%16sModulus:", "")) - for i, val := range rsaKey.N.Bytes() { - if (i % 15) == 0 { - buf.WriteString(fmt.Sprintf("\n%20s", "")) - } - buf.WriteString(fmt.Sprintf("%02x", val)) - if i != len(rsaKey.N.Bytes())-1 { - buf.WriteString(":") - } - } - buf.WriteString(fmt.Sprintf("\n%16sExponent: %d (%#x)\n", "", rsaKey.E, rsaKey.E)) - } else { - return errors.New("certinfo: Expected rsa.PublicKey for type x509.RSA") - } - case x509.DSA: - buf.WriteString(fmt.Sprintf("DSA\n")) - if dsaKey, ok := pk.(*dsa.PublicKey); ok { - dsaKeyPrinter("pub", dsaKey.Y, buf) - dsaKeyPrinter("P", dsaKey.P, buf) - dsaKeyPrinter("Q", dsaKey.Q, buf) - dsaKeyPrinter("G", dsaKey.G, buf) - } else { - return errors.New("certinfo: Expected dsa.PublicKey for type x509.DSA") - } - case x509.ECDSA: - buf.WriteString(fmt.Sprintf("ECDSA\n")) - if ecdsaKey, ok := pk.(*ecdsa.PublicKey); ok { - buf.WriteString(fmt.Sprintf("%16sPublic-Key: (%d bit)\n", "", ecdsaKey.Params().BitSize)) - dsaKeyPrinter("X", ecdsaKey.X, buf) - dsaKeyPrinter("Y", ecdsaKey.Y, buf) - buf.WriteString(fmt.Sprintf("%16sCurve: %s\n", "", ecdsaKey.Params().Name)) - } else { - return errors.New("certinfo: Expected ecdsa.PublicKey for type x509.DSA") - } - default: - return errors.New("certinfo: Unknown public key type") - } - return nil -} - -func printSubjKeyId(ext pkix.Extension, buf *bytes.Buffer) error { - // subjectKeyIdentifier: RFC 5280, 4.2.1.2 - buf.WriteString(fmt.Sprintf("%12sX509v3 Subject Key Identifier:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - var subjectKeyId []byte - if _, err := asn1.Unmarshal(ext.Value, &subjectKeyId); err != nil { - return err - } - for i := 0; i < len(subjectKeyId); i++ { - if i == 0 { - buf.WriteString(fmt.Sprintf("%16s%02X", "", subjectKeyId[0])) - } else { - buf.WriteString(fmt.Sprintf(":%02X", subjectKeyId[i])) - } - } - buf.WriteString("\n") - return nil -} - -func printSubjAltNames(ext pkix.Extension, 
dnsNames []string, emailAddresses []string, ipAddresses []net.IP, buf *bytes.Buffer) error { - // subjectAltName: RFC 5280, 4.2.1.6 - // TODO: Currently crypto/x509 only extracts DNS, email, and IP addresses. - // We should add the others to it or implement them here. - buf.WriteString(fmt.Sprintf("%12sX509v3 Subject Alternative Name:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - if len(dnsNames) > 0 { - buf.WriteString(fmt.Sprintf("%16sDNS:%s", "", dnsNames[0])) - for i := 1; i < len(dnsNames); i++ { - buf.WriteString(fmt.Sprintf(", DNS:%s", dnsNames[i])) - } - buf.WriteString("\n") - } - if len(emailAddresses) > 0 { - buf.WriteString(fmt.Sprintf("%16semail:%s", "", emailAddresses[0])) - for i := 1; i < len(emailAddresses); i++ { - buf.WriteString(fmt.Sprintf(", email:%s", emailAddresses[i])) - } - buf.WriteString("\n") - } - if len(ipAddresses) > 0 { - buf.WriteString(fmt.Sprintf("%16sIP Address:%s", "", ipAddresses[0].String())) // XXX verify string format - for i := 1; i < len(ipAddresses); i++ { - buf.WriteString(fmt.Sprintf(", IP Address:%s", ipAddresses[i].String())) - } - buf.WriteString("\n") - } - return nil -} - -func printSignature(sigAlgo x509.SignatureAlgorithm, sig []byte, buf *bytes.Buffer) { - buf.WriteString(fmt.Sprintf("%4sSignature Algorithm: %s", "", sigAlgo)) - for i, val := range sig { - if (i % 18) == 0 { - buf.WriteString(fmt.Sprintf("\n%9s", "")) - } - buf.WriteString(fmt.Sprintf("%02x", val)) - if i != len(sig)-1 { - buf.WriteString(":") - } - } - buf.WriteString("\n") -} - -// CertificateText returns a human-readable string representation -// of the certificate cert. The format is similar (but not identical) -// to the OpenSSL way of printing certificates. 
-func CertificateText(cert *x509.Certificate) (string, error) { - var buf bytes.Buffer - buf.Grow(4096) // 4KiB should be enough - - buf.WriteString(fmt.Sprintf("Certificate:\n")) - buf.WriteString(fmt.Sprintf("%4sData:\n", "")) - printVersion(cert.Version, &buf) - buf.WriteString(fmt.Sprintf("%8sSerial Number: %d (%#x)\n", "", cert.SerialNumber, cert.SerialNumber)) - buf.WriteString(fmt.Sprintf("%4sSignature Algorithm: %s\n", "", cert.SignatureAlgorithm)) - - // Issuer information - buf.WriteString(fmt.Sprintf("%8sIssuer: ", "")) - printName(cert.Issuer.Names, &buf) - - // Validity information - buf.WriteString(fmt.Sprintf("%8sValidity\n", "")) - buf.WriteString(fmt.Sprintf("%12sNot Before: %s\n", "", cert.NotBefore.Format("Jan 2 15:04:05 2006 MST"))) - buf.WriteString(fmt.Sprintf("%12sNot After : %s\n", "", cert.NotAfter.Format("Jan 2 15:04:05 2006 MST"))) - - // Subject information - err := printSubjectInformation(&cert.Subject, cert.PublicKeyAlgorithm, cert.PublicKey, &buf) - if err != nil { - return "", err - } - - // Issuer/Subject Unique ID, typically used in old v2 certificates - issuerUID, subjectUID, err := certUniqueIDs(cert.RawTBSCertificate) - if err != nil { - return "", errors.New(fmt.Sprintf("certinfo: Error parsing TBS unique attributes: %s\n", err.Error())) - } - if len(issuerUID) > 0 { - buf.WriteString(fmt.Sprintf("%8sIssuer Unique ID: %02x", "", issuerUID[0])) - for i := 1; i < len(issuerUID); i++ { - buf.WriteString(fmt.Sprintf(":%02x", issuerUID[i])) - } - buf.WriteString("\n") - } - if len(subjectUID) > 0 { - buf.WriteString(fmt.Sprintf("%8sSubject Unique ID: %02x", "", subjectUID[0])) - for i := 1; i < len(subjectUID); i++ { - buf.WriteString(fmt.Sprintf(":%02x", subjectUID[i])) - } - buf.WriteString("\n") - } - - // Optional extensions for X509v3 - if cert.Version == 3 && len(cert.Extensions) > 0 { - buf.WriteString(fmt.Sprintf("%8sX509v3 extensions:\n", "")) - for _, ext := range cert.Extensions { - if len(ext.Id) == 4 && ext.Id[0] == 2 
&& ext.Id[1] == 5 && ext.Id[2] == 29 { - switch ext.Id[3] { - case 14: - err = printSubjKeyId(ext, &buf) - case 15: - // keyUsage: RFC 5280, 4.2.1.3 - buf.WriteString(fmt.Sprintf("%12sX509v3 Key Usage:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - usages := []string{} - if cert.KeyUsage&x509.KeyUsageDigitalSignature > 0 { - usages = append(usages, "Digital Signature") - } - if cert.KeyUsage&x509.KeyUsageContentCommitment > 0 { - usages = append(usages, "Content Commitment") - } - if cert.KeyUsage&x509.KeyUsageKeyEncipherment > 0 { - usages = append(usages, "Key Encipherment") - } - if cert.KeyUsage&x509.KeyUsageDataEncipherment > 0 { - usages = append(usages, "Data Encipherment") - } - if cert.KeyUsage&x509.KeyUsageKeyAgreement > 0 { - usages = append(usages, "Key Agreement") - } - if cert.KeyUsage&x509.KeyUsageCertSign > 0 { - usages = append(usages, "Certificate Sign") - } - if cert.KeyUsage&x509.KeyUsageCRLSign > 0 { - usages = append(usages, "CRL Sign") - } - if cert.KeyUsage&x509.KeyUsageEncipherOnly > 0 { - usages = append(usages, "Encipher Only") - } - if cert.KeyUsage&x509.KeyUsageDecipherOnly > 0 { - usages = append(usages, "Decipher Only") - } - if len(usages) > 0 { - buf.WriteString(fmt.Sprintf("%16s%s", "", usages[0])) - for i := 1; i < len(usages); i++ { - buf.WriteString(fmt.Sprintf(", %s", usages[i])) - } - buf.WriteString("\n") - } else { - buf.WriteString(fmt.Sprintf("%16sNone\n", "")) - } - case 17: - err = printSubjAltNames(ext, cert.DNSNames, cert.EmailAddresses, cert.IPAddresses, &buf) - case 19: - // basicConstraints: RFC 5280, 4.2.1.9 - if !cert.BasicConstraintsValid { - break - } - buf.WriteString(fmt.Sprintf("%12sX509v3 Basic Constraints:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - if cert.IsCA { - buf.WriteString(fmt.Sprintf("%16sCA:TRUE", "")) - } else { - buf.WriteString(fmt.Sprintf("%16sCA:FALSE", "")) - } - if 
cert.MaxPathLenZero { - buf.WriteString(fmt.Sprintf(", pathlen:0\n")) - } else if cert.MaxPathLen > 0 { - buf.WriteString(fmt.Sprintf(", pathlen:%d\n", cert.MaxPathLen)) - } else { - buf.WriteString("\n") - } - case 30: - // nameConstraints: RFC 5280, 4.2.1.10 - // TODO: Currently crypto/x509 only supports "Permitted" and not "Excluded" - // subtrees. Furthermore it assumes all types are DNS names which is not - // necessarily true. This missing functionality should be implemented. - buf.WriteString(fmt.Sprintf("%12sX509v3 Name Constraints:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - if len(cert.PermittedDNSDomains) > 0 { - buf.WriteString(fmt.Sprintf("%16sPermitted:\n%18s%s", "", "", cert.PermittedDNSDomains[0])) - for i := 1; i < len(cert.PermittedDNSDomains); i++ { - buf.WriteString(fmt.Sprintf(", %s", cert.PermittedDNSDomains[i])) - } - buf.WriteString("\n") - } - case 31: - // CRLDistributionPoints: RFC 5280, 4.2.1.13 - // TODO: Currently crypto/x509 does not fully implement this section, - // including types and reason flags. 
- buf.WriteString(fmt.Sprintf("%12sX509v3 CRL Distribution Points:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - if len(cert.CRLDistributionPoints) > 0 { - buf.WriteString(fmt.Sprintf("\n%16sFull Name:\n%18sURI:%s", "", "", cert.CRLDistributionPoints[0])) - for i := 1; i < len(cert.CRLDistributionPoints); i++ { - buf.WriteString(fmt.Sprintf(", URI:%s", cert.CRLDistributionPoints[i])) - } - buf.WriteString("\n\n") - } - case 32: - // certificatePoliciesExt: RFC 5280, 4.2.1.4 - // TODO: Currently crypto/x509 does not fully impelment this section, - // including the Certification Practice Statement (CPS) - buf.WriteString(fmt.Sprintf("%12sX509v3 Certificate Policies:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - for _, val := range cert.PolicyIdentifiers { - buf.WriteString(fmt.Sprintf("%16sPolicy: %s\n", "", val.String())) - } - case 35: - // authorityKeyIdentifier: RFC 5280, 4.2.1.1 - buf.WriteString(fmt.Sprintf("%12sX509v3 Authority Key Identifier:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - buf.WriteString(fmt.Sprintf("%16skeyid", "")) - for _, val := range cert.AuthorityKeyId { - buf.WriteString(fmt.Sprintf(":%02X", val)) - } - buf.WriteString("\n") - case 37: - // extKeyUsage: RFC 5280, 4.2.1.12 - buf.WriteString(fmt.Sprintf("%12sX509v3 Extended Key Usage:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - var list []string - for _, val := range cert.ExtKeyUsage { - switch val { - case x509.ExtKeyUsageAny: - list = append(list, "Any Usage") - case x509.ExtKeyUsageServerAuth: - list = append(list, "TLS Web Server Authentication") - case x509.ExtKeyUsageClientAuth: - list = append(list, "TLS Web Client Authentication") - case x509.ExtKeyUsageCodeSigning: - list = append(list, "Code Signing") - case x509.ExtKeyUsageEmailProtection: - list = 
append(list, "E-mail Protection") - case x509.ExtKeyUsageIPSECEndSystem: - list = append(list, "IPSec End System") - case x509.ExtKeyUsageIPSECTunnel: - list = append(list, "IPSec Tunnel") - case x509.ExtKeyUsageIPSECUser: - list = append(list, "IPSec User") - case x509.ExtKeyUsageTimeStamping: - list = append(list, "Time Stamping") - case x509.ExtKeyUsageOCSPSigning: - list = append(list, "OCSP Signing") - default: - list = append(list, "UNKNOWN") - } - } - if len(list) > 0 { - buf.WriteString(fmt.Sprintf("%16s%s", "", list[0])) - for i := 1; i < len(list); i++ { - buf.WriteString(fmt.Sprintf(", %s", list[i])) - } - buf.WriteString("\n") - } - default: - buf.WriteString(fmt.Sprintf("Unknown extension 2.5.29.%d\n", ext.Id[3])) - } - if err != nil { - return "", err - } - } else if ext.Id.Equal(oidExtensionAuthorityInfoAccess) { - // authorityInfoAccess: RFC 5280, 4.2.2.1 - buf.WriteString(fmt.Sprintf("%12sAuthority Information Access:", "")) - if ext.Critical { - buf.WriteString(" critical\n") - } else { - buf.WriteString("\n") - } - if len(cert.OCSPServer) > 0 { - buf.WriteString(fmt.Sprintf("%16sOCSP - URI:%s", "", cert.OCSPServer[0])) - for i := 1; i < len(cert.OCSPServer); i++ { - buf.WriteString(fmt.Sprintf(",URI:%s", cert.OCSPServer[i])) - } - buf.WriteString("\n") - } - if len(cert.IssuingCertificateURL) > 0 { - buf.WriteString(fmt.Sprintf("%16sCA Issuers - URI:%s", "", cert.IssuingCertificateURL[0])) - for i := 1; i < len(cert.IssuingCertificateURL); i++ { - buf.WriteString(fmt.Sprintf(",URI:%s", cert.IssuingCertificateURL[i])) - } - buf.WriteString("\n") - } - buf.WriteString("\n") - } else if ext.Id.Equal(oidNSComment) { - // Netscape comment - var comment string - rest, err := asn1.Unmarshal(ext.Value, &comment) - if err != nil || len(rest) > 0 { - return "", errors.New("certinfo: Error parsing OID " + ext.Id.String()) - } - if ext.Critical { - buf.WriteString(fmt.Sprintf("%12sNetscape Comment: critical\n%16s%s\n", "", "", comment)) - } else { - 
buf.WriteString(fmt.Sprintf("%12sNetscape Comment:\n%16s%s\n", "", "", comment)) - } - } else { - buf.WriteString(fmt.Sprintf("%12sUnknown extension %s\n", "", ext.Id.String())) - } - } - buf.WriteString("\n") - } - - // Signature - printSignature(cert.SignatureAlgorithm, cert.Signature, &buf) - - // Optional: Print the full PEM certificate - /* - pemBlock := pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Raw, - } - buf.Write(pem.EncodeToMemory(&pemBlock)) - */ - - return buf.String(), nil -} - -// CertificateRequestText returns a human-readable string representation -// of the certificate request csr. The format is similar (but not identical) -// to the OpenSSL way of printing certificates. -func CertificateRequestText(csr *x509.CertificateRequest) (string, error) { - var buf bytes.Buffer - buf.Grow(4096) // 4KiB should be enough - - buf.WriteString(fmt.Sprintf("Certificate Request:\n")) - buf.WriteString(fmt.Sprintf("%4sData:\n", "")) - printVersion(csr.Version, &buf) - - // Subject information - err := printSubjectInformation(&csr.Subject, csr.PublicKeyAlgorithm, csr.PublicKey, &buf) - if err != nil { - return "", err - } - - // Optional extensions for X509v3 - if csr.Version == 3 && len(csr.Extensions) > 0 { - buf.WriteString(fmt.Sprintf("%8sRequested Extensions:\n", "")) - var err error - for _, ext := range csr.Extensions { - if len(ext.Id) == 4 && ext.Id[0] == 2 && ext.Id[1] == 5 && ext.Id[2] == 29 { - switch ext.Id[3] { - case 14: - err = printSubjKeyId(ext, &buf) - case 17: - err = printSubjAltNames(ext, csr.DNSNames, csr.EmailAddresses, csr.IPAddresses, &buf) - } - } - if err != nil { - return "", err - } - } - buf.WriteString("\n") - } - - // Signature - printSignature(csr.SignatureAlgorithm, csr.Signature, &buf) - - return buf.String(), nil -} diff --git a/pkg/certinfo/certinfo_test.go b/pkg/certinfo/certinfo_test.go deleted file mode 100644 index 9e802bb..0000000 --- a/pkg/certinfo/certinfo_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package certinfo - 
-import ( - "bytes" - "crypto/x509" - "encoding/pem" - "io/ioutil" - "testing" -) - -type InputType int - -const ( - tCertificate InputType = iota - tCertificateRequest -) - -// Compares a PEM-encoded certificate to a refernce file. -func testPair(t *testing.T, certFile, refFile string, inputType InputType) { - // Read and parse the certificate - pemData, err := ioutil.ReadFile(certFile) - if err != nil { - t.Fatal(err) - } - block, rest := pem.Decode([]byte(pemData)) - if block == nil || len(rest) > 0 { - t.Fatal("Certificate decoding error") - } - var result string - switch inputType { - case tCertificate: - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - t.Fatal(err) - } - result, err = CertificateText(cert) - if err != nil { - t.Fatal(err) - } - case tCertificateRequest: - cert, err := x509.ParseCertificateRequest(block.Bytes) - if err != nil { - t.Fatal(err) - } - result, err = CertificateRequestText(cert) - if err != nil { - t.Fatal(err) - } - } - resultData := []byte(result) - - // Read the reference output - refData, err := ioutil.ReadFile(refFile) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(resultData, refData) { - t.Logf("'%s' did not match reference '%s'\n", certFile, refFile) - t.Errorf("Dump follows:\n%s\n", result) - } -} - -// Test the root CA certificate -func TestCertInfoRoot(t *testing.T) { - testPair(t, "test_certs/root1.cert.pem", "test_certs/root1.cert.text", tCertificate) - testPair(t, "test_certs/root1.csr.pem", "test_certs/root1.csr.text", tCertificateRequest) -} - -// Test the leaf (user) RSA certificate -func TestCertInfoLeaf1(t *testing.T) { - testPair(t, "test_certs/leaf1.cert.pem", "test_certs/leaf1.cert.text", tCertificate) - testPair(t, "test_certs/leaf1.csr.pem", "test_certs/leaf1.csr.text", tCertificateRequest) -} - -// Test the leaf (user) DSA certificate -func TestCertInfoLeaf2(t *testing.T) { - testPair(t, "test_certs/leaf2.cert.pem", "test_certs/leaf2.cert.text", tCertificate) - testPair(t, 
"test_certs/leaf2.csr.pem", "test_certs/leaf2.csr.text", tCertificateRequest) -} - -// Test the leaf (user) ECDSA certificate -func TestCertInfoLeaf3(t *testing.T) { - testPair(t, "test_certs/leaf3.cert.pem", "test_certs/leaf3.cert.text", tCertificate) - testPair(t, "test_certs/leaf3.csr.pem", "test_certs/leaf3.csr.text", tCertificateRequest) -} diff --git a/pkg/certinfo/test_certs/README b/pkg/certinfo/test_certs/README deleted file mode 100644 index 3580998..0000000 --- a/pkg/certinfo/test_certs/README +++ /dev/null @@ -1,21 +0,0 @@ -This directory contains configuration files and scripts to create custom -certificates that are useful for testing. They assume you have OpenSSL -and a bash-like shell. - -make-certs.sh creates signing requests and certificates from existing key files -and OpenSSL configuration files. This is useful, for example, if you want to test -a new extension but don't want to change the keys and dates of the certificates. - -new-keys.sh generates new key files. This should not be useful and is included -only for reproducibility. - - -Issues: -1. Unfortunately, OpenSSL uses non-deterministic signing for DSA and ECDSA certificate -requests, so running make-certs.sh will not reproduce the same CSRs despite having -static keys. These files have to be kept in-sync manually. - -2. The x509 package does not currently set CertificateRequest.SignatureAlgorithm for -DSA CSRs. Therefore the 'leaf2.csr.text' contains the line 'Signature Algorithm: 0' -instead of 'Signature Algorithm: DSAWithSHA256' to allow the test to pass and -indicate that the problem is with x509 and not this package. 
diff --git a/pkg/certinfo/test_certs/leaf1.cert.pem b/pkg/certinfo/test_certs/leaf1.cert.pem deleted file mode 100644 index 60f6fe4..0000000 --- a/pkg/certinfo/test_certs/leaf1.cert.pem +++ /dev/null @@ -1,53 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 2 (0x2) - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, O=World Widget Authority, OU=Identity Affairs, CN=worldwidgetauthority.com/emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 GMT - Not After : Jun 30 07:37:21 2040 GMT - Subject: C=AU, ST=Victoria, O=Southern Widgets Corporation, OU=21st Century Department, CN=southernwidgets.com/emailAddress=nobody@southernwidgets.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (512 bit) - Modulus: - 00:be:52:9d:bf:c5:66:71:98:14:bd:fc:e7:2d:f9: - 41:83:a4:e4:db:a3:b7:a8:6c:5a:4c:43:a6:36:a8: - 47:6a:32:0a:a2:76:92:db:71:02:8d:ae:e8:85:71: - 1f:48:01:fb:15:30:ce:c6:f1:13:df:76:83:5a:d6: - a2:fc:4a:27:87 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 61:2E:64:28:69:1D:0A:03:87:0E:BC:FB:0A:4C:44:D0:8D:25:7F:B4 - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: sha256WithRSAEncryption - 8c:37:d9:86:6c:fc:29:6d:fe:87:68:a9:c3:22:98:71:57:57: - fd:65:45:d1:c6:f3:0b:75:6f:3f:fd:5e:74:1b:87:fa:9a:91: - 3e:21:59:79:9c:26:91:97:f4:36:9d:42:61:84:10:ac:f7:10: - 4b:2c:2d:42:fa:14:7f:02:c7:e0 ------BEGIN CERTIFICATE----- -MIIC0zCCAn2gAwIBAgIBAjANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExHzAdBgNVBAoMFldvcmxkIFdpZGdldCBBdXRo -b3JpdHkxGTAXBgNVBAsMEElkZW50aXR5IEFmZmFpcnMxITAfBgNVBAMMGHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTEuMCwGCSqGSIb3DQEJARYfbm9ib2R5QHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTAeFw0yMDA3MjMxODU2NDdaFw00MDA2MzAwNzM3 
-MjFaMIGyMQswCQYDVQQGEwJBVTERMA8GA1UECAwIVmljdG9yaWExJTAjBgNVBAoM -HFNvdXRoZXJuIFdpZGdldHMgQ29ycG9yYXRpb24xIDAeBgNVBAsMFzIxc3QgQ2Vu -dHVyeSBEZXBhcnRtZW50MRwwGgYDVQQDDBNzb3V0aGVybndpZGdldHMuY29tMSkw -JwYJKoZIhvcNAQkBFhpub2JvZHlAc291dGhlcm53aWRnZXRzLmNvbTBcMA0GCSqG -SIb3DQEBAQUAA0sAMEgCQQC+Up2/xWZxmBS9/Oct+UGDpOTbo7eobFpMQ6Y2qEdq -MgqidpLbcQKNruiFcR9IAfsVMM7G8RPfdoNa1qL8SieHAgMBAAGjfTB7MAkGA1Ud -EwQCMAAwLgYJYIZIAYb4QgENBCEWH1RoaXMgaXMgYSB0ZXN0IGNlcnRpZmljYXRl -IG9ubHkwHQYDVR0OBBYEFGEuZChpHQoDhw68+wpMRNCNJX+0MB8GA1UdIwQYMBaA -FHwPJp3tyHrABR6Zo12lno2mppZeMA0GCSqGSIb3DQEBCwUAA0EAjDfZhmz8KW3+ -h2ipwyKYcVdX/WVF0cbzC3VvP/1edBuH+pqRPiFZeZwmkZf0Np1CYYQQrPcQSywt -QvoUfwLH4A== ------END CERTIFICATE----- diff --git a/pkg/certinfo/test_certs/leaf1.cert.text b/pkg/certinfo/test_certs/leaf1.cert.text deleted file mode 100644 index 496fa0b..0000000 --- a/pkg/certinfo/test_certs/leaf1.cert.text +++ /dev/null @@ -1,35 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 2 (0x2) - Signature Algorithm: SHA256-RSA - Issuer: C=US,ST=California,O=World Widget Authority,OU=Identity Affairs,CN=worldwidgetauthority.com,emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 UTC - Not After : Jun 30 07:37:21 2040 UTC - Subject: C=AU,ST=Victoria,O=Southern Widgets Corporation,OU=21st Century Department,CN=southernwidgets.com,emailAddress=nobody@southernwidgets.com - Subject Public Key Info: - Public Key Algorithm: RSA - Public-Key: (512 bit) - Modulus: - be:52:9d:bf:c5:66:71:98:14:bd:fc:e7:2d:f9:41: - 83:a4:e4:db:a3:b7:a8:6c:5a:4c:43:a6:36:a8:47: - 6a:32:0a:a2:76:92:db:71:02:8d:ae:e8:85:71:1f: - 48:01:fb:15:30:ce:c6:f1:13:df:76:83:5a:d6:a2: - fc:4a:27:87 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 61:2E:64:28:69:1D:0A:03:87:0E:BC:FB:0A:4C:44:D0:8D:25:7F:B4 - X509v3 Authority Key Identifier: - 
keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: SHA256-RSA - 8c:37:d9:86:6c:fc:29:6d:fe:87:68:a9:c3:22:98:71:57:57: - fd:65:45:d1:c6:f3:0b:75:6f:3f:fd:5e:74:1b:87:fa:9a:91: - 3e:21:59:79:9c:26:91:97:f4:36:9d:42:61:84:10:ac:f7:10: - 4b:2c:2d:42:fa:14:7f:02:c7:e0 diff --git a/pkg/certinfo/test_certs/leaf1.cfg b/pkg/certinfo/test_certs/leaf1.cfg deleted file mode 100644 index 526c375..0000000 --- a/pkg/certinfo/test_certs/leaf1.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# OpenSSL configuration for creating a test certiicate -# which can be signed by a certificate authority -# -[ req ] -default_bits = 512 -default_keyfile = leaf1.key.pem -encrypt_key = no -default_md = sha256 -distinguished_name = req_distinguished_name -req_extensions = req_extensions -prompt = no - -######################################## -# Settings for the certificate request # -######################################## -[ req_distinguished_name ] -C = AU -ST = Victoria -L = Melbourne -O = Southern Widgets Corporation -OU = 21st Century Department -CN = southernwidgets.com -emailAddress = nobody@southernwidgets.com - -# Extensions to place in a leaf certificate. 
-# Some may be overridden by the CA configuration -[ req_extensions ] diff --git a/pkg/certinfo/test_certs/leaf1.csr.pem b/pkg/certinfo/test_certs/leaf1.csr.pem deleted file mode 100644 index af05b1e..0000000 --- a/pkg/certinfo/test_certs/leaf1.csr.pem +++ /dev/null @@ -1,31 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - Subject: C=AU, ST=Victoria, L=Melbourne, O=Southern Widgets Corporation, OU=21st Century Department, CN=southernwidgets.com/emailAddress=nobody@southernwidgets.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (512 bit) - Modulus: - 00:be:52:9d:bf:c5:66:71:98:14:bd:fc:e7:2d:f9: - 41:83:a4:e4:db:a3:b7:a8:6c:5a:4c:43:a6:36:a8: - 47:6a:32:0a:a2:76:92:db:71:02:8d:ae:e8:85:71: - 1f:48:01:fb:15:30:ce:c6:f1:13:df:76:83:5a:d6: - a2:fc:4a:27:87 - Exponent: 65537 (0x10001) - Attributes: - Signature Algorithm: sha256WithRSAEncryption - 26:26:07:7f:3f:11:08:44:9c:97:36:d7:de:75:8c:9f:6d:48: - a1:55:e0:d1:5f:d3:6c:d2:7c:78:9b:4c:d7:87:81:57:bb:3f: - 0f:1f:82:f6:2a:db:34:9b:6f:a2:80:09:a9:42:19:14:e6:7d: - ac:2e:08:0b:10:1b:32:e1:b8:e6 ------BEGIN CERTIFICATE REQUEST----- -MIIBkTCCATsCAQAwgcYxCzAJBgNVBAYTAkFVMREwDwYDVQQIDAhWaWN0b3JpYTES -MBAGA1UEBwwJTWVsYm91cm5lMSUwIwYDVQQKDBxTb3V0aGVybiBXaWRnZXRzIENv -cnBvcmF0aW9uMSAwHgYDVQQLDBcyMXN0IENlbnR1cnkgRGVwYXJ0bWVudDEcMBoG -A1UEAwwTc291dGhlcm53aWRnZXRzLmNvbTEpMCcGCSqGSIb3DQEJARYabm9ib2R5 -QHNvdXRoZXJud2lkZ2V0cy5jb20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEAvlKd -v8VmcZgUvfznLflBg6Tk26O3qGxaTEOmNqhHajIKonaS23ECja7ohXEfSAH7FTDO -xvET33aDWtai/EonhwIDAQABoA8wDQYJKoZIhvcNAQkOMQAwDQYJKoZIhvcNAQEL -BQADQQAmJgd/PxEIRJyXNtfedYyfbUihVeDRX9Ns0nx4m0zXh4FXuz8PH4L2Kts0 -m2+igAmpQhkU5n2sLggLEBsy4bjm ------END CERTIFICATE REQUEST----- diff --git a/pkg/certinfo/test_certs/leaf1.csr.text b/pkg/certinfo/test_certs/leaf1.csr.text deleted file mode 100644 index 0df8b99..0000000 --- a/pkg/certinfo/test_certs/leaf1.csr.text +++ /dev/null @@ -1,19 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - 
Subject: C=AU,ST=Victoria,UnknownOID=2.5.4.7,O=Southern Widgets Corporation,OU=21st Century Department,CN=southernwidgets.com,emailAddress=nobody@southernwidgets.com - Subject Public Key Info: - Public Key Algorithm: RSA - Public-Key: (512 bit) - Modulus: - be:52:9d:bf:c5:66:71:98:14:bd:fc:e7:2d:f9:41: - 83:a4:e4:db:a3:b7:a8:6c:5a:4c:43:a6:36:a8:47: - 6a:32:0a:a2:76:92:db:71:02:8d:ae:e8:85:71:1f: - 48:01:fb:15:30:ce:c6:f1:13:df:76:83:5a:d6:a2: - fc:4a:27:87 - Exponent: 65537 (0x10001) - Signature Algorithm: SHA256-RSA - 26:26:07:7f:3f:11:08:44:9c:97:36:d7:de:75:8c:9f:6d:48: - a1:55:e0:d1:5f:d3:6c:d2:7c:78:9b:4c:d7:87:81:57:bb:3f: - 0f:1f:82:f6:2a:db:34:9b:6f:a2:80:09:a9:42:19:14:e6:7d: - ac:2e:08:0b:10:1b:32:e1:b8:e6 diff --git a/pkg/certinfo/test_certs/leaf1.key.pem b/pkg/certinfo/test_certs/leaf1.key.pem deleted file mode 100644 index 7f18c5d..0000000 --- a/pkg/certinfo/test_certs/leaf1.key.pem +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIBOQIBAAJBAL5Snb/FZnGYFL385y35QYOk5Nujt6hsWkxDpjaoR2oyCqJ2kttx -Ao2u6IVxH0gB+xUwzsbxE992g1rWovxKJ4cCAwEAAQJAOt+xnq4At6j02aSPGuCd -DI0IooztdjM2Z5sRopzBoq6ZY1KJpQ/7jS3ufYCNNy7eGs0Lgsge4q/N5/MvOh7J -WQIhAOnUQTN5NhHzYJ8FwdWwY3CtGl2O6FjjUN7qhEGgP37DAiEA0F5UHw0MhoqN -lLqN4NlyM47TzWPhxAddQnOQ4oYUL+0CIHM4umBI7FHwIb56scdAwd92DTXpA6k5 -alJOMZm9A0zNAiB0gYm/ougyAh6P3o+d/XdhZKcA3KxLk2KN2VhVkGqRMQIgGcmJ -NL5qjWhesQgFGM+z4aPJa0NayC9X9qPokBeq7kE= ------END RSA PRIVATE KEY----- diff --git a/pkg/certinfo/test_certs/leaf2.cert.pem b/pkg/certinfo/test_certs/leaf2.cert.pem deleted file mode 100644 index ace5cf2..0000000 --- a/pkg/certinfo/test_certs/leaf2.cert.pem +++ /dev/null @@ -1,69 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 3 (0x3) - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, O=World Widget Authority, OU=Identity Affairs, CN=worldwidgetauthority.com/emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 GMT - Not After : Jun 30 
07:37:21 2040 GMT - Subject: C=US, ST=Alaska, O=Northern Widgets Corporation, OU=21st Century Department, CN=northernwidgets.com/emailAddress=nobody@northernwidgets.com - Subject Public Key Info: - Public Key Algorithm: dsaEncryption - pub: - 3b:2b:f7:05:e5:70:7d:2b:77:89:65:f5:4f:77:c6: - 59:17:50:a5:76:ee:51:94:43:b1:8e:51:64:35:4f: - 68:42:a3:85:d7:a0:57:a6:55:ef:e5:1a:a0:4f:1a: - 2a:61:8a:27:50:55:cc:5c:6b:4e:b1:67:ee:31:4e: - 26:33:91:1e - P: - 00:fa:9b:a9:7d:fb:27:a0:b5:26:15:2c:16:57:08: - b6:47:8b:ec:64:e2:03:b5:14:b7:c4:56:96:4d:00: - 28:b0:26:25:4c:2f:98:48:fc:df:17:71:29:cf:d2: - 18:8e:99:1e:fe:bb:78:47:8d:cc:42:9d:82:3a:6d: - 62:48:6c:0c:45 - Q: - 00:82:2b:9e:a3:a6:b4:b3:7d:ae:03:c0:df:2d:3c: - 99:e7:e4:72:a4:6f - G: - 5e:97:51:30:39:77:69:d4:d3:2b:c6:31:27:6d:4f: - c4:71:0a:be:0c:88:b9:53:64:05:ef:4a:0d:19:8f: - 71:bc:10:a6:45:dd:a0:55:e1:77:02:28:84:58:09: - da:f9:ad:16:6e:22:3f:13:f7:91:71:44:5b:5f:98: - 8c:92:21:13 - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 8E:15:36:91:A2:96:AE:6B:9B:A3:6A:7C:30:7A:ED:1E:77:4E:16:64 - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: sha256WithRSAEncryption - 4f:9c:7e:54:4c:cd:3f:92:0d:d1:04:a6:86:08:a0:4e:b0:5b: - eb:78:02:76:81:d7:05:8e:54:b3:3a:fe:7f:b1:50:7e:30:a3: - 3e:a7:cf:8c:49:23:20:04:0b:dd:e2:92:c5:53:25:b5:87:3e: - 1b:9c:99:d4:b4:19:1b:45:c1:64 ------BEGIN CERTIFICATE----- -MIIDZjCCAxCgAwIBAgIBAzANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExHzAdBgNVBAoMFldvcmxkIFdpZGdldCBBdXRo -b3JpdHkxGTAXBgNVBAsMEElkZW50aXR5IEFmZmFpcnMxITAfBgNVBAMMGHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTEuMCwGCSqGSIb3DQEJARYfbm9ib2R5QHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTAeFw0yMDA3MjMxODU2NDdaFw00MDA2MzAwNzM3 -MjFaMIGwMQswCQYDVQQGEwJVUzEPMA0GA1UECAwGQWxhc2thMSUwIwYDVQQKDBxO -b3J0aGVybiBXaWRnZXRzIENvcnBvcmF0aW9uMSAwHgYDVQQLDBcyMXN0IENlbnR1 
-cnkgRGVwYXJ0bWVudDEcMBoGA1UEAwwTbm9ydGhlcm53aWRnZXRzLmNvbTEpMCcG -CSqGSIb3DQEJARYabm9ib2R5QG5vcnRoZXJud2lkZ2V0cy5jb20wgfAwgagGByqG -SM44BAEwgZwCQQD6m6l9+yegtSYVLBZXCLZHi+xk4gO1FLfEVpZNACiwJiVML5hI -/N8XcSnP0hiOmR7+u3hHjcxCnYI6bWJIbAxFAhUAgiueo6a0s32uA8DfLTyZ5+Ry -pG8CQF6XUTA5d2nU0yvGMSdtT8RxCr4MiLlTZAXvSg0Zj3G8EKZF3aBV4XcCKIRY -Cdr5rRZuIj8T95FxRFtfmIySIRMDQwACQDsr9wXlcH0rd4ll9U93xlkXUKV27lGU -Q7GOUWQ1T2hCo4XXoFemVe/lGqBPGiphiidQVcxca06xZ+4xTiYzkR6jfTB7MAkG -A1UdEwQCMAAwLgYJYIZIAYb4QgENBCEWH1RoaXMgaXMgYSB0ZXN0IGNlcnRpZmlj -YXRlIG9ubHkwHQYDVR0OBBYEFI4VNpGilq5rm6NqfDB67R53ThZkMB8GA1UdIwQY -MBaAFHwPJp3tyHrABR6Zo12lno2mppZeMA0GCSqGSIb3DQEBCwUAA0EAT5x+VEzN -P5IN0QSmhgigTrBb63gCdoHXBY5Uszr+f7FQfjCjPqfPjEkjIAQL3eKSxVMltYc+ -G5yZ1LQZG0XBZA== ------END CERTIFICATE----- diff --git a/pkg/certinfo/test_certs/leaf2.cert.text b/pkg/certinfo/test_certs/leaf2.cert.text deleted file mode 100644 index cc32e57..0000000 --- a/pkg/certinfo/test_certs/leaf2.cert.text +++ /dev/null @@ -1,48 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 3 (0x3) - Signature Algorithm: SHA256-RSA - Issuer: C=US,ST=California,O=World Widget Authority,OU=Identity Affairs,CN=worldwidgetauthority.com,emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 UTC - Not After : Jun 30 07:37:21 2040 UTC - Subject: C=US,ST=Alaska,O=Northern Widgets Corporation,OU=21st Century Department,CN=northernwidgets.com,emailAddress=nobody@northernwidgets.com - Subject Public Key Info: - Public Key Algorithm: DSA - pub: - 3b:2b:f7:05:e5:70:7d:2b:77:89:65:f5:4f:77:c6: - 59:17:50:a5:76:ee:51:94:43:b1:8e:51:64:35:4f: - 68:42:a3:85:d7:a0:57:a6:55:ef:e5:1a:a0:4f:1a: - 2a:61:8a:27:50:55:cc:5c:6b:4e:b1:67:ee:31:4e: - 26:33:91:1e - P: - fa:9b:a9:7d:fb:27:a0:b5:26:15:2c:16:57:08:b6: - 47:8b:ec:64:e2:03:b5:14:b7:c4:56:96:4d:00:28: - b0:26:25:4c:2f:98:48:fc:df:17:71:29:cf:d2:18: - 8e:99:1e:fe:bb:78:47:8d:cc:42:9d:82:3a:6d:62: - 48:6c:0c:45 - Q: - 
82:2b:9e:a3:a6:b4:b3:7d:ae:03:c0:df:2d:3c:99: - e7:e4:72:a4:6f - G: - 5e:97:51:30:39:77:69:d4:d3:2b:c6:31:27:6d:4f: - c4:71:0a:be:0c:88:b9:53:64:05:ef:4a:0d:19:8f: - 71:bc:10:a6:45:dd:a0:55:e1:77:02:28:84:58:09: - da:f9:ad:16:6e:22:3f:13:f7:91:71:44:5b:5f:98: - 8c:92:21:13 - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 8E:15:36:91:A2:96:AE:6B:9B:A3:6A:7C:30:7A:ED:1E:77:4E:16:64 - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: SHA256-RSA - 4f:9c:7e:54:4c:cd:3f:92:0d:d1:04:a6:86:08:a0:4e:b0:5b: - eb:78:02:76:81:d7:05:8e:54:b3:3a:fe:7f:b1:50:7e:30:a3: - 3e:a7:cf:8c:49:23:20:04:0b:dd:e2:92:c5:53:25:b5:87:3e: - 1b:9c:99:d4:b4:19:1b:45:c1:64 diff --git a/pkg/certinfo/test_certs/leaf2.cfg b/pkg/certinfo/test_certs/leaf2.cfg deleted file mode 100644 index 3701ada..0000000 --- a/pkg/certinfo/test_certs/leaf2.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# OpenSSL configuration for creating a test certiicate -# which can be signed by a certificate authority -# -[ req ] -default_bits = 512 -default_keyfile = leaf2.key.pem -encrypt_key = no -default_md = sha256 -distinguished_name = req_distinguished_name -req_extensions = req_extensions -prompt = no - -######################################## -# Settings for the certificate request # -######################################## -[ req_distinguished_name ] -C = US -ST = Alaska -L = Barrow -O = Northern Widgets Corporation -OU = 21st Century Department -CN = northernwidgets.com -emailAddress = nobody@northernwidgets.com - -# Extensions to place in a leaf certificate. 
-# Some may be overridden by the CA configuration -[ req_extensions ] diff --git a/pkg/certinfo/test_certs/leaf2.csr.pem b/pkg/certinfo/test_certs/leaf2.csr.pem deleted file mode 100644 index 48d8f50..0000000 --- a/pkg/certinfo/test_certs/leaf2.csr.pem +++ /dev/null @@ -1,49 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - Subject: C=US, ST=Alaska, L=Barrow, O=Northern Widgets Corporation, OU=21st Century Department, CN=northernwidgets.com/emailAddress=nobody@northernwidgets.com - Subject Public Key Info: - Public Key Algorithm: dsaEncryption - pub: - 3b:2b:f7:05:e5:70:7d:2b:77:89:65:f5:4f:77:c6: - 59:17:50:a5:76:ee:51:94:43:b1:8e:51:64:35:4f: - 68:42:a3:85:d7:a0:57:a6:55:ef:e5:1a:a0:4f:1a: - 2a:61:8a:27:50:55:cc:5c:6b:4e:b1:67:ee:31:4e: - 26:33:91:1e - P: - 00:fa:9b:a9:7d:fb:27:a0:b5:26:15:2c:16:57:08: - b6:47:8b:ec:64:e2:03:b5:14:b7:c4:56:96:4d:00: - 28:b0:26:25:4c:2f:98:48:fc:df:17:71:29:cf:d2: - 18:8e:99:1e:fe:bb:78:47:8d:cc:42:9d:82:3a:6d: - 62:48:6c:0c:45 - Q: - 00:82:2b:9e:a3:a6:b4:b3:7d:ae:03:c0:df:2d:3c: - 99:e7:e4:72:a4:6f - G: - 5e:97:51:30:39:77:69:d4:d3:2b:c6:31:27:6d:4f: - c4:71:0a:be:0c:88:b9:53:64:05:ef:4a:0d:19:8f: - 71:bc:10:a6:45:dd:a0:55:e1:77:02:28:84:58:09: - da:f9:ad:16:6e:22:3f:13:f7:91:71:44:5b:5f:98: - 8c:92:21:13 - Attributes: - Signature Algorithm: dsa_with_SHA256 - r: - 6e:2f:4d:43:42:fe:ef:dd:d6:5d:82:ce:40:35:0f: - df:f6:03:d0:56 - s: - 2c:af:8d:a5:7e:00:cb:18:c9:eb:03:ee:9b:92:32: - c0:15:73:6a:29 ------BEGIN CERTIFICATE REQUEST----- -MIICDTCCAcsCAQAwgcExCzAJBgNVBAYTAlVTMQ8wDQYDVQQIDAZBbGFza2ExDzAN -BgNVBAcMBkJhcnJvdzElMCMGA1UECgwcTm9ydGhlcm4gV2lkZ2V0cyBDb3Jwb3Jh -dGlvbjEgMB4GA1UECwwXMjFzdCBDZW50dXJ5IERlcGFydG1lbnQxHDAaBgNVBAMM -E25vcnRoZXJud2lkZ2V0cy5jb20xKTAnBgkqhkiG9w0BCQEWGm5vYm9keUBub3J0 -aGVybndpZGdldHMuY29tMIHwMIGoBgcqhkjOOAQBMIGcAkEA+pupffsnoLUmFSwW -Vwi2R4vsZOIDtRS3xFaWTQAosCYlTC+YSPzfF3Epz9IYjpke/rt4R43MQp2COm1i -SGwMRQIVAIIrnqOmtLN9rgPA3y08mefkcqRvAkBel1EwOXdp1NMrxjEnbU/EcQq+ 
-DIi5U2QF70oNGY9xvBCmRd2gVeF3AiiEWAna+a0WbiI/E/eRcURbX5iMkiETA0MA -AkA7K/cF5XB9K3eJZfVPd8ZZF1Cldu5RlEOxjlFkNU9oQqOF16BXplXv5RqgTxoq -YYonUFXMXGtOsWfuMU4mM5EeoA8wDQYJKoZIhvcNAQkOMQAwCwYJYIZIAWUDBAMC -Ay8AMCwCFG4vTUNC/u/d1l2CzkA1D9/2A9BWAhQsr42lfgDLGMnrA+6bkjLAFXNq -KQ== ------END CERTIFICATE REQUEST----- diff --git a/pkg/certinfo/test_certs/leaf2.csr.text b/pkg/certinfo/test_certs/leaf2.csr.text deleted file mode 100644 index 60aa811..0000000 --- a/pkg/certinfo/test_certs/leaf2.csr.text +++ /dev/null @@ -1,31 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - Subject: C=US,ST=Alaska,UnknownOID=2.5.4.7,O=Northern Widgets Corporation,OU=21st Century Department,CN=northernwidgets.com,emailAddress=nobody@northernwidgets.com - Subject Public Key Info: - Public Key Algorithm: DSA - pub: - 3b:2b:f7:05:e5:70:7d:2b:77:89:65:f5:4f:77:c6: - 59:17:50:a5:76:ee:51:94:43:b1:8e:51:64:35:4f: - 68:42:a3:85:d7:a0:57:a6:55:ef:e5:1a:a0:4f:1a: - 2a:61:8a:27:50:55:cc:5c:6b:4e:b1:67:ee:31:4e: - 26:33:91:1e - P: - fa:9b:a9:7d:fb:27:a0:b5:26:15:2c:16:57:08:b6: - 47:8b:ec:64:e2:03:b5:14:b7:c4:56:96:4d:00:28: - b0:26:25:4c:2f:98:48:fc:df:17:71:29:cf:d2:18: - 8e:99:1e:fe:bb:78:47:8d:cc:42:9d:82:3a:6d:62: - 48:6c:0c:45 - Q: - 82:2b:9e:a3:a6:b4:b3:7d:ae:03:c0:df:2d:3c:99: - e7:e4:72:a4:6f - G: - 5e:97:51:30:39:77:69:d4:d3:2b:c6:31:27:6d:4f: - c4:71:0a:be:0c:88:b9:53:64:05:ef:4a:0d:19:8f: - 71:bc:10:a6:45:dd:a0:55:e1:77:02:28:84:58:09: - da:f9:ad:16:6e:22:3f:13:f7:91:71:44:5b:5f:98: - 8c:92:21:13 - Signature Algorithm: 0 - 30:2c:02:14:6e:2f:4d:43:42:fe:ef:dd:d6:5d:82:ce:40:35: - 0f:df:f6:03:d0:56:02:14:2c:af:8d:a5:7e:00:cb:18:c9:eb: - 03:ee:9b:92:32:c0:15:73:6a:29 diff --git a/pkg/certinfo/test_certs/leaf2.key.pem b/pkg/certinfo/test_certs/leaf2.key.pem deleted file mode 100644 index 592b8b1..0000000 --- a/pkg/certinfo/test_certs/leaf2.key.pem +++ /dev/null @@ -1,14 +0,0 @@ ------BEGIN DSA PARAMETERS----- -MIGcAkEA+pupffsnoLUmFSwWVwi2R4vsZOIDtRS3xFaWTQAosCYlTC+YSPzfF3Ep 
-z9IYjpke/rt4R43MQp2COm1iSGwMRQIVAIIrnqOmtLN9rgPA3y08mefkcqRvAkBe -l1EwOXdp1NMrxjEnbU/EcQq+DIi5U2QF70oNGY9xvBCmRd2gVeF3AiiEWAna+a0W -biI/E/eRcURbX5iMkiET ------END DSA PARAMETERS----- ------BEGIN DSA PRIVATE KEY----- -MIH3AgEAAkEA+pupffsnoLUmFSwWVwi2R4vsZOIDtRS3xFaWTQAosCYlTC+YSPzf -F3Epz9IYjpke/rt4R43MQp2COm1iSGwMRQIVAIIrnqOmtLN9rgPA3y08mefkcqRv -AkBel1EwOXdp1NMrxjEnbU/EcQq+DIi5U2QF70oNGY9xvBCmRd2gVeF3AiiEWAna -+a0WbiI/E/eRcURbX5iMkiETAkA7K/cF5XB9K3eJZfVPd8ZZF1Cldu5RlEOxjlFk -NU9oQqOF16BXplXv5RqgTxoqYYonUFXMXGtOsWfuMU4mM5EeAhQnHhEUpjk7tS4T -yHMulcqOTLVxEQ== ------END DSA PRIVATE KEY----- diff --git a/pkg/certinfo/test_certs/leaf3.cert.pem b/pkg/certinfo/test_certs/leaf3.cert.pem deleted file mode 100644 index 65c47b4..0000000 --- a/pkg/certinfo/test_certs/leaf3.cert.pem +++ /dev/null @@ -1,54 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 4 (0x4) - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, O=World Widget Authority, OU=Identity Affairs, CN=worldwidgetauthority.com/emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 GMT - Not After : Jun 30 07:37:21 2040 GMT - Subject: C=ZA, ST=Gauteng, O=Sub-Saharan Widgets Corporation, OU=21st Century Department, CN=subsaharanwidgets.com/emailAddress=nobody@subsaharanwidgets.com - Subject Public Key Info: - Public Key Algorithm: id-ecPublicKey - Public-Key: (256 bit) - pub: - 04:84:25:d6:a2:d0:91:8a:0b:9d:7e:82:51:29:29: - fc:61:d7:31:00:d9:31:62:39:1c:0a:72:65:20:94: - d8:c0:28:60:03:44:db:e4:e8:42:2e:a5:e4:36:ff: - 2d:13:12:b7:c2:9c:a8:af:10:2d:af:07:02:ba:08: - 7f:37:ce:1f:bc - ASN1 OID: prime256v1 - NIST CURVE: P-256 - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 01:27:7C:0E:46:3D:5B:DA:40:EF:37:5B:B6:B4:4A:30:58:A8:4C:76 - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature 
Algorithm: sha256WithRSAEncryption - 4c:66:23:bd:22:0d:20:f4:d1:ce:ad:f6:55:ff:9e:6c:e5:fb: - 09:08:e6:2f:ad:a7:db:85:67:a8:d5:c6:1c:83:6c:8f:68:7c: - 8f:3e:f6:bd:d8:17:85:79:5b:6f:e6:57:c8:9e:12:13:e0:9f: - 88:ee:d5:75:66:fc:95:92:f9:7c ------BEGIN CERTIFICATE----- -MIIC1jCCAoCgAwIBAgIBBDANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExHzAdBgNVBAoMFldvcmxkIFdpZGdldCBBdXRo -b3JpdHkxGTAXBgNVBAsMEElkZW50aXR5IEFmZmFpcnMxITAfBgNVBAMMGHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTEuMCwGCSqGSIb3DQEJARYfbm9ib2R5QHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTAeFw0yMDA3MjMxODU2NDdaFw00MDA2MzAwNzM3 -MjFaMIG4MQswCQYDVQQGEwJaQTEQMA4GA1UECAwHR2F1dGVuZzEoMCYGA1UECgwf -U3ViLVNhaGFyYW4gV2lkZ2V0cyBDb3Jwb3JhdGlvbjEgMB4GA1UECwwXMjFzdCBD -ZW50dXJ5IERlcGFydG1lbnQxHjAcBgNVBAMMFXN1YnNhaGFyYW53aWRnZXRzLmNv -bTErMCkGCSqGSIb3DQEJARYcbm9ib2R5QHN1YnNhaGFyYW53aWRnZXRzLmNvbTBZ -MBMGByqGSM49AgEGCCqGSM49AwEHA0IABIQl1qLQkYoLnX6CUSkp/GHXMQDZMWI5 -HApyZSCU2MAoYANE2+ToQi6l5Db/LRMSt8KcqK8QLa8HAroIfzfOH7yjfTB7MAkG -A1UdEwQCMAAwLgYJYIZIAYb4QgENBCEWH1RoaXMgaXMgYSB0ZXN0IGNlcnRpZmlj -YXRlIG9ubHkwHQYDVR0OBBYEFAEnfA5GPVvaQO83W7a0SjBYqEx2MB8GA1UdIwQY -MBaAFHwPJp3tyHrABR6Zo12lno2mppZeMA0GCSqGSIb3DQEBCwUAA0EATGYjvSIN -IPTRzq32Vf+ebOX7CQjmL62n24VnqNXGHINsj2h8jz72vdgXhXlbb+ZXyJ4SE+Cf -iO7VdWb8lZL5fA== ------END CERTIFICATE----- diff --git a/pkg/certinfo/test_certs/leaf3.cert.text b/pkg/certinfo/test_certs/leaf3.cert.text deleted file mode 100644 index 0d92b8c..0000000 --- a/pkg/certinfo/test_certs/leaf3.cert.text +++ /dev/null @@ -1,37 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 4 (0x4) - Signature Algorithm: SHA256-RSA - Issuer: C=US,ST=California,O=World Widget Authority,OU=Identity Affairs,CN=worldwidgetauthority.com,emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 UTC - Not After : Jun 30 07:37:21 2040 UTC - Subject: C=ZA,ST=Gauteng,O=Sub-Saharan Widgets Corporation,OU=21st Century 
Department,CN=subsaharanwidgets.com,emailAddress=nobody@subsaharanwidgets.com - Subject Public Key Info: - Public Key Algorithm: ECDSA - Public-Key: (256 bit) - X: - 84:25:d6:a2:d0:91:8a:0b:9d:7e:82:51:29:29:fc: - 61:d7:31:00:d9:31:62:39:1c:0a:72:65:20:94:d8: - c0:28 - Y: - 60:03:44:db:e4:e8:42:2e:a5:e4:36:ff:2d:13:12: - b7:c2:9c:a8:af:10:2d:af:07:02:ba:08:7f:37:ce: - 1f:bc - Curve: P-256 - X509v3 extensions: - X509v3 Basic Constraints: - CA:FALSE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 01:27:7C:0E:46:3D:5B:DA:40:EF:37:5B:B6:B4:4A:30:58:A8:4C:76 - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: SHA256-RSA - 4c:66:23:bd:22:0d:20:f4:d1:ce:ad:f6:55:ff:9e:6c:e5:fb: - 09:08:e6:2f:ad:a7:db:85:67:a8:d5:c6:1c:83:6c:8f:68:7c: - 8f:3e:f6:bd:d8:17:85:79:5b:6f:e6:57:c8:9e:12:13:e0:9f: - 88:ee:d5:75:66:fc:95:92:f9:7c diff --git a/pkg/certinfo/test_certs/leaf3.cfg b/pkg/certinfo/test_certs/leaf3.cfg deleted file mode 100644 index 0f022c3..0000000 --- a/pkg/certinfo/test_certs/leaf3.cfg +++ /dev/null @@ -1,27 +0,0 @@ -# OpenSSL configuration for creating a test certiicate -# which can be signed by a certificate authority -# -[ req ] -default_bits = 512 -default_keyfile = leaf3.key.pem -encrypt_key = no -default_md = sha256 -distinguished_name = req_distinguished_name -req_extensions = req_extensions -prompt = no - -######################################## -# Settings for the certificate request # -######################################## -[ req_distinguished_name ] -C = ZA -ST = Gauteng -L = Pretoria -O = Sub-Saharan Widgets Corporation -OU = 21st Century Department -CN = subsaharanwidgets.com -emailAddress = nobody@subsaharanwidgets.com - -# Extensions to place in a leaf certificate. 
-# Some may be overridden by the CA configuration -[ req_extensions ] diff --git a/pkg/certinfo/test_certs/leaf3.csr.pem b/pkg/certinfo/test_certs/leaf3.csr.pem deleted file mode 100644 index 0c0f664..0000000 --- a/pkg/certinfo/test_certs/leaf3.csr.pem +++ /dev/null @@ -1,32 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - Subject: C=ZA, ST=Gauteng, L=Pretoria, O=Sub-Saharan Widgets Corporation, OU=21st Century Department, CN=subsaharanwidgets.com/emailAddress=nobody@subsaharanwidgets.com - Subject Public Key Info: - Public Key Algorithm: id-ecPublicKey - Public-Key: (256 bit) - pub: - 04:84:25:d6:a2:d0:91:8a:0b:9d:7e:82:51:29:29: - fc:61:d7:31:00:d9:31:62:39:1c:0a:72:65:20:94: - d8:c0:28:60:03:44:db:e4:e8:42:2e:a5:e4:36:ff: - 2d:13:12:b7:c2:9c:a8:af:10:2d:af:07:02:ba:08: - 7f:37:ce:1f:bc - ASN1 OID: prime256v1 - NIST CURVE: P-256 - Attributes: - Signature Algorithm: ecdsa-with-SHA256 - 30:45:02:20:10:3a:61:7d:86:47:e7:51:ba:66:3f:14:a7:cb: - b7:e9:37:be:b9:22:8e:6b:ff:24:be:91:b9:50:4e:15:35:2d: - 02:21:00:e6:9a:55:21:3f:30:96:96:2a:5c:9f:dc:60:bf:44: - 9d:8d:ef:98:02:d6:17:fe:d0:2c:ae:f4:4a:02:84:a2:61 ------BEGIN CERTIFICATE REQUEST----- -MIIBlzCCAT0CAQAwgcsxCzAJBgNVBAYTAlpBMRAwDgYDVQQIDAdHYXV0ZW5nMREw -DwYDVQQHDAhQcmV0b3JpYTEoMCYGA1UECgwfU3ViLVNhaGFyYW4gV2lkZ2V0cyBD -b3Jwb3JhdGlvbjEgMB4GA1UECwwXMjFzdCBDZW50dXJ5IERlcGFydG1lbnQxHjAc -BgNVBAMMFXN1YnNhaGFyYW53aWRnZXRzLmNvbTErMCkGCSqGSIb3DQEJARYcbm9i -b2R5QHN1YnNhaGFyYW53aWRnZXRzLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEH -A0IABIQl1qLQkYoLnX6CUSkp/GHXMQDZMWI5HApyZSCU2MAoYANE2+ToQi6l5Db/ -LRMSt8KcqK8QLa8HAroIfzfOH7ygDzANBgkqhkiG9w0BCQ4xADAKBggqhkjOPQQD -AgNIADBFAiAQOmF9hkfnUbpmPxSny7fpN765Io5r/yS+kblQThU1LQIhAOaaVSE/ -MJaWKlyf3GC/RJ2N75gC1hf+0Cyu9EoChKJh ------END CERTIFICATE REQUEST----- diff --git a/pkg/certinfo/test_certs/leaf3.csr.text b/pkg/certinfo/test_certs/leaf3.csr.text deleted file mode 100644 index 687a494..0000000 --- a/pkg/certinfo/test_certs/leaf3.csr.text +++ /dev/null @@ -1,21 +0,0 @@ -Certificate 
Request: - Data: - Version: 0 (0x0) - Subject: C=ZA,ST=Gauteng,UnknownOID=2.5.4.7,O=Sub-Saharan Widgets Corporation,OU=21st Century Department,CN=subsaharanwidgets.com,emailAddress=nobody@subsaharanwidgets.com - Subject Public Key Info: - Public Key Algorithm: ECDSA - Public-Key: (256 bit) - X: - 84:25:d6:a2:d0:91:8a:0b:9d:7e:82:51:29:29:fc: - 61:d7:31:00:d9:31:62:39:1c:0a:72:65:20:94:d8: - c0:28 - Y: - 60:03:44:db:e4:e8:42:2e:a5:e4:36:ff:2d:13:12: - b7:c2:9c:a8:af:10:2d:af:07:02:ba:08:7f:37:ce: - 1f:bc - Curve: P-256 - Signature Algorithm: ECDSA-SHA256 - 30:45:02:20:10:3a:61:7d:86:47:e7:51:ba:66:3f:14:a7:cb: - b7:e9:37:be:b9:22:8e:6b:ff:24:be:91:b9:50:4e:15:35:2d: - 02:21:00:e6:9a:55:21:3f:30:96:96:2a:5c:9f:dc:60:bf:44: - 9d:8d:ef:98:02:d6:17:fe:d0:2c:ae:f4:4a:02:84:a2:61 diff --git a/pkg/certinfo/test_certs/leaf3.key.pem b/pkg/certinfo/test_certs/leaf3.key.pem deleted file mode 100644 index 72eb64a..0000000 --- a/pkg/certinfo/test_certs/leaf3.key.pem +++ /dev/null @@ -1,8 +0,0 @@ ------BEGIN EC PARAMETERS----- -BggqhkjOPQMBBw== ------END EC PARAMETERS----- ------BEGIN EC PRIVATE KEY----- -MHcCAQEEIAIq6C/1Aeou9xIIEA54c5eSqmumMWBU3pA2Mk1BSRusoAoGCCqGSM49 -AwEHoUQDQgAEhCXWotCRigudfoJRKSn8YdcxANkxYjkcCnJlIJTYwChgA0Tb5OhC -LqXkNv8tExK3wpyorxAtrwcCugh/N84fvA== ------END EC PRIVATE KEY----- diff --git a/pkg/certinfo/test_certs/make-certs.sh b/pkg/certinfo/test_certs/make-certs.sh deleted file mode 100644 index 33c32c0..0000000 --- a/pkg/certinfo/test_certs/make-certs.sh +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env bash - -set -e - -# Certificate request files -ROOT1_CSR="root1.csr.pem" -LEAF1_CSR="leaf1.csr.pem" -LEAF2_CSR="leaf2.csr.pem" -LEAF3_CSR="leaf3.csr.pem" - -# Key files generated by 'make-keys.sh' -ROOT1_KEY="root1.key.pem" -LEAF1_KEY="leaf1.key.pem" -LEAF2_KEY="leaf2.key.pem" -LEAF3_KEY="leaf3.key.pem" - -# Certificate files -ROOT1_CERT="root1.cert.pem" -LEAF1_CERT="leaf1.cert.pem" -LEAF2_CERT="leaf2.cert.pem" -LEAF3_CERT="leaf3.cert.pem" - -# OpenSSL 
configuration files -ROOT1_CFG="root1.cfg" -LEAF1_CFG="leaf1.cfg" -LEAF2_CFG="leaf2.cfg" -LEAF3_CFG="leaf3.cfg" - -# Temporary files for using the OpenSSL 'ca' subcommand. -# These names must match the OpenSSL configuration file. -CA_TMP="./tmp" -CA_SERIAL=${CA_TMP}/serial.txt -CA_DB=${CA_TMP}/certdb.txt - - -# Create the test Certificate Authority -openssl req -new -config ${ROOT1_CFG} -key ${ROOT1_KEY} -out ${ROOT1_CSR} -rm -rf ${CA_TMP} -mkdir ${CA_TMP} -echo "01" > ${CA_SERIAL} -touch ${CA_DB} -openssl ca -selfsign -batch -config ${ROOT1_CFG} -keyfile ${ROOT1_KEY} -extensions ca_root_extensions -in ${ROOT1_CSR} -out ${ROOT1_CERT} - -# Create a leaf RSA certificate -openssl req -new -config ${LEAF1_CFG} -key ${LEAF1_KEY} -out ${LEAF1_CSR} -openssl ca -batch -config ${ROOT1_CFG} -keyfile ${ROOT1_KEY} -cert ${ROOT1_CERT} -extensions ca_leaf_extensions -in ${LEAF1_CSR} -out ${LEAF1_CERT} - -# Create a leaf DSA certificate -openssl req -new -config ${LEAF2_CFG} -key ${LEAF2_KEY} -out ${LEAF2_CSR} -openssl ca -batch -config ${ROOT1_CFG} -keyfile ${ROOT1_KEY} -cert ${ROOT1_CERT} -extensions ca_leaf_extensions -in ${LEAF2_CSR} -out ${LEAF2_CERT} - -# Create a leaf ECDSA certificate -openssl req -new -config ${LEAF3_CFG} -key ${LEAF3_KEY} -out ${LEAF3_CSR} -openssl ca -batch -config ${ROOT1_CFG} -keyfile ${ROOT1_KEY} -cert ${ROOT1_CERT} -extensions ca_leaf_extensions -in ${LEAF3_CSR} -out ${LEAF3_CERT} - -# Clean up all the garbage that 'openssl ca' left behind -rm -rf ${CA_TMP} diff --git a/pkg/certinfo/test_certs/new-keys.sh b/pkg/certinfo/test_certs/new-keys.sh deleted file mode 100644 index ec0378e..0000000 --- a/pkg/certinfo/test_certs/new-keys.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -ROOT1_KEY="root1.key.pem" -LEAF1_KEY="leaf1.key.pem" -LEAF2_KEY="leaf2.key.pem" -LEAF3_KEY="leaf3.key.pem" -LEAF4_KEY="leaf4.key.pem" - -# We don't use the keys so make them small -openssl genrsa -out ${ROOT1_KEY} 512 -openssl genrsa -out ${LEAF1_KEY} 512 -openssl 
dsaparam -genkey -out ${LEAF2_KEY} -outform PEM 512 -openssl ecparam -genkey -out ${LEAF3_KEY} -outform PEM -name secp256k1 -openssl ecparam -genkey -out ${LEAF4_KEY} -outform PEM -name secp256k1 -param_enc explicit diff --git a/pkg/certinfo/test_certs/root1.cert.pem b/pkg/certinfo/test_certs/root1.cert.pem deleted file mode 100644 index 8d593ba..0000000 --- a/pkg/certinfo/test_certs/root1.cert.pem +++ /dev/null @@ -1,53 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha256WithRSAEncryption - Issuer: C=US, ST=California, O=World Widget Authority, OU=Identity Affairs, CN=worldwidgetauthority.com/emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 GMT - Not After : Jun 30 07:37:21 2040 GMT - Subject: C=US, ST=California, O=World Widget Authority, OU=Identity Affairs, CN=worldwidgetauthority.com/emailAddress=nobody@worldwidgetauthority.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (512 bit) - Modulus: - 00:b5:d6:60:b9:f9:31:09:fe:97:34:c4:f7:6b:7b: - 06:01:f4:8b:fe:1a:e0:65:8f:fd:30:c0:82:30:3c: - 61:f7:c2:1d:98:7c:3a:ed:9f:b4:5e:8f:15:ce:90: - 8b:45:de:db:23:0e:aa:4d:95:e9:af:3b:79:26:a5: - ce:71:8a:3a:bd - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:TRUE - Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: sha256WithRSAEncryption - 60:bd:b4:c4:9a:09:0d:7a:d7:b4:6b:e2:85:3b:78:0b:97:de: - 57:47:34:19:37:2a:82:1a:79:c3:3f:0b:71:46:fe:9b:db:ce: - c7:41:42:2b:17:22:b4:d5:f1:fc:18:c3:31:af:c9:c4:4d:2d: - 92:16:f7:a6:6d:4f:5d:e0:8c:83 ------BEGIN CERTIFICATE----- -MIIC1jCCAoCgAwIBAgIBATANBgkqhkiG9w0BAQsFADCBsTELMAkGA1UEBhMCVVMx -EzARBgNVBAgMCkNhbGlmb3JuaWExHzAdBgNVBAoMFldvcmxkIFdpZGdldCBBdXRo 
-b3JpdHkxGTAXBgNVBAsMEElkZW50aXR5IEFmZmFpcnMxITAfBgNVBAMMGHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTEuMCwGCSqGSIb3DQEJARYfbm9ib2R5QHdvcmxk -d2lkZ2V0YXV0aG9yaXR5LmNvbTAeFw0yMDA3MjMxODU2NDdaFw00MDA2MzAwNzM3 -MjFaMIGxMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEfMB0GA1UE -CgwWV29ybGQgV2lkZ2V0IEF1dGhvcml0eTEZMBcGA1UECwwQSWRlbnRpdHkgQWZm -YWlyczEhMB8GA1UEAwwYd29ybGR3aWRnZXRhdXRob3JpdHkuY29tMS4wLAYJKoZI -hvcNAQkBFh9ub2JvZHlAd29ybGR3aWRnZXRhdXRob3JpdHkuY29tMFwwDQYJKoZI -hvcNAQEBBQADSwAwSAJBALXWYLn5MQn+lzTE92t7BgH0i/4a4GWP/TDAgjA8YffC -HZh8Ou2ftF6PFc6Qi0Xe2yMOqk2V6a87eSalznGKOr0CAwEAAaOBgDB+MAwGA1Ud -EwQFMAMBAf8wLgYJYIZIAYb4QgENBCEWH1RoaXMgaXMgYSB0ZXN0IGNlcnRpZmlj -YXRlIG9ubHkwHQYDVR0OBBYEFHwPJp3tyHrABR6Zo12lno2mppZeMB8GA1UdIwQY -MBaAFHwPJp3tyHrABR6Zo12lno2mppZeMA0GCSqGSIb3DQEBCwUAA0EAYL20xJoJ -DXrXtGvihTt4C5feV0c0GTcqghp5wz8LcUb+m9vOx0FCKxcitNXx/BjDMa/JxE0t -khb3pm1PXeCMgw== ------END CERTIFICATE----- diff --git a/pkg/certinfo/test_certs/root1.cert.text b/pkg/certinfo/test_certs/root1.cert.text deleted file mode 100644 index 97d3ea2..0000000 --- a/pkg/certinfo/test_certs/root1.cert.text +++ /dev/null @@ -1,35 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: SHA256-RSA - Issuer: C=US,ST=California,O=World Widget Authority,OU=Identity Affairs,CN=worldwidgetauthority.com,emailAddress=nobody@worldwidgetauthority.com - Validity - Not Before: Jul 23 18:56:47 2020 UTC - Not After : Jun 30 07:37:21 2040 UTC - Subject: C=US,ST=California,O=World Widget Authority,OU=Identity Affairs,CN=worldwidgetauthority.com,emailAddress=nobody@worldwidgetauthority.com - Subject Public Key Info: - Public Key Algorithm: RSA - Public-Key: (512 bit) - Modulus: - b5:d6:60:b9:f9:31:09:fe:97:34:c4:f7:6b:7b:06: - 01:f4:8b:fe:1a:e0:65:8f:fd:30:c0:82:30:3c:61: - f7:c2:1d:98:7c:3a:ed:9f:b4:5e:8f:15:ce:90:8b: - 45:de:db:23:0e:aa:4d:95:e9:af:3b:79:26:a5:ce: - 71:8a:3a:bd - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Basic Constraints: - CA:TRUE - 
Netscape Comment: - This is a test certificate only - X509v3 Subject Key Identifier: - 7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - X509v3 Authority Key Identifier: - keyid:7C:0F:26:9D:ED:C8:7A:C0:05:1E:99:A3:5D:A5:9E:8D:A6:A6:96:5E - - Signature Algorithm: SHA256-RSA - 60:bd:b4:c4:9a:09:0d:7a:d7:b4:6b:e2:85:3b:78:0b:97:de: - 57:47:34:19:37:2a:82:1a:79:c3:3f:0b:71:46:fe:9b:db:ce: - c7:41:42:2b:17:22:b4:d5:f1:fc:18:c3:31:af:c9:c4:4d:2d: - 92:16:f7:a6:6d:4f:5d:e0:8c:83 diff --git a/pkg/certinfo/test_certs/root1.cfg b/pkg/certinfo/test_certs/root1.cfg deleted file mode 100644 index c0d02a5..0000000 --- a/pkg/certinfo/test_certs/root1.cfg +++ /dev/null @@ -1,69 +0,0 @@ -# OpenSSL configuration for creating a test CA certiicate -# and using it to sign other test certificates -# -[ req ] -default_bits = 512 -default_keyfile = root1.key.pem -encrypt_key = no -default_md = sha256 -distinguished_name = req_distinguished_name -prompt = no - -########################################### -# Settings for the CA certificate request # -########################################### -[ req_distinguished_name ] -C = US -ST = California -L = San Francisco -O = World Widget Authority -OU = Identity Affairs -CN = worldwidgetauthority.com -emailAddress = nobody@worldwidgetauthority.com - -########################################### -# Settings for the CA certificate signing # -########################################### -[ ca ] -default_ca = ca_config - -[ ca_config ] -dir = ./tmp # Temp file directory which will be deleted -certs = $dir -new_certs_dir = $dir -crl_dir = $dir -serial = $dir/serial.txt -database = $dir/certdb.txt -crlnumber = $dir/crlnumber.txt -crl = $dir/crl.pem -unique_subject = yes -policy = ca_policy_match -default_md = default -name_opt = ca_default -cert_opt = ca_default -x509_extensions = ca_leaf_extensions -default_startdate = 200723185647Z # 23-July-2020 18:56:47 -default_enddate = 400630073721Z # 30-June-2040 07:37:21 - -[ ca_policy_match ] 
-countryName = optional -stateOrProvinceName = optional -organizationName = optional -organizationalUnitName = optional -commonName = supplied -emailAddress = optional - -# Extensions to place in a CA certificate -[ ca_root_extensions ] -basicConstraints = CA:true -nsComment = "This is a test certificate only" -subjectKeyIdentifier = hash -authorityKeyIdentifier = keyid,issuer - -# Extensions to place in a leaf (non-CA) certificate -[ ca_leaf_extensions ] -basicConstraints = CA:false -nsComment = "This is a test certificate only" -subjectKeyIdentifier = hash -authorityKeyIdentifier = keyid,issuer - diff --git a/pkg/certinfo/test_certs/root1.csr.pem b/pkg/certinfo/test_certs/root1.csr.pem deleted file mode 100644 index 16e209c..0000000 --- a/pkg/certinfo/test_certs/root1.csr.pem +++ /dev/null @@ -1,32 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - Subject: C=US, ST=California, L=San Francisco, O=World Widget Authority, OU=Identity Affairs, CN=worldwidgetauthority.com/emailAddress=nobody@worldwidgetauthority.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - Public-Key: (512 bit) - Modulus: - 00:b5:d6:60:b9:f9:31:09:fe:97:34:c4:f7:6b:7b: - 06:01:f4:8b:fe:1a:e0:65:8f:fd:30:c0:82:30:3c: - 61:f7:c2:1d:98:7c:3a:ed:9f:b4:5e:8f:15:ce:90: - 8b:45:de:db:23:0e:aa:4d:95:e9:af:3b:79:26:a5: - ce:71:8a:3a:bd - Exponent: 65537 (0x10001) - Attributes: - a0:00 - Signature Algorithm: sha256WithRSAEncryption - 36:f9:ef:78:db:36:5a:f3:82:82:69:5e:f1:4a:61:d1:01:1e: - d8:92:ed:41:f5:8b:06:6b:6d:69:da:fe:e0:6e:a1:17:c8:7a: - 4a:eb:c4:a2:82:aa:65:57:88:0c:98:9f:2d:d5:a5:66:fb:38: - b6:03:d0:2e:8a:11:01:ea:0c:75 ------BEGIN CERTIFICATE REQUEST----- -MIIBhTCCAS8CAQAwgckxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh -MRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMR8wHQYDVQQKDBZXb3JsZCBXaWRnZXQg -QXV0aG9yaXR5MRkwFwYDVQQLDBBJZGVudGl0eSBBZmZhaXJzMSEwHwYDVQQDDBh3 -b3JsZHdpZGdldGF1dGhvcml0eS5jb20xLjAsBgkqhkiG9w0BCQEWH25vYm9keUB3 
-b3JsZHdpZGdldGF1dGhvcml0eS5jb20wXDANBgkqhkiG9w0BAQEFAANLADBIAkEA -tdZgufkxCf6XNMT3a3sGAfSL/hrgZY/9MMCCMDxh98IdmHw67Z+0Xo8VzpCLRd7b -Iw6qTZXprzt5JqXOcYo6vQIDAQABoAAwDQYJKoZIhvcNAQELBQADQQA2+e942zZa -84KCaV7xSmHRAR7Yku1B9YsGa21p2v7gbqEXyHpK68SigqplV4gMmJ8t1aVm+zi2 -A9AuihEB6gx1 ------END CERTIFICATE REQUEST----- diff --git a/pkg/certinfo/test_certs/root1.csr.text b/pkg/certinfo/test_certs/root1.csr.text deleted file mode 100644 index 9747e19..0000000 --- a/pkg/certinfo/test_certs/root1.csr.text +++ /dev/null @@ -1,19 +0,0 @@ -Certificate Request: - Data: - Version: 0 (0x0) - Subject: C=US,ST=California,UnknownOID=2.5.4.7,O=World Widget Authority,OU=Identity Affairs,CN=worldwidgetauthority.com,emailAddress=nobody@worldwidgetauthority.com - Subject Public Key Info: - Public Key Algorithm: RSA - Public-Key: (512 bit) - Modulus: - b5:d6:60:b9:f9:31:09:fe:97:34:c4:f7:6b:7b:06: - 01:f4:8b:fe:1a:e0:65:8f:fd:30:c0:82:30:3c:61: - f7:c2:1d:98:7c:3a:ed:9f:b4:5e:8f:15:ce:90:8b: - 45:de:db:23:0e:aa:4d:95:e9:af:3b:79:26:a5:ce: - 71:8a:3a:bd - Exponent: 65537 (0x10001) - Signature Algorithm: SHA256-RSA - 36:f9:ef:78:db:36:5a:f3:82:82:69:5e:f1:4a:61:d1:01:1e: - d8:92:ed:41:f5:8b:06:6b:6d:69:da:fe:e0:6e:a1:17:c8:7a: - 4a:eb:c4:a2:82:aa:65:57:88:0c:98:9f:2d:d5:a5:66:fb:38: - b6:03:d0:2e:8a:11:01:ea:0c:75 diff --git a/pkg/certinfo/test_certs/root1.key.pem b/pkg/certinfo/test_certs/root1.key.pem deleted file mode 100644 index 5b0b55c..0000000 --- a/pkg/certinfo/test_certs/root1.key.pem +++ /dev/null @@ -1,9 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIBPAIBAAJBALXWYLn5MQn+lzTE92t7BgH0i/4a4GWP/TDAgjA8YffCHZh8Ou2f -tF6PFc6Qi0Xe2yMOqk2V6a87eSalznGKOr0CAwEAAQJBAKHh9gD7OU5FioXfKMpW -HvYUqaESel3Qv77sU7rvtuQjOhjsqYSGoy7NLyN7KF1YDudWFSc3ODlKCfNGAycL -hmECIQDr04YLyWJq/YP5j6IVX6fgpKaokkrl5ATr9dU/3l8S2QIhAMVkhoztfTQV -a32dVxcVWZMJ+7dhfitEhL067JDsCvCFAiBgClry/ebo76oJvqch0T0LTcLZsAp0 -qXIfrpRUWHmbsQIhAJ45v1T2BXebd+dgGPBq77tatEhBwPYD3Xcl5X0hiH9VAiEA -k0CmvueX4YumnYeTnCfbgVwELrulIjXrJ1taJzPwHC0= 
------END RSA PRIVATE KEY----- diff --git a/pkg/discover/k8s/k8s.go b/pkg/discover/k8s/k8s.go deleted file mode 100644 index a456348..0000000 --- a/pkg/discover/k8s/k8s.go +++ /dev/null @@ -1,208 +0,0 @@ -// Package k8s provides pod discovery for Kubernetes. -package k8s - -import ( - "context" - "fmt" - "log" - "path/filepath" - "strconv" - - "github.com/hashicorp/go-multierror" - "github.com/mitchellh/go-homedir" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - - // Register all known auth mechanisms since we might be authenticating - // from anywhere. - _ "k8s.io/client-go/plugin/pkg/client/auth" -) - -const ( - // AnnotationKeyPort is the annotation name of the field that specifies - // the port name or number to append to the address. - AnnotationKeyPort = "consul.hashicorp.com/auto-join-port" -) - -type Provider struct{} - -func (p *Provider) Help() string { - return `Kubernetes (K8S): - provider: "k8s" - kubeconfig: Path to the kubeconfig file. - namespace: Namespace to search for pods (defaults to "default"). - label_selector: Label selector value to filter pods. - field_selector: Field selector value to filter pods. - host_network: "true" if pod host IP and ports should be used. - The kubeconfig file value will be searched in the following locations: - 1. Use path from "kubeconfig" option if provided. - 2. Use path from KUBECONFIG environment variable. - 3. Use default path of $HOME/.kube/config - By default, the Pod IP is used to join. The "host_network" option may - be set to use the Host IP. No port is used by default. Pods may set - an annotation 'hashicorp/consul-auto-join-port' to a named port or - an integer value. If the value matches a named port, that port will - be used to join. - Note that if "host_network" is set to true, then only pods that have - a HostIP available will be selected. 
If a port annotation exists, then - the port must be exposed via a HostPort as well, otherwise the pod will - be ignored. -` -} - -func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) { - if args["provider"] != "k8s" { - return nil, fmt.Errorf("discover-k8s: invalid provider " + args["provider"]) - } - - // Get the configuration. This can come from multiple sources. We first - // try kubeconfig it is set directly, then we fall back to in-cluster - // auth. Finally, we try the default kubeconfig path. - kubeconfig := args["kubeconfig"] - if kubeconfig == "" { - // If kubeconfig is empty, let's first try the default directory. - // This is must faster than trying in-cluster auth so we try this - // first. - dir, err := homedir.Dir() - if err != nil { - return nil, fmt.Errorf("discover-k8s: error retrieving home directory: %s", err) - } - kubeconfig = filepath.Join(dir, ".kube", "config") - } - - // First try to get the configuration from the kubeconfig value - config, configErr := clientcmd.BuildConfigFromFlags("", kubeconfig) - if configErr != nil { - configErr = fmt.Errorf("discover-k8s: error loading kubeconfig: %s", configErr) - - // kubeconfig failed, fall back and try in-cluster config. We do - // this as the fallback since this makes network connections and - // is much slower to fail. 
- var err error - config, err = rest.InClusterConfig() - if err != nil { - return nil, multierror.Append(configErr, fmt.Errorf( - "discover-k8s: error loading in-cluster config: %s", err)) - } - } - - // Initialize the clientset - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, fmt.Errorf("discover-k8s: error initializing k8s client: %s", err) - } - - namespace := args["namespace"] - if namespace == "" { - namespace = "default" - } - - // List all the pods based on the filters we requested - pods, err := clientset.CoreV1().Pods(namespace).List( - context.Background(), - metav1.ListOptions{ - LabelSelector: args["label_selector"], - FieldSelector: args["field_selector"], - }) - if err != nil { - return nil, fmt.Errorf("discover-k8s: error listing pods: %s", err) - } - - return PodAddrs(pods, args, l) -} - -// PodAddrs extracts the addresses from a list of pods. -// -// This is a separate method so that we can unit test this without having -// to setup complicated K8S cluster scenarios. It shouldn't generally be -// called externally. -func PodAddrs(pods *corev1.PodList, args map[string]string, l *log.Logger) ([]string, error) { - hostNetwork := false - if v := args["host_network"]; v != "" { - var err error - hostNetwork, err = strconv.ParseBool(v) - if err != nil { - return nil, fmt.Errorf("discover-k8s: host_network must be boolean value: %s", err) - } - } - - var addrs []string -// PodLoop: - for _, pod := range pods.Items { - if pod.Status.Phase != corev1.PodRunning { - l.Printf("[DEBUG] discover-k8s: ignoring pod %q, not running: %q", - pod.Name, pod.Status.Phase) - continue - } - - // If there is a Ready condition available, we need that to be true. - // If no ready condition is set, then we accept this pod regardless. 
- for _, condition := range pod.Status.Conditions { - if condition.Type == corev1.PodReady && condition.Status != corev1.ConditionTrue { - // l.Printf("[DEBUG] discover-k8s: ignoring pod %q, not ready state", pod.Name) - // continue PodLoop - l.Printf("[DEBUG] discover-k8s: pod %q, not ready state", pod.Name) - } - } - - // Get the IP address that we will join. - addr := pod.Status.PodIP - if hostNetwork { - addr = pod.Status.HostIP - } - if addr == "" { - // This can be empty according to the API docs, so we protect that. - l.Printf("[DEBUG] discover-k8s: ignoring pod %q, requested IP is empty", pod.Name) - continue - } - - // We only use the port if it is specified as an annotation. The - // annotation value can be a name or a number. - if v := pod.Annotations[AnnotationKeyPort]; v != "" { - port, err := podPort(&pod, v, hostNetwork) - if err != nil { - l.Printf("[DEBUG] discover-k8s: ignoring pod %q, error retrieving port: %s", - pod.Name, err) - continue - } - - addr = fmt.Sprintf("%s:%d", addr, port) - } - - addrs = append(addrs, addr) - } - - return addrs, nil -} - -// podPort extracts the proper port for the address from the given pod -// for a non-empty annotation. -// -// Pre-condition: annotation is non-empty -func podPort(pod *corev1.Pod, annotation string, host bool) (int32, error) { - // First look for a matching port matching the value of the annotation. - for _, container := range pod.Spec.Containers { - for _, portDef := range container.Ports { - if portDef.Name == annotation { - if host { - // It is possible for HostPort to be zero, if that is the - // case then we ignore this port. - if portDef.HostPort == 0 { - continue - } - - return portDef.HostPort, nil - } - - return portDef.ContainerPort, nil - } - } - } - - // Otherwise assume that the port is a numeric value. 
- v, err := strconv.ParseInt(annotation, 0, 32) - return int32(v), err -} \ No newline at end of file diff --git a/pkg/event/report.go b/pkg/event/report.go deleted file mode 100644 index 4f5eb17..0000000 --- a/pkg/event/report.go +++ /dev/null @@ -1,62 +0,0 @@ -package event - -import ( - "encoding/json" - "gitlab.oneitfarm.com/bifrost/go-toolbox/rediscluster" - "time" - - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" -) - -const ( - MSP_EVENT = "msp:event_msg" - EVENT_TYPE_FUSE = "fuse" - EVENT_TYPE_RATE_LIMIT = "ratelimit" - EVENT_TYPE_HEARTBEAT = "heartbeat" - EVENT_TYPE_RESOURCE = "resource" -) - -type EventReport struct { - redisCluster *rediscluster.Cluster -} - -type MSPEvent struct { - EventType string `json:"event_type"` // fuse, ratelimit, heartbeat,resource - EventTime int64 `json:"event_time"` - EventBody interface{} `json:"event_body"` -} - -var _eventReport *EventReport - -func InitEventClient(rc *rediscluster.Cluster) *EventReport { - if _eventReport == nil { - _eventReport = &EventReport{redisCluster: rc} - } - return _eventReport -} - -func Client() *EventReport { - return _eventReport -} - -func (ev *EventReport) Report(evType string, eventBody interface{}) { - msg := MSPEvent{ - EventType: evType, - EventTime: time.Now().UnixNano() / 1e6, - EventBody: eventBody, - } - b, err := json.Marshal(msg) - if err != nil { - logger.Errorf("event report json.Marshal err", err) - return - } - // 兼容logproxy - _, err = ev.redisCluster.Do("LPUSH", MSP_EVENT, b) - - if err != nil { - logger.Errorf("heartbeat report redis rpush", err) - // return - } - //logger.Warnf(string(b)) - return -} diff --git a/pkg/influxdb/client.go b/pkg/influxdb/client.go index 617deeb..1039119 100644 --- a/pkg/influxdb/client.go +++ b/pkg/influxdb/client.go @@ -1,9 +1,9 @@ package influxdb import ( - cilog "gitlab.oneitfarm.com/bifrost/cilog/v2" - _ "gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client" // this is important because of the bug in go mod - client 
"gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client/v2" + _ "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client" // this is important because of the bug in go mod + client "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/v2" + "github.com/ztalab/ZACA/pkg/logger" ) // UDPClient UDP Client @@ -18,7 +18,7 @@ func (p *UDPClient) newUDPV1Client() *UDPClient { Addr: p.Conf.UDPAddress, }) if err != nil { - cilog.Errorf("InfluxDBUDPClient err: %v", err) + logger.Errorf("InfluxDBUDPClient err: %v", err) } p.client = udpClient return p diff --git a/pkg/influxdb/config.go b/pkg/influxdb/config.go index 0ce0ad8..34b2133 100644 --- a/pkg/influxdb/config.go +++ b/pkg/influxdb/config.go @@ -1,13 +1,13 @@ package influxdb -// Config 配置文件 +// Config configuration type Config struct { - Enable bool `yaml:"enable"` //服务开关 + Enable bool `yaml:"enable"` //Service switch Address string `yaml:"address"` Port int `yaml:"port"` - UDPAddress string `yaml:"udp_address"` //influxdb 数据库的udp地址,ip:port - Database string `yaml:"database"` //数据库名称 - Precision string `yaml:"precision"` //精度 n, u, ms, s, m or h + UDPAddress string `yaml:"udp_address"` //influxdb UDP address of the database,ip:port + Database string `yaml:"database"` //Database name + Precision string `yaml:"precision"` //Accuracy n, u, ms, s, m or h UserName string `yaml:"username"` Password string `yaml:"password"` MaxIdleConns int `yaml:"max-idle-conns"` @@ -15,14 +15,14 @@ type Config struct { IdleConnTimeout int `yaml:"idle-conn-timeout"` } -// CustomConfig 自定义配置 +// CustomConfig Custom configuration type CustomConfig struct { - Enabled bool `yaml:"enabled"` //服务开关 + Enabled bool `yaml:"enabled"` //Service switch Address string `yaml:"address"` Port int `yaml:"port"` - UDPAddress string `yaml:"udp_address"` //influxdb 数据库的udp地址,ip:port - Database string `yaml:"database"` //数据库名称 - Precision string `yaml:"precision"` //精度 n, u, ms, s, m or h + UDPAddress string `yaml:"udp_address"` //influxdb UDP address of the 
database,ip:port + Database string `yaml:"database"` //Database name + Precision string `yaml:"precision"` //Accuracy n, u, ms, s, m or h UserName string `yaml:"username"` Password string `yaml:"password"` ReadUserName string `yaml:"read-username"` diff --git a/pkg/influxdb/influxdb-client/README.md b/pkg/influxdb/influxdb-client/README.md new file mode 100644 index 0000000..1438a34 --- /dev/null +++ b/pkg/influxdb/influxdb-client/README.md @@ -0,0 +1,38 @@ +# influxdb1-clientv2 +influxdb1-clientv2 is the current Go client API for InfluxDB 1.x. A Go client for the 2.0 API will be coming soon. + +InfluxDB is an open-source distributed time series database, find more about [InfluxDB](https://www.influxdata.com/time-series-platform/influxdb/) at https://docs.influxdata.com/influxdb/latest + +## Usage +To import into your Go project, run the following command in your terminal: +`go get gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client/v2` +Then, in your import declaration section of your Go file, paste the following: +`import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/v2"` + +If you get the error `build github.com/user/influx: cannot find module for path gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client/v2` when trying to build: +change your import to: +```go +import( + _ "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client" // this is important because of the bug in go mod + client "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/v2" +) +``` + +## Example +The following example creates a new client to the InfluxDB host on localhost:8086 and runs a query for the measurement `cpu_load` from the `mydb` database. 
+``` go +func ExampleClient_query() { + c, err := client.NewHTTPClient(client.HTTPConfig{ + Addr: "http://localhost:8086", + }) + if err != nil { + fmt.Println("Error creating InfluxDB Client: ", err.Error()) + } + defer c.Close() + + q := client.NewQuery("SELECT count(value) FROM cpu_load", "mydb", "") + if response, err := c.Query(q); err == nil && response.Error() == nil { + fmt.Println(response.Results) + } +} +``` diff --git a/pkg/influxdb/influxdb-client/influxdb.go b/pkg/influxdb/influxdb-client/influxdb.go new file mode 100644 index 0000000..40f45bf --- /dev/null +++ b/pkg/influxdb/influxdb-client/influxdb.go @@ -0,0 +1,870 @@ +// Package client implements a now-deprecated client for InfluxDB; +// use gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client/v2 instead. +package client // import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client" + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/models" +) + +const ( + // DefaultHost is the default host used to connect to an InfluxDB instance + DefaultHost = "localhost" + + // DefaultPort is the default port used to connect to an InfluxDB instance + DefaultPort = 8086 + + // DefaultTimeout is the default connection timeout used to connect to an InfluxDB instance + DefaultTimeout = 0 +) + +// Query is used to send a command to the server. Both Command and Database are required. +type Query struct { + Command string + Database string + + // RetentionPolicy tells the server which retention policy to use by default. + // This option is only effective when querying a server of version 1.6.0 or later. + RetentionPolicy string + + // Chunked tells the server to send back chunked responses. 
This places + // less load on the server by sending back chunks of the response rather + // than waiting for the entire response all at once. + Chunked bool + + // ChunkSize sets the maximum number of rows that will be returned per + // chunk. Chunks are either divided based on their series or if they hit + // the chunk size limit. + // + // Chunked must be set to true for this option to be used. + ChunkSize int + + // NodeID sets the data node to use for the query results. This option only + // has any effect in the enterprise version of the software where there can be + // more than one data node and is primarily useful for analyzing differences in + // data. The default behavior is to automatically select the appropriate data + // nodes to retrieve all of the data. On a database where the number of data nodes + // is greater than the replication factor, it is expected that setting this option + // will only retrieve partial data. + NodeID int +} + +// ParseConnectionString will parse a string to create a valid connection URL +func ParseConnectionString(path string, ssl bool) (url.URL, error) { + var host string + var port int + + h, p, err := net.SplitHostPort(path) + if err != nil { + if path == "" { + host = DefaultHost + } else { + host = path + } + // If they didn't specify a port, always use the default port + port = DefaultPort + } else { + host = h + port, err = strconv.Atoi(p) + if err != nil { + return url.URL{}, fmt.Errorf("invalid port number %q: %s\n", path, err) + } + } + + u := url.URL{ + Scheme: "http", + Host: host, + } + if ssl { + u.Scheme = "https" + if port != 443 { + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + } + } else if port != 80 { + u.Host = net.JoinHostPort(host, strconv.Itoa(port)) + } + + return u, nil +} + +// Config is used to specify what server to connect to. +// URL: The URL of the server connecting to. +// Username/Password are optional. They will be passed via basic auth if provided. 
+// UserAgent: If not provided, will default "InfluxDBClient", +// Timeout: If not provided, will default to 0 (no timeout) +type Config struct { + URL url.URL + UnixSocket string + Username string + Password string + UserAgent string + Timeout time.Duration + Precision string + WriteConsistency string + UnsafeSsl bool + Proxy func(req *http.Request) (*url.URL, error) + TLS *tls.Config +} + +// NewConfig will create a config to be used in connecting to the client +func NewConfig() Config { + return Config{ + Timeout: DefaultTimeout, + } +} + +// Client is used to make calls to the server. +type Client struct { + url url.URL + unixSocket string + username string + password string + httpClient *http.Client + userAgent string + precision string +} + +const ( + // ConsistencyOne requires at least one data node acknowledged a write. + ConsistencyOne = "one" + + // ConsistencyAll requires all data nodes to acknowledge a write. + ConsistencyAll = "all" + + // ConsistencyQuorum requires a quorum of data nodes to acknowledge a write. + ConsistencyQuorum = "quorum" + + // ConsistencyAny allows for hinted hand off, potentially no write happened yet. + ConsistencyAny = "any" +) + +// NewClient will instantiate and return a connected client to issue commands to the server. +func NewClient(c Config) (*Client, error) { + tlsConfig := new(tls.Config) + if c.TLS != nil { + tlsConfig = c.TLS.Clone() + } + tlsConfig.InsecureSkipVerify = c.UnsafeSsl + + tr := &http.Transport{ + Proxy: c.Proxy, + TLSClientConfig: tlsConfig, + } + + if c.UnixSocket != "" { + // No need for compression in local communications. 
+ tr.DisableCompression = true + + tr.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("unix", c.UnixSocket) + } + } + + client := Client{ + url: c.URL, + unixSocket: c.UnixSocket, + username: c.Username, + password: c.Password, + httpClient: &http.Client{Timeout: c.Timeout, Transport: tr}, + userAgent: c.UserAgent, + precision: c.Precision, + } + if client.userAgent == "" { + client.userAgent = "InfluxDBClient" + } + return &client, nil +} + +// SetAuth will update the username and passwords +func (c *Client) SetAuth(u, p string) { + c.username = u + c.password = p +} + +// SetPrecision will update the precision +func (c *Client) SetPrecision(precision string) { + c.precision = precision +} + +// Query sends a command to the server and returns the Response +func (c *Client) Query(q Query) (*Response, error) { + return c.QueryContext(context.Background(), q) +} + +// QueryContext sends a command to the server and returns the Response +// It uses a context that can be cancelled by the command line client +func (c *Client) QueryContext(ctx context.Context, q Query) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "query") + + values := u.Query() + values.Set("q", q.Command) + values.Set("db", q.Database) + if q.RetentionPolicy != "" { + values.Set("rp", q.RetentionPolicy) + } + if q.Chunked { + values.Set("chunked", "true") + if q.ChunkSize > 0 { + values.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + } + if q.NodeID > 0 { + values.Set("node_id", strconv.Itoa(q.NodeID)) + } + if c.precision != "" { + values.Set("epoch", c.precision) + } + u.RawQuery = values.Encode() + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + req = req.WithContext(ctx) + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + 
+ var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + // If we got an error while decoding the response, send that back. + return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != nil { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + if err := dec.Decode(&response); err != nil { + // Ignore EOF errors if we got an invalid status code. + if !(err == io.EOF && resp.StatusCode != http.StatusOK) { + return nil, err + } + } + } + + // If we don't have an error in our json response, and didn't get StatusOK, + // then send back an error. + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// Write takes BatchPoints and allows for writing of multiple points with defaults +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
+func (c *Client) Write(bp BatchPoints) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "write") + + var b bytes.Buffer + for _, p := range bp.Points { + err := checkPointTypes(p) + if err != nil { + return nil, err + } + if p.Raw != "" { + if _, err := b.WriteString(p.Raw); err != nil { + return nil, err + } + } else { + for k, v := range bp.Tags { + if p.Tags == nil { + p.Tags = make(map[string]string, len(bp.Tags)) + } + p.Tags[k] = v + } + + if _, err := b.WriteString(p.MarshalString()); err != nil { + return nil, err + } + } + + if err := b.WriteByte('\n'); err != nil { + return nil, err + } + } + + req, err := http.NewRequest("POST", u.String(), &b) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + precision := bp.Precision + if precision == "" { + precision = c.precision + } + + params := req.URL.Query() + params.Set("db", bp.Database) + params.Set("rp", bp.RetentionPolicy) + params.Set("precision", precision) + params.Set("consistency", bp.WriteConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// WriteLineProtocol takes a string with line returns to delimit each write +// If successful, error is nil and Response is nil +// If an error occurs, Response may contain additional information if populated. 
+func (c *Client) WriteLineProtocol(data, database, retentionPolicy, precision, writeConsistency string) (*Response, error) { + u := c.url + u.Path = path.Join(u.Path, "write") + + r := strings.NewReader(data) + + req, err := http.NewRequest("POST", u.String(), r) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + params := req.URL.Query() + params.Set("db", database) + params.Set("rp", retentionPolicy) + params.Set("precision", precision) + params.Set("consistency", writeConsistency) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var response Response + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + err := fmt.Errorf(string(body)) + response.Err = err + return &response, err + } + + return nil, nil +} + +// Ping will check to see if the server is up +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. +func (c *Client) Ping() (time.Duration, string, error) { + now := time.Now() + + u := c.url + u.Path = path.Join(u.Path, "ping") + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + req.Header.Set("User-Agent", c.userAgent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Structs + +// Message represents a user message. 
+type Message struct { + Level string `json:"level,omitempty"` + Text string `json:"text,omitempty"` +} + +// Result represents a resultset returned from a single statement. +type Result struct { + Series []models.Row + Messages []*Message + Err error +} + +// MarshalJSON encodes the result into JSON. +func (r *Result) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. + o.Series = r.Series + o.Messages = r.Messages + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Result struct +func (r *Result) UnmarshalJSON(b []byte) error { + var o struct { + Series []models.Row `json:"series,omitempty"` + Messages []*Message `json:"messages,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Series = o.Series + r.Messages = o.Messages + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err error +} + +// MarshalJSON encodes the response into JSON. +func (r *Response) MarshalJSON() ([]byte, error) { + // Define a struct that outputs "error" as a string. + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + // Copy fields to output struct. 
+ o.Results = r.Results + if r.Err != nil { + o.Err = r.Err.Error() + } + + return json.Marshal(&o) +} + +// UnmarshalJSON decodes the data into the Response struct +func (r *Response) UnmarshalJSON(b []byte) error { + var o struct { + Results []Result `json:"results,omitempty"` + Err string `json:"error,omitempty"` + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + err := dec.Decode(&o) + if err != nil { + return err + } + r.Results = o.Results + if o.Err != "" { + r.Err = errors.New(o.Err) + } + return nil +} + +// Error returns the first error from any statement. +// Returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != nil { + return r.Err + } + for _, result := range r.Results { + if result.Err != nil { + return result.Err + } + } + return nil +} + +// duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.Reader + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: r, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, nil + } + // A decoding error happened. 
This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + r.buf.Reset() + return &response, nil +} + +// Point defines the fields that will be written to the database +// Measurement, Time, and Fields are required +// Precision can be specified if the time is in epoch format (integer). +// Valid values for Precision are n, u, ms, s, m, and h +type Point struct { + Measurement string + Tags map[string]string + Time time.Time + Fields map[string]interface{} + Precision string + Raw string +} + +// MarshalJSON will format the time in RFC3339Nano +// Precision is also ignored as it is only used for writing, not reading +// Or another way to say it is we always send back in nanosecond precision +func (p *Point) MarshalJSON() ([]byte, error) { + point := struct { + Measurement string `json:"measurement,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time string `json:"time,omitempty"` + Fields map[string]interface{} `json:"fields,omitempty"` + Precision string `json:"precision,omitempty"` + }{ + Measurement: p.Measurement, + Tags: p.Tags, + Fields: p.Fields, + Precision: p.Precision, + } + // Let it omit empty if it's really zero + if !p.Time.IsZero() { + point.Time = p.Time.UTC().Format(time.RFC3339Nano) + } + return json.Marshal(&point) +} + +// MarshalString renders string representation of a Point with specified +// precision. The default precision is nanoseconds. 
+func (p *Point) MarshalString() string { + pt, err := models.NewPoint(p.Measurement, models.NewTags(p.Tags), p.Fields, p.Time) + if err != nil { + return "# ERROR: " + err.Error() + " " + p.Measurement + } + if p.Precision == "" || p.Precision == "ns" || p.Precision == "n" { + return pt.String() + } + return pt.PrecisionString(p.Precision) +} + +// UnmarshalJSON decodes the data into the Point struct +func (p *Point) UnmarshalJSON(b []byte) error { + var normal struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + var epoch struct { + Measurement string `json:"measurement"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + Fields map[string]interface{} `json:"fields"` + } + + if err := func() error { + var err error + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err = dec.Decode(&epoch); err != nil { + return err + } + // Convert from epoch to time.Time, but only if Time + // was actually set. 
+ var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + p.Measurement = epoch.Measurement + p.Tags = epoch.Tags + p.Time = ts + p.Precision = epoch.Precision + p.Fields = normalizeFields(epoch.Fields) + return nil + }(); err == nil { + return nil + } + + dec := json.NewDecoder(bytes.NewBuffer(b)) + dec.UseNumber() + if err := dec.Decode(&normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + p.Measurement = normal.Measurement + p.Tags = normal.Tags + p.Time = normal.Time + p.Precision = normal.Precision + p.Fields = normalizeFields(normal.Fields) + + return nil +} + +// Remove any notion of json.Number +func normalizeFields(fields map[string]interface{}) map[string]interface{} { + newFields := map[string]interface{}{} + + for k, v := range fields { + switch v := v.(type) { + case json.Number: + jv, e := v.Float64() + if e != nil { + panic(fmt.Sprintf("unable to convert json.Number to float64: %s", e)) + } + newFields[k] = jv + default: + newFields[k] = v + } + } + return newFields +} + +// BatchPoints is used to send batched data in a single write. +// Database and Points are required +// If no retention policy is specified, it will use the databases default retention policy. +// If tags are specified, they will be "merged" with all points. If a point already has that tag, it will be ignored. +// If time is specified, it will be applied to any point with an empty time. +// Precision can be specified if the time is in epoch format (integer). 
+// Valid values for Precision are n, u, ms, s, m, and h +type BatchPoints struct { + Points []Point `json:"points,omitempty"` + Database string `json:"database,omitempty"` + RetentionPolicy string `json:"retentionPolicy,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Time time.Time `json:"time,omitempty"` + Precision string `json:"precision,omitempty"` + WriteConsistency string `json:"-"` +} + +// UnmarshalJSON decodes the data into the BatchPoints struct +func (bp *BatchPoints) UnmarshalJSON(b []byte) error { + var normal struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time time.Time `json:"time"` + Precision string `json:"precision"` + } + var epoch struct { + Points []Point `json:"points"` + Database string `json:"database"` + RetentionPolicy string `json:"retentionPolicy"` + Tags map[string]string `json:"tags"` + Time *int64 `json:"time"` + Precision string `json:"precision"` + } + + if err := func() error { + var err error + if err = json.Unmarshal(b, &epoch); err != nil { + return err + } + // Convert from epoch to time.Time + var ts time.Time + if epoch.Time != nil { + ts, err = EpochToTime(*epoch.Time, epoch.Precision) + if err != nil { + return err + } + } + bp.Points = epoch.Points + bp.Database = epoch.Database + bp.RetentionPolicy = epoch.RetentionPolicy + bp.Tags = epoch.Tags + bp.Time = ts + bp.Precision = epoch.Precision + return nil + }(); err == nil { + return nil + } + + if err := json.Unmarshal(b, &normal); err != nil { + return err + } + normal.Time = SetPrecision(normal.Time, normal.Precision) + bp.Points = normal.Points + bp.Database = normal.Database + bp.RetentionPolicy = normal.RetentionPolicy + bp.Tags = normal.Tags + bp.Time = normal.Time + bp.Precision = normal.Precision + + return nil +} + +// utility functions + +// Addr provides the current url as a string of the server the client is connected to. 
+func (c *Client) Addr() string { + if c.unixSocket != "" { + return c.unixSocket + } + return c.url.String() +} + +// checkPointTypes ensures no unsupported types are submitted to influxdb, returning error if they are found. +func checkPointTypes(p Point) error { + for _, v := range p.Fields { + switch v.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, float32, float64, bool, string, nil: + return nil + default: + return fmt.Errorf("unsupported point type: %T", v) + } + } + return nil +} + +// helper functions + +// EpochToTime takes a unix epoch time and uses precision to return back a time.Time +func EpochToTime(epoch int64, precision string) (time.Time, error) { + if precision == "" { + precision = "s" + } + var t time.Time + switch precision { + case "h": + t = time.Unix(0, epoch*int64(time.Hour)) + case "m": + t = time.Unix(0, epoch*int64(time.Minute)) + case "s": + t = time.Unix(0, epoch*int64(time.Second)) + case "ms": + t = time.Unix(0, epoch*int64(time.Millisecond)) + case "u": + t = time.Unix(0, epoch*int64(time.Microsecond)) + case "n": + t = time.Unix(0, epoch) + default: + return time.Time{}, fmt.Errorf("Unknown precision %q", precision) + } + return t, nil +} + +// SetPrecision will round a time to the specified precision +func SetPrecision(t time.Time, precision string) time.Time { + switch precision { + case "n": + case "u": + return t.Round(time.Microsecond) + case "ms": + return t.Round(time.Millisecond) + case "s": + return t.Round(time.Second) + case "m": + return t.Round(time.Minute) + case "h": + return t.Round(time.Hour) + } + return t +} diff --git a/pkg/influxdb/influxdb-client/models/inline_fnv.go b/pkg/influxdb/influxdb-client/models/inline_fnv.go new file mode 100644 index 0000000..9b59522 --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/inline_fnv.go @@ -0,0 +1,32 @@ +package models // import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/models" + +// from stdlib hash/fnv/fnv.go +const 
( + prime64 = 1099511628211 + offset64 = 14695981039346656037 +) + +// InlineFNV64a is an alloc-free port of the standard library's fnv64a. +// See https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function. +type InlineFNV64a uint64 + +// NewInlineFNV64a returns a new instance of InlineFNV64a. +func NewInlineFNV64a() InlineFNV64a { + return offset64 +} + +// Write adds data to the running hash. +func (s *InlineFNV64a) Write(data []byte) (int, error) { + hash := uint64(*s) + for _, c := range data { + hash ^= uint64(c) + hash *= prime64 + } + *s = InlineFNV64a(hash) + return len(data), nil +} + +// Sum64 returns the uint64 of the current resulting hash. +func (s *InlineFNV64a) Sum64() uint64 { + return uint64(*s) +} diff --git a/pkg/influxdb/influxdb-client/models/inline_strconv_parse.go b/pkg/influxdb/influxdb-client/models/inline_strconv_parse.go new file mode 100644 index 0000000..1b55073 --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/inline_strconv_parse.go @@ -0,0 +1,44 @@ +package models // import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/models" + +import ( + "reflect" + "strconv" + "unsafe" +) + +// parseIntBytes is a zero-alloc wrapper around strconv.ParseInt. +func parseIntBytes(b []byte, base int, bitSize int) (i int64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseInt(s, base, bitSize) +} + +// parseUintBytes is a zero-alloc wrapper around strconv.ParseUint. +func parseUintBytes(b []byte, base int, bitSize int) (i uint64, err error) { + s := unsafeBytesToString(b) + return strconv.ParseUint(s, base, bitSize) +} + +// parseFloatBytes is a zero-alloc wrapper around strconv.ParseFloat. +func parseFloatBytes(b []byte, bitSize int) (float64, error) { + s := unsafeBytesToString(b) + return strconv.ParseFloat(s, bitSize) +} + +// parseBoolBytes is a zero-alloc wrapper around strconv.ParseBool. 
+func parseBoolBytes(b []byte) (bool, error) { + return strconv.ParseBool(unsafeBytesToString(b)) +} + +// unsafeBytesToString converts a []byte to a string without a heap allocation. +// +// It is unsafe, and is intended to prepare input to short-lived functions +// that require strings. +func unsafeBytesToString(in []byte) string { + src := *(*reflect.SliceHeader)(unsafe.Pointer(&in)) + dst := reflect.StringHeader{ + Data: src.Data, + Len: src.Len, + } + s := *(*string)(unsafe.Pointer(&dst)) + return s +} diff --git a/pkg/influxdb/influxdb-client/models/points.go b/pkg/influxdb/influxdb-client/models/points.go new file mode 100644 index 0000000..4d329f2 --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/points.go @@ -0,0 +1,2413 @@ +// Package models implements basic objects used throughout the TICK stack. +package models // import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/models" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/pkg/escape" +) + +type escapeSet struct { + k [1]byte + esc [2]byte +} + +var ( + measurementEscapeCodes = [...]escapeSet{ + {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, + {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, + } + + tagEscapeCodes = [...]escapeSet{ + {k: [1]byte{','}, esc: [2]byte{'\\', ','}}, + {k: [1]byte{' '}, esc: [2]byte{'\\', ' '}}, + {k: [1]byte{'='}, esc: [2]byte{'\\', '='}}, + } + + // ErrPointMustHaveAField is returned when operating on a point that does not have any fields. + ErrPointMustHaveAField = errors.New("point without fields is unsupported") + + // ErrInvalidNumber is returned when a number is expected but not provided. + ErrInvalidNumber = errors.New("invalid number") + + // ErrInvalidPoint is returned when a point cannot be parsed correctly. 
+ ErrInvalidPoint = errors.New("point is invalid") +) + +const ( + // MaxKeyLength is the largest allowed size of the combined measurement and tag keys. + MaxKeyLength = 65535 +) + +// enableUint64Support will enable uint64 support if set to true. +var enableUint64Support = false + +// EnableUintSupport manually enables uint support for the point parser. +// This function will be removed in the future and only exists for unit tests during the +// transition. +func EnableUintSupport() { + enableUint64Support = true +} + +// Point defines the values that will be written to the database. +type Point interface { + // Name return the measurement name for the point. + Name() []byte + + // SetName updates the measurement name for the point. + SetName(string) + + // Tags returns the tag set for the point. + Tags() Tags + + // ForEachTag iterates over each tag invoking fn. If fn return false, iteration stops. + ForEachTag(fn func(k, v []byte) bool) + + // AddTag adds or replaces a tag value for a point. + AddTag(key, value string) + + // SetTags replaces the tags for the point. + SetTags(tags Tags) + + // HasTag returns true if the tag exists for the point. + HasTag(tag []byte) bool + + // Fields returns the fields for the point. + Fields() (Fields, error) + + // Time return the timestamp for the point. + Time() time.Time + + // SetTime updates the timestamp for the point. + SetTime(t time.Time) + + // UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. + UnixNano() int64 + + // HashID returns a non-cryptographic checksum of the point's key. + HashID() uint64 + + // Key returns the key (measurement joined with tags) of the point. + Key() []byte + + // String returns a string representation of the point. If there is a + // timestamp associated with the point then it will be specified with the default + // precision of nanoseconds. + String() string + + // MarshalBinary returns a binary representation of the point. 
+ MarshalBinary() ([]byte, error) + + // PrecisionString returns a string representation of the point. If there + // is a timestamp associated with the point then it will be specified in the + // given unit. + PrecisionString(precision string) string + + // RoundedString returns a string representation of the point. If there + // is a timestamp associated with the point, then it will be rounded to the + // given duration. + RoundedString(d time.Duration) string + + // Split will attempt to return multiple points with the same timestamp whose + // string representations are no longer than size. Points with a single field or + // a point without a timestamp may exceed the requested size. + Split(size int) []Point + + // Round will round the timestamp of the point to the given duration. + Round(d time.Duration) + + // StringSize returns the length of the string that would be returned by String(). + StringSize() int + + // AppendString appends the result of String() to the provided buffer and returns + // the result, potentially reducing string allocations. + AppendString(buf []byte) []byte + + // FieldIterator retuns a FieldIterator that can be used to traverse the + // fields of a point without constructing the in-memory map. + FieldIterator() FieldIterator +} + +// FieldType represents the type of a field. +type FieldType int + +const ( + // Integer indicates the field's type is integer. + Integer FieldType = iota + + // Float indicates the field's type is float. + Float + + // Boolean indicates the field's type is boolean. + Boolean + + // String indicates the field's type is string. + String + + // Empty is used to indicate that there is no field. + Empty + + // Unsigned indicates the field's type is an unsigned integer. + Unsigned +) + +// FieldIterator provides a low-allocation interface to iterate through a point's fields. +type FieldIterator interface { + // Next indicates whether there any fields remaining. 
+ Next() bool + + // FieldKey returns the key of the current field. + FieldKey() []byte + + // Type returns the FieldType of the current field. + Type() FieldType + + // StringValue returns the string value of the current field. + StringValue() string + + // IntegerValue returns the integer value of the current field. + IntegerValue() (int64, error) + + // UnsignedValue returns the unsigned value of the current field. + UnsignedValue() (uint64, error) + + // BooleanValue returns the boolean value of the current field. + BooleanValue() (bool, error) + + // FloatValue returns the float value of the current field. + FloatValue() (float64, error) + + // Reset resets the iterator to its initial state. + Reset() +} + +// Points represents a sortable list of points by timestamp. +type Points []Point + +// Len implements sort.Interface. +func (a Points) Len() int { return len(a) } + +// Less implements sort.Interface. +func (a Points) Less(i, j int) bool { return a[i].Time().Before(a[j].Time()) } + +// Swap implements sort.Interface. +func (a Points) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// point is the default implementation of Point. 
+type point struct { + time time.Time + + // text encoding of measurement and tags + // key must always be stored sorted by tags, if the original line was not sorted, + // we need to resort it + key []byte + + // text encoding of field data + fields []byte + + // text encoding of timestamp + ts []byte + + // cached version of parsed fields from data + cachedFields map[string]interface{} + + // cached version of parsed name from key + cachedName string + + // cached version of parsed tags + cachedTags Tags + + it fieldIterator +} + +// type assertions +var ( + _ Point = (*point)(nil) + _ FieldIterator = (*point)(nil) +) + +const ( + // the number of characters for the largest possible int64 (9223372036854775807) + maxInt64Digits = 19 + + // the number of characters for the smallest possible int64 (-9223372036854775808) + minInt64Digits = 20 + + // the number of characters for the largest possible uint64 (18446744073709551615) + maxUint64Digits = 20 + + // the number of characters required for the largest float64 before a range check + // would occur during parsing + maxFloat64Digits = 25 + + // the number of characters required for smallest float64 before a range check occur + // would occur during parsing + minFloat64Digits = 27 +) + +// ParsePoints returns a slice of Points from a text representation of a point +// with each point separated by newlines. If any points fail to parse, a non-nil error +// will be returned in addition to the points that parsed successfully. +func ParsePoints(buf []byte) ([]Point, error) { + return ParsePointsWithPrecision(buf, time.Now().UTC(), "n") +} + +// ParsePointsString is identical to ParsePoints but accepts a string. +func ParsePointsString(buf string) ([]Point, error) { + return ParsePoints([]byte(buf)) +} + +// ParseKey returns the measurement name and tags from a point. +// +// NOTE: to minimize heap allocations, the returned Tags will refer to subslices of buf. 
+// This can have the unintended effect preventing buf from being garbage collected. +func ParseKey(buf []byte) (string, Tags) { + name, tags := ParseKeyBytes(buf) + return string(name), tags +} + +func ParseKeyBytes(buf []byte) ([]byte, Tags) { + return ParseKeyBytesWithTags(buf, nil) +} + +func ParseKeyBytesWithTags(buf []byte, tags Tags) ([]byte, Tags) { + // Ignore the error because scanMeasurement returns "missing fields" which we ignore + // when just parsing a key + state, i, _ := scanMeasurement(buf, 0) + + var name []byte + if state == tagKeyState { + tags = parseTags(buf, tags) + // scanMeasurement returns the location of the comma if there are tags, strip that off + name = buf[:i-1] + } else { + name = buf[:i] + } + return unescapeMeasurement(name), tags +} + +func ParseTags(buf []byte) Tags { + return parseTags(buf, nil) +} + +func ParseName(buf []byte) []byte { + // Ignore the error because scanMeasurement returns "missing fields" which we ignore + // when just parsing a key + state, i, _ := scanMeasurement(buf, 0) + var name []byte + if state == tagKeyState { + name = buf[:i-1] + } else { + name = buf[:i] + } + + return unescapeMeasurement(name) +} + +// ParsePointsWithPrecision is similar to ParsePoints, but allows the +// caller to provide a precision for time. +// +// NOTE: to minimize heap allocations, the returned Points will refer to subslices of buf. +// This can have the unintended effect preventing buf from being garbage collected. 
+func ParsePointsWithPrecision(buf []byte, defaultTime time.Time, precision string) ([]Point, error) { + points := make([]Point, 0, bytes.Count(buf, []byte{'\n'})+1) + var ( + pos int + block []byte + failed []string + ) + for pos < len(buf) { + pos, block = scanLine(buf, pos) + pos++ + + if len(block) == 0 { + continue + } + + start := skipWhitespace(block, 0) + + // If line is all whitespace, just skip it + if start >= len(block) { + continue + } + + // lines which start with '#' are comments + if block[start] == '#' { + continue + } + + // strip the newline if one is present + if block[len(block)-1] == '\n' { + block = block[:len(block)-1] + } + + pt, err := parsePoint(block[start:], defaultTime, precision) + if err != nil { + failed = append(failed, fmt.Sprintf("unable to parse '%s': %v", string(block[start:]), err)) + } else { + points = append(points, pt) + } + + } + if len(failed) > 0 { + return points, fmt.Errorf("%s", strings.Join(failed, "\n")) + } + return points, nil + +} + +func parsePoint(buf []byte, defaultTime time.Time, precision string) (Point, error) { + // scan the first block which is measurement[,tag1=value1,tag2=value2...] + pos, key, err := scanKey(buf, 0) + if err != nil { + return nil, err + } + + // measurement name is required + if len(key) == 0 { + return nil, fmt.Errorf("missing measurement") + } + + if len(key) > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", len(key), MaxKeyLength) + } + + // scan the second block is which is field1=value1[,field2=value2,...] 
+ pos, fields, err := scanFields(buf, pos) + if err != nil { + return nil, err + } + + // at least one field is required + if len(fields) == 0 { + return nil, fmt.Errorf("missing fields") + } + + var maxKeyErr error + err = walkFields(fields, func(k, v []byte) bool { + if sz := seriesKeySize(key, k); sz > MaxKeyLength { + maxKeyErr = fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) + return false + } + return true + }) + + if err != nil { + return nil, err + } + + if maxKeyErr != nil { + return nil, maxKeyErr + } + + // scan the last block which is an optional integer timestamp + pos, ts, err := scanTime(buf, pos) + if err != nil { + return nil, err + } + + pt := &point{ + key: key, + fields: fields, + ts: ts, + } + + if len(ts) == 0 { + pt.time = defaultTime + pt.SetPrecision(precision) + } else { + ts, err := parseIntBytes(ts, 10, 64) + if err != nil { + return nil, err + } + pt.time, err = SafeCalcTime(ts, precision) + if err != nil { + return nil, err + } + + // Determine if there are illegal non-whitespace characters after the + // timestamp block. + for pos < len(buf) { + if buf[pos] != ' ' { + return nil, ErrInvalidPoint + } + pos++ + } + } + return pt, nil +} + +// GetPrecisionMultiplier will return a multiplier for the precision specified. +func GetPrecisionMultiplier(precision string) int64 { + d := time.Nanosecond + switch precision { + case "u": + d = time.Microsecond + case "ms": + d = time.Millisecond + case "s": + d = time.Second + case "m": + d = time.Minute + case "h": + d = time.Hour + } + return int64(d) +} + +// scanKey scans buf starting at i for the measurement and tag portion of the point. +// It returns the ending position and the byte slice of key within buf. If there +// are tags, they will be sorted if they are not already. 
+func scanKey(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + + i = start + + // Determines whether the tags are sort, assume they are + sorted := true + + // indices holds the indexes within buf of the start of each tag. For example, + // a buf of 'cpu,host=a,region=b,zone=c' would have indices slice of [4,11,20] + // which indicates that the first tag starts at buf[4], seconds at buf[11], and + // last at buf[20] + indices := make([]int, 100) + + // tracks how many commas we've seen so we know how many values are indices. + // Since indices is an arbitrarily large slice, + // we need to know how many values in the buffer are in use. + commas := 0 + + // First scan the Point's measurement. + state, i, err := scanMeasurement(buf, i) + if err != nil { + return i, buf[start:i], err + } + + // Optionally scan tags if needed. + if state == tagKeyState { + i, commas, indices, err = scanTags(buf, i, indices) + if err != nil { + return i, buf[start:i], err + } + } + + // Now we know where the key region is within buf, and the location of tags, we + // need to determine if duplicate tags exist and if the tags are sorted. This iterates + // over the list comparing each tag in the sequence with each other. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:indices[j+1]-1], 0, '=') + _, right := scanTo(buf[indices[j+1]:indices[j+2]-1], 0, '=') + + // If left is greater than right, the tags are not sorted. We do not have to + // continue because the short path no longer works. + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if cmp := bytes.Compare(left, right); cmp > 0 { + sorted = false + break + } else if cmp == 0 { + return i, buf[start:i], fmt.Errorf("duplicate tags") + } + } + + // If the tags are not sorted, then sort them. 
This sort is inline and + // uses the tag indices we created earlier. The actual buffer is not sorted, the + // indices are using the buffer for value comparison. After the indices are sorted, + // the buffer is reconstructed from the sorted indices. + if !sorted && commas > 0 { + // Get the measurement name for later + measurement := buf[start : indices[0]-1] + + // Sort the indices + indices := indices[:commas] + insertionSort(0, commas, buf, indices) + + // Create a new key using the measurement and sorted indices + b := make([]byte, len(buf[start:i])) + pos := copy(b, measurement) + for _, i := range indices { + b[pos] = ',' + pos++ + _, v := scanToSpaceOr(buf, i, ',') + pos += copy(b[pos:], v) + } + + // Check again for duplicate tags now that the tags are sorted. + for j := 0; j < commas-1; j++ { + // get the left and right tags + _, left := scanTo(buf[indices[j]:], 0, '=') + _, right := scanTo(buf[indices[j+1]:], 0, '=') + + // If the tags are equal, then there are duplicate tags, and we should abort. + // If the tags are not sorted, this pass may not find duplicate tags and we + // need to do a more exhaustive search later. + if bytes.Equal(left, right) { + return i, b, fmt.Errorf("duplicate tags") + } + } + + return i, b, nil + } + + return i, buf[start:i], nil +} + +// The following constants allow us to specify which state to move to +// next, when scanning sections of a Point. +const ( + tagKeyState = iota + tagValueState + fieldsState +) + +// scanMeasurement examines the measurement part of a Point, returning +// the next state to move to, and the current location in the buffer. +func scanMeasurement(buf []byte, i int) (int, int, error) { + // Check first byte of measurement, anything except a comma is fine. + // It can't be a space, since whitespace is stripped prior to this + // function call. 
+ if i >= len(buf) || buf[i] == ',' { + return -1, i, fmt.Errorf("missing measurement") + } + + for { + i++ + if i >= len(buf) { + // cpu + return -1, i, fmt.Errorf("missing fields") + } + + if buf[i-1] == '\\' { + // Skip character (it's escaped). + continue + } + + // Unescaped comma; move onto scanning the tags. + if buf[i] == ',' { + return tagKeyState, i + 1, nil + } + + // Unescaped space; move onto scanning the fields. + if buf[i] == ' ' { + // cpu value=1.0 + return fieldsState, i, nil + } + } +} + +// scanTags examines all the tags in a Point, keeping track of and +// returning the updated indices slice, number of commas and location +// in buf where to start examining the Point fields. +func scanTags(buf []byte, i int, indices []int) (int, int, []int, error) { + var ( + err error + commas int + state = tagKeyState + ) + + for { + switch state { + case tagKeyState: + // Grow our indices slice if we have too many tags. + if commas >= len(indices) { + newIndics := make([]int, cap(indices)*2) + copy(newIndics, indices) + indices = newIndics + } + indices[commas] = i + commas++ + + i, err = scanTagsKey(buf, i) + state = tagValueState // tag value always follows a tag key + case tagValueState: + state, i, err = scanTagsValue(buf, i) + case fieldsState: + indices[commas] = i + 1 + return i, commas, indices, nil + } + + if err != nil { + return i, commas, indices, err + } + } +} + +// scanTagsKey scans each character in a tag key. +func scanTagsKey(buf []byte, i int) (int, error) { + // First character of the key. + if i >= len(buf) || buf[i] == ' ' || buf[i] == ',' || buf[i] == '=' { + // cpu,{'', ' ', ',', '='} + return i, fmt.Errorf("missing tag key") + } + + // Examine each character in the tag key until we hit an unescaped + // equals (the tag value), or we hit an error (i.e., unescaped + // space or comma). + for { + i++ + + // Either we reached the end of the buffer or we hit an + // unescaped comma or space. 
+ if i >= len(buf) || + ((buf[i] == ' ' || buf[i] == ',') && buf[i-1] != '\\') { + // cpu,tag{'', ' ', ','} + return i, fmt.Errorf("missing tag value") + } + + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag= + return i + 1, nil + } + } +} + +// scanTagsValue scans each character in a tag value. +func scanTagsValue(buf []byte, i int) (int, int, error) { + // Tag value cannot be empty. + if i >= len(buf) || buf[i] == ',' || buf[i] == ' ' { + // cpu,tag={',', ' '} + return -1, i, fmt.Errorf("missing tag value") + } + + // Examine each character in the tag value until we hit an unescaped + // comma (move onto next tag key), an unescaped space (move onto + // fields), or we error out. + for { + i++ + if i >= len(buf) { + // cpu,tag=value + return -1, i, fmt.Errorf("missing fields") + } + + // An unescaped equals sign is an invalid tag value. + if buf[i] == '=' && buf[i-1] != '\\' { + // cpu,tag={'=', 'fo=o'} + return -1, i, fmt.Errorf("invalid tag format") + } + + if buf[i] == ',' && buf[i-1] != '\\' { + // cpu,tag=foo, + return tagKeyState, i + 1, nil + } + + // cpu,tag=foo value=1.0 + // cpu, tag=foo\= value=1.0 + if buf[i] == ' ' && buf[i-1] != '\\' { + return fieldsState, i, nil + } + } +} + +func insertionSort(l, r int, buf []byte, indices []int) { + for i := l + 1; i < r; i++ { + for j := i; j > l && less(buf, indices, j, j-1); j-- { + indices[j], indices[j-1] = indices[j-1], indices[j] + } + } +} + +func less(buf []byte, indices []int, i, j int) bool { + // This grabs the tag names for i & j, it ignores the values + _, a := scanTo(buf, indices[i], '=') + _, b := scanTo(buf, indices[j], '=') + return bytes.Compare(a, b) < 0 +} + +// scanFields scans buf, starting at i for the fields section of a point. It returns +// the ending position and the byte slice of the fields within buf. 
+func scanFields(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + quoted := false + + // tracks how many '=' we've seen + equals := 0 + + // tracks how many commas we've seen + commas := 0 + + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // escaped characters? + if buf[i] == '\\' && i+1 < len(buf) { + i += 2 + continue + } + + // If the value is quoted, scan until we get to the end quote + // Only quote values in the field value since quotes are not significant + // in the field key + if buf[i] == '"' && equals > commas { + quoted = !quoted + i++ + continue + } + + // If we see an =, ensure that there is at least on char before and after it + if buf[i] == '=' && !quoted { + equals++ + + // check for "... =123" but allow "a\ =123" + if buf[i-1] == ' ' && buf[i-2] != '\\' { + return i, buf[start:i], fmt.Errorf("missing field key") + } + + // check for "...a=123,=456" but allow "a=123,a\,=456" + if buf[i-1] == ',' && buf[i-2] != '\\' { + return i, buf[start:i], fmt.Errorf("missing field key") + } + + // check for "... value=" + if i+1 >= len(buf) { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + // check for "... value=,value2=..." + if buf[i+1] == ',' || buf[i+1] == ' ' { + return i, buf[start:i], fmt.Errorf("missing field value") + } + + if isNumeric(buf[i+1]) || buf[i+1] == '-' || buf[i+1] == 'N' || buf[i+1] == 'n' { + var err error + i, err = scanNumber(buf, i+1) + if err != nil { + return i, buf[start:i], err + } + continue + } + // If next byte is not a double-quote, the value must be a boolean + if buf[i+1] != '"' { + var err error + i, _, err = scanBoolean(buf, i+1) + if err != nil { + return i, buf[start:i], err + } + continue + } + } + + if buf[i] == ',' && !quoted { + commas++ + } + + // reached end of block? 
+ if buf[i] == ' ' && !quoted { + break + } + i++ + } + + if quoted { + return i, buf[start:i], fmt.Errorf("unbalanced quotes") + } + + // check that all field sections had key and values (e.g. prevent "a=1,b" + if equals == 0 || commas != equals-1 { + return i, buf[start:i], fmt.Errorf("invalid field format") + } + + return i, buf[start:i], nil +} + +// scanTime scans buf, starting at i for the time section of a point. It +// returns the ending position and the byte slice of the timestamp within buf +// and and error if the timestamp is not in the correct numeric format. +func scanTime(buf []byte, i int) (int, []byte, error) { + start := skipWhitespace(buf, i) + i = start + + for { + // reached the end of buf? + if i >= len(buf) { + break + } + + // Reached end of block or trailing whitespace? + if buf[i] == '\n' || buf[i] == ' ' { + break + } + + // Handle negative timestamps + if i == start && buf[i] == '-' { + i++ + continue + } + + // Timestamps should be integers, make sure they are so we don't need + // to actually parse the timestamp until needed. + if buf[i] < '0' || buf[i] > '9' { + return i, buf[start:i], fmt.Errorf("bad timestamp") + } + i++ + } + return i, buf[start:i], nil +} + +func isNumeric(b byte) bool { + return (b >= '0' && b <= '9') || b == '.' +} + +// scanNumber returns the end position within buf, start at i after +// scanning over buf for an integer, or float. It returns an +// error if a invalid number is scanned. +func scanNumber(buf []byte, i int) (int, error) { + start := i + var isInt, isUnsigned bool + + // Is negative number? + if i < len(buf) && buf[i] == '-' { + i++ + // There must be more characters now, as just '-' is illegal. 
+ if i == len(buf) { + return i, ErrInvalidNumber + } + } + + // how many decimal points we've see + decimal := false + + // indicates the number is float in scientific notation + scientific := false + + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' || buf[i] == ' ' { + break + } + + if buf[i] == 'i' && i > start && !(isInt || isUnsigned) { + isInt = true + i++ + continue + } else if buf[i] == 'u' && i > start && !(isInt || isUnsigned) { + isUnsigned = true + i++ + continue + } + + if buf[i] == '.' { + // Can't have more than 1 decimal (e.g. 1.1.1 should fail) + if decimal { + return i, ErrInvalidNumber + } + decimal = true + } + + // `e` is valid for floats but not as the first char + if i > start && (buf[i] == 'e' || buf[i] == 'E') { + scientific = true + i++ + continue + } + + // + and - are only valid at this point if they follow an e (scientific notation) + if (buf[i] == '+' || buf[i] == '-') && (buf[i-1] == 'e' || buf[i-1] == 'E') { + i++ + continue + } + + // NaN is an unsupported value + if i+2 < len(buf) && (buf[i] == 'N' || buf[i] == 'n') { + return i, ErrInvalidNumber + } + + if !isNumeric(buf[i]) { + return i, ErrInvalidNumber + } + i++ + } + + if (isInt || isUnsigned) && (decimal || scientific) { + return i, ErrInvalidNumber + } + + numericDigits := i - start + if isInt { + numericDigits-- + } + if decimal { + numericDigits-- + } + if buf[start] == '-' { + numericDigits-- + } + + if numericDigits == 0 { + return i, ErrInvalidNumber + } + + // It's more common that numbers will be within min/max range for their type but we need to prevent + // out or range numbers from being parsed successfully. This uses some simple heuristics to decide + // if we should parse the number to the actual type. It does not do it all the time because it incurs + // extra allocations and we end up converting the type again when writing points to disk. + if isInt { + // Make sure the last char is an 'i' for integers (e.g. 
9i10 is not valid) + if buf[i-1] != 'i' { + return i, ErrInvalidNumber + } + // Parse the int to check bounds the number of digits could be larger than the max range + // We subtract 1 from the index to remove the `i` from our tests + if len(buf[start:i-1]) >= maxInt64Digits || len(buf[start:i-1]) >= minInt64Digits { + if _, err := parseIntBytes(buf[start:i-1], 10, 64); err != nil { + return i, fmt.Errorf("unable to parse integer %s: %s", buf[start:i-1], err) + } + } + } else if isUnsigned { + // Return an error if uint64 support has not been enabled. + if !enableUint64Support { + return i, ErrInvalidNumber + } + // Make sure the last char is a 'u' for unsigned + if buf[i-1] != 'u' { + return i, ErrInvalidNumber + } + // Make sure the first char is not a '-' for unsigned + if buf[start] == '-' { + return i, ErrInvalidNumber + } + // Parse the uint to check bounds the number of digits could be larger than the max range + // We subtract 1 from the index to remove the `u` from our tests + if len(buf[start:i-1]) >= maxUint64Digits { + if _, err := parseUintBytes(buf[start:i-1], 10, 64); err != nil { + return i, fmt.Errorf("unable to parse unsigned %s: %s", buf[start:i-1], err) + } + } + } else { + // Parse the float to check bounds if it's scientific or the number of digits could be larger than the max range + if scientific || len(buf[start:i]) >= maxFloat64Digits || len(buf[start:i]) >= minFloat64Digits { + if _, err := parseFloatBytes(buf[start:i], 10); err != nil { + return i, fmt.Errorf("invalid float") + } + } + } + + return i, nil +} + +// scanBoolean returns the end position within buf, start at i after +// scanning over buf for boolean. Valid values for a boolean are +// t, T, true, TRUE, f, F, false, FALSE. It returns an error if a invalid boolean +// is scanned. 
+func scanBoolean(buf []byte, i int) (int, []byte, error) { + start := i + + if i < len(buf) && (buf[i] != 't' && buf[i] != 'f' && buf[i] != 'T' && buf[i] != 'F') { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + i++ + for { + if i >= len(buf) { + break + } + + if buf[i] == ',' || buf[i] == ' ' { + break + } + i++ + } + + // Single char bool (t, T, f, F) is ok + if i-start == 1 { + return i, buf[start:i], nil + } + + // length must be 4 for true or TRUE + if (buf[start] == 't' || buf[start] == 'T') && i-start != 4 { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + // length must be 5 for false or FALSE + if (buf[start] == 'f' || buf[start] == 'F') && i-start != 5 { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + // Otherwise + valid := false + switch buf[start] { + case 't': + valid = bytes.Equal(buf[start:i], []byte("true")) + case 'f': + valid = bytes.Equal(buf[start:i], []byte("false")) + case 'T': + valid = bytes.Equal(buf[start:i], []byte("TRUE")) || bytes.Equal(buf[start:i], []byte("True")) + case 'F': + valid = bytes.Equal(buf[start:i], []byte("FALSE")) || bytes.Equal(buf[start:i], []byte("False")) + } + + if !valid { + return i, buf[start:i], fmt.Errorf("invalid boolean") + } + + return i, buf[start:i], nil + +} + +// skipWhitespace returns the end position within buf, starting at i after +// scanning over spaces in tags. +func skipWhitespace(buf []byte, i int) int { + for i < len(buf) { + if buf[i] != ' ' && buf[i] != '\t' && buf[i] != 0 { + break + } + i++ + } + return i +} + +// scanLine returns the end position in buf and the next line found within +// buf. +func scanLine(buf []byte, i int) (int, []byte) { + start := i + quoted := false + fields := false + + // tracks how many '=' and commas we've seen + // this duplicates some of the functionality in scanFields + equals := 0 + commas := 0 + for { + // reached the end of buf? 
		// End of input: the line is whatever we have consumed so far.
		if i >= len(buf) {
			break
		}

		// skip past escaped characters
		if buf[i] == '\\' && i+2 < len(buf) {
			i += 2
			continue
		}

		// First unescaped space moves us past the measurement/tags and into
		// the fields section.
		if buf[i] == ' ' {
			fields = true
		}

		// If we see a double quote, makes sure it is not escaped.
		// Quote tracking only matters in field values (equals > commas —
		// the same heuristic scanFields uses).
		if fields {
			if !quoted && buf[i] == '=' {
				i++
				equals++
				continue
			} else if !quoted && buf[i] == ',' {
				i++
				commas++
				continue
			} else if buf[i] == '"' && equals > commas {
				i++
				quoted = !quoted
				continue
			}
		}

		// An unquoted newline terminates the line.
		if buf[i] == '\n' && !quoted {
			break
		}

		i++
	}

	return i, buf[start:i]
}

// scanTo returns the end position in buf and the next consecutive block
// of bytes, starting from i and ending with stop byte, where stop byte
// has not been escaped.
//
// NOTE(review): contrary to an older version of this comment, leading
// spaces are NOT skipped — scanning starts exactly at i.
func scanTo(buf []byte, i int, stop byte) (int, []byte) {
	start := i
	for {
		// reached the end of buf?
		if i >= len(buf) {
			break
		}

		// Reached unescaped stop value?
		// (The i == 0 check guards the buf[i-1] look-behind.)
		if buf[i] == stop && (i == 0 || buf[i-1] != '\\') {
			break
		}
		i++
	}

	return i, buf[start:i]
}

// scanToSpaceOr returns the end position in buf and the next consecutive
// block of bytes, starting from i and ending at the first unescaped stop
// byte or space, or at the end of buf.
// (Previous copies of this comment misnamed the function "scanTo".)
func scanToSpaceOr(buf []byte, i int, stop byte) (int, []byte) {
	start := i
	// Empty block: the very first byte is already a terminator.
	// NOTE(review): assumes i < len(buf) on entry — callers must guarantee
	// it, otherwise this indexes out of range; verify at call sites.
	if buf[i] == stop || buf[i] == ' ' {
		return i, buf[start:i]
	}

	for {
		i++
		// Previous byte was a backslash, so this byte is escaped and cannot
		// terminate the block.
		if buf[i-1] == '\\' {
			continue
		}

		// reached the end of buf?
		if i >= len(buf) {
			return i, buf[start:i]
		}

		// reached end of block?
		if buf[i] == stop || buf[i] == ' ' {
			return i, buf[start:i]
		}
	}
}

// scanTagValue returns the end position in buf and the tag value starting
// at i, terminated by the first unescaped comma or the end of buf.
func scanTagValue(buf []byte, i int) (int, []byte) {
	start := i
	for {
		if i >= len(buf) {
			break
		}

		// Unescaped comma terminates the value.
		if buf[i] == ',' && buf[i-1] != '\\' {
			break
		}
		i++
	}
	// NOTE(review): the loop above can never leave i > len(buf), so this
	// branch looks unreachable; kept as-is (doc-only change).
	if i > len(buf) {
		return i, nil
	}
	return i, buf[start:i]
}

// scanFieldValue returns the end position in buf and the field value
// starting at i. Values may be double-quoted strings; an unescaped comma
// outside quotes terminates the value.
func scanFieldValue(buf []byte, i int) (int, []byte) {
	start := i
	quoted := false
	for i < len(buf) {
		// Only escape char for a field value is a double-quote and backslash
		if buf[i] == '\\' && i+1 < len(buf) && (buf[i+1] == '"' || buf[i+1] == '\\') {
			i += 2
			continue
		}

		// Quoted value? (e.g. string)
		if buf[i] == '"' {
			i++
			quoted = !quoted
			continue
		}

		if buf[i] == ',' && !quoted {
			break
		}
		i++
	}
	return i, buf[start:i]
}

// EscapeMeasurement replaces each measurement-special character in in with
// its backslash-escaped form (per measurementEscapeCodes).
func EscapeMeasurement(in []byte) []byte {
	for _, c := range measurementEscapeCodes {
		// IndexByte fast-path: skip the Replace when the byte is absent.
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
		}
	}
	return in
}

// unescapeMeasurement reverses EscapeMeasurement. Input containing no
// backslash is returned untouched without allocating.
func unescapeMeasurement(in []byte) []byte {
	if bytes.IndexByte(in, '\\') == -1 {
		return in
	}

	for i := range measurementEscapeCodes {
		c := &measurementEscapeCodes[i]
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
		}
	}
	return in
}

// escapeTag replaces each tag-special character in in with its
// backslash-escaped form (per tagEscapeCodes).
func escapeTag(in []byte) []byte {
	for i := range tagEscapeCodes {
		c := &tagEscapeCodes[i]
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.k[:], c.esc[:], -1)
		}
	}
	return in
}

// unescapeTag reverses escapeTag. Input containing no backslash is
// returned untouched without allocating.
func unescapeTag(in []byte) []byte {
	if bytes.IndexByte(in, '\\') == -1 {
		return in
	}

	for i := range tagEscapeCodes {
		c := &tagEscapeCodes[i]
		if bytes.IndexByte(in, c.k[0]) != -1 {
			in = bytes.Replace(in, c.esc[:], c.k[:], -1)
		}
	}
	return in
}

// escapeStringFieldReplacer replaces double quotes and backslashes
// with the same character preceded by a backslash.
// As of Go 1.7 this benchmarked better in allocations and CPU time
// compared to iterating through a string byte-by-byte and appending to a new byte slice,
// calling strings.Replace twice, and better than (*Regex).ReplaceAllString.
var escapeStringFieldReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)

// EscapeStringField returns a copy of in with any double quotes or
// backslashes escaped with a leading backslash.
func EscapeStringField(in string) string {
	return escapeStringFieldReplacer.Replace(in)
}

// unescapeStringField returns a copy of in with any escaped double-quotes
// or backslashes unescaped. When in contains no backslash it is returned
// unchanged without allocating.
func unescapeStringField(in string) string {
	// Fast path: nothing to unescape.
	if strings.IndexByte(in, '\\') == -1 {
		return in
	}

	var out []byte
	i := 0
	for {
		if i >= len(in) {
			break
		}
		// unescape backslashes
		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '\\' {
			out = append(out, '\\')
			i += 2
			continue
		}
		// unescape double-quotes
		if in[i] == '\\' && i+1 < len(in) && in[i+1] == '"' {
			out = append(out, '"')
			i += 2
			continue
		}
		// Any other byte — including a lone trailing backslash — is copied
		// through verbatim.
		out = append(out, in[i])
		i++

	}
	return string(out)
}

// NewPoint returns a new point with the given measurement name, tags, fields and timestamp. If
// an unsupported field value (NaN, or +/-Inf) or out of range time is passed, this function
// returns an error.
func NewPoint(name string, tags Tags, fields Fields, t time.Time) (Point, error) {
	// pointKey validates fields/time and builds the escaped series key.
	key, err := pointKey(name, tags, fields, t)
	if err != nil {
		return nil, err
	}

	return &point{
		key:    key,
		time:   t,
		fields: fields.MarshalBinary(),
	}, nil
}

// pointKey checks some basic requirements for valid points, and returns the
// key, along with a possible error.
+func pointKey(measurement string, tags Tags, fields Fields, t time.Time) ([]byte, error) { + if len(fields) == 0 { + return nil, ErrPointMustHaveAField + } + + if !t.IsZero() { + if err := CheckTime(t); err != nil { + return nil, err + } + } + + for key, value := range fields { + switch value := value.(type) { + case float64: + // Ensure the caller validates and handles invalid field values + if math.IsInf(value, 0) { + return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) + } + if math.IsNaN(value) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + case float32: + // Ensure the caller validates and handles invalid field values + if math.IsInf(float64(value), 0) { + return nil, fmt.Errorf("+/-Inf is an unsupported value for field %s", key) + } + if math.IsNaN(float64(value)) { + return nil, fmt.Errorf("NaN is an unsupported value for field %s", key) + } + } + if len(key) == 0 { + return nil, fmt.Errorf("all fields must have non-empty names") + } + } + + key := MakeKey([]byte(measurement), tags) + for field := range fields { + sz := seriesKeySize(key, []byte(field)) + if sz > MaxKeyLength { + return nil, fmt.Errorf("max key length exceeded: %v > %v", sz, MaxKeyLength) + } + } + + return key, nil +} + +func seriesKeySize(key, field []byte) int { + // 4 is the length of the tsm1.fieldKeySeparator constant. It's inlined here to avoid a circular + // dependency. + return len(key) + 4 + len(field) +} + +// NewPointFromBytes returns a new Point from a marshalled Point. +func NewPointFromBytes(b []byte) (Point, error) { + p := &point{} + if err := p.UnmarshalBinary(b); err != nil { + return nil, err + } + + // This does some basic validation to ensure there are fields and they + // can be unmarshalled as well. 
+ iter := p.FieldIterator() + var hasField bool + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + hasField = true + switch iter.Type() { + case Float: + _, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Integer: + _, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case Unsigned: + _, err := iter.UnsignedValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + case String: + // Skip since this won't return an error + case Boolean: + _, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + } + } + + if !hasField { + return nil, ErrPointMustHaveAField + } + + return p, nil +} + +// MustNewPoint returns a new point with the given measurement name, tags, fields and timestamp. If +// an unsupported field value (NaN) is passed, this function panics. +func MustNewPoint(name string, tags Tags, fields Fields, time time.Time) Point { + pt, err := NewPoint(name, tags, fields, time) + if err != nil { + panic(err.Error()) + } + return pt +} + +// Key returns the key (measurement joined with tags) of the point. +func (p *point) Key() []byte { + return p.key +} + +func (p *point) name() []byte { + _, name := scanTo(p.key, 0, ',') + return name +} + +func (p *point) Name() []byte { + return escape.Unescape(p.name()) +} + +// SetName updates the measurement name for the point. +func (p *point) SetName(name string) { + p.cachedName = "" + p.key = MakeKey([]byte(name), p.Tags()) +} + +// Time return the timestamp for the point. +func (p *point) Time() time.Time { + return p.time +} + +// SetTime updates the timestamp for the point. 
+func (p *point) SetTime(t time.Time) { + p.time = t +} + +// Round will round the timestamp of the point to the given duration. +func (p *point) Round(d time.Duration) { + p.time = p.time.Round(d) +} + +// Tags returns the tag set for the point. +func (p *point) Tags() Tags { + if p.cachedTags != nil { + return p.cachedTags + } + p.cachedTags = parseTags(p.key, nil) + return p.cachedTags +} + +func (p *point) ForEachTag(fn func(k, v []byte) bool) { + walkTags(p.key, fn) +} + +func (p *point) HasTag(tag []byte) bool { + if len(p.key) == 0 { + return false + } + + var exists bool + walkTags(p.key, func(key, value []byte) bool { + if bytes.Equal(tag, key) { + exists = true + return false + } + return true + }) + + return exists +} + +func walkTags(buf []byte, fn func(key, value []byte) bool) { + if len(buf) == 0 { + return + } + + pos, name := scanTo(buf, 0, ',') + + // it's an empty key, so there are no tags + if len(name) == 0 { + return + } + + hasEscape := bytes.IndexByte(buf, '\\') != -1 + i := pos + 1 + var key, value []byte + for { + if i >= len(buf) { + break + } + i, key = scanTo(buf, i, '=') + i, value = scanTagValue(buf, i+1) + + if len(value) == 0 { + continue + } + + if hasEscape { + if !fn(unescapeTag(key), unescapeTag(value)) { + return + } + } else { + if !fn(key, value) { + return + } + } + + i++ + } +} + +// walkFields walks each field key and value via fn. If fn returns false, the iteration +// is stopped. The values are the raw byte slices and not the converted types. 
+func walkFields(buf []byte, fn func(key, value []byte) bool) error { + var i int + var key, val []byte + for len(buf) > 0 { + i, key = scanTo(buf, 0, '=') + if i > len(buf)-2 { + return fmt.Errorf("invalid value: field-key=%s", key) + } + buf = buf[i+1:] + i, val = scanFieldValue(buf, 0) + buf = buf[i:] + if !fn(key, val) { + break + } + + // slice off comma + if len(buf) > 0 { + buf = buf[1:] + } + } + return nil +} + +// parseTags parses buf into the provided destination tags, returning destination +// Tags, which may have a different length and capacity. +func parseTags(buf []byte, dst Tags) Tags { + if len(buf) == 0 { + return nil + } + + n := bytes.Count(buf, []byte(",")) + if cap(dst) < n { + dst = make(Tags, n) + } else { + dst = dst[:n] + } + + // Ensure existing behaviour when point has no tags and nil slice passed in. + if dst == nil { + dst = Tags{} + } + + // Series keys can contain escaped commas, therefore the number of commas + // in a series key only gives an estimation of the upper bound on the number + // of tags. + var i int + walkTags(buf, func(key, value []byte) bool { + dst[i].Key, dst[i].Value = key, value + i++ + return true + }) + return dst[:i] +} + +// MakeKey creates a key for a set of tags. +func MakeKey(name []byte, tags Tags) []byte { + return AppendMakeKey(nil, name, tags) +} + +// AppendMakeKey appends the key derived from name and tags to dst and returns the extended buffer. +func AppendMakeKey(dst []byte, name []byte, tags Tags) []byte { + // unescape the name and then re-escape it to avoid double escaping. + // The key should always be stored in escaped form. + dst = append(dst, EscapeMeasurement(unescapeMeasurement(name))...) + dst = tags.AppendHashKey(dst) + return dst +} + +// SetTags replaces the tags for the point. +func (p *point) SetTags(tags Tags) { + p.key = MakeKey(p.Name(), tags) + p.cachedTags = tags +} + +// AddTag adds or replaces a tag value for a point. 
+func (p *point) AddTag(key, value string) { + tags := p.Tags() + tags = append(tags, Tag{Key: []byte(key), Value: []byte(value)}) + sort.Sort(tags) + p.cachedTags = tags + p.key = MakeKey(p.Name(), tags) +} + +// Fields returns the fields for the point. +func (p *point) Fields() (Fields, error) { + if p.cachedFields != nil { + return p.cachedFields, nil + } + cf, err := p.unmarshalBinary() + if err != nil { + return nil, err + } + p.cachedFields = cf + return p.cachedFields, nil +} + +// SetPrecision will round a time to the specified precision. +func (p *point) SetPrecision(precision string) { + switch precision { + case "n": + case "u": + p.SetTime(p.Time().Truncate(time.Microsecond)) + case "ms": + p.SetTime(p.Time().Truncate(time.Millisecond)) + case "s": + p.SetTime(p.Time().Truncate(time.Second)) + case "m": + p.SetTime(p.Time().Truncate(time.Minute)) + case "h": + p.SetTime(p.Time().Truncate(time.Hour)) + } +} + +// String returns the string representation of the point. +func (p *point) String() string { + if p.Time().IsZero() { + return string(p.Key()) + " " + string(p.fields) + } + return string(p.Key()) + " " + string(p.fields) + " " + strconv.FormatInt(p.UnixNano(), 10) +} + +// AppendString appends the string representation of the point to buf. +func (p *point) AppendString(buf []byte) []byte { + buf = append(buf, p.key...) + buf = append(buf, ' ') + buf = append(buf, p.fields...) + + if !p.time.IsZero() { + buf = append(buf, ' ') + buf = strconv.AppendInt(buf, p.UnixNano(), 10) + } + + return buf +} + +// StringSize returns the length of the string that would be returned by String(). 
+func (p *point) StringSize() int { + size := len(p.key) + len(p.fields) + 1 + + if !p.time.IsZero() { + digits := 1 // even "0" has one digit + t := p.UnixNano() + if t < 0 { + // account for negative sign, then negate + digits++ + t = -t + } + for t > 9 { // already accounted for one digit + digits++ + t /= 10 + } + size += digits + 1 // digits and a space + } + + return size +} + +// MarshalBinary returns a binary representation of the point. +func (p *point) MarshalBinary() ([]byte, error) { + if len(p.fields) == 0 { + return nil, ErrPointMustHaveAField + } + + tb, err := p.time.MarshalBinary() + if err != nil { + return nil, err + } + + b := make([]byte, 8+len(p.key)+len(p.fields)+len(tb)) + i := 0 + + binary.BigEndian.PutUint32(b[i:], uint32(len(p.key))) + i += 4 + + i += copy(b[i:], p.key) + + binary.BigEndian.PutUint32(b[i:i+4], uint32(len(p.fields))) + i += 4 + + i += copy(b[i:], p.fields) + + copy(b[i:], tb) + return b, nil +} + +// UnmarshalBinary decodes a binary representation of the point into a point struct. +func (p *point) UnmarshalBinary(b []byte) error { + var n int + + // Read key length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read key. + if len(b) < n { + return io.ErrShortBuffer + } + p.key, b = b[:n], b[n:] + + // Read fields length. + if len(b) < 4 { + return io.ErrShortBuffer + } + n, b = int(binary.BigEndian.Uint32(b[:4])), b[4:] + + // Read fields. + if len(b) < n { + return io.ErrShortBuffer + } + p.fields, b = b[:n], b[n:] + + // Read timestamp. + return p.time.UnmarshalBinary(b) +} + +// PrecisionString returns a string representation of the point. If there +// is a timestamp associated with the point then it will be specified in the +// given unit. 
+func (p *point) PrecisionString(precision string) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.UnixNano()/GetPrecisionMultiplier(precision)) +} + +// RoundedString returns a string representation of the point. If there +// is a timestamp associated with the point, then it will be rounded to the +// given duration. +func (p *point) RoundedString(d time.Duration) string { + if p.Time().IsZero() { + return fmt.Sprintf("%s %s", p.Key(), string(p.fields)) + } + return fmt.Sprintf("%s %s %d", p.Key(), string(p.fields), + p.time.Round(d).UnixNano()) +} + +func (p *point) unmarshalBinary() (Fields, error) { + iter := p.FieldIterator() + fields := make(Fields, 8) + for iter.Next() { + if len(iter.FieldKey()) == 0 { + continue + } + switch iter.Type() { + case Float: + v, err := iter.FloatValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Integer: + v, err := iter.IntegerValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case Unsigned: + v, err := iter.UnsignedValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + case String: + fields[string(iter.FieldKey())] = iter.StringValue() + case Boolean: + v, err := iter.BooleanValue() + if err != nil { + return nil, fmt.Errorf("unable to unmarshal field %s: %s", string(iter.FieldKey()), err) + } + fields[string(iter.FieldKey())] = v + } + } + return fields, nil +} + +// HashID returns a non-cryptographic checksum of the point's key. 
+func (p *point) HashID() uint64 { + h := NewInlineFNV64a() + h.Write(p.key) + sum := h.Sum64() + return sum +} + +// UnixNano returns the timestamp of the point as nanoseconds since Unix epoch. +func (p *point) UnixNano() int64 { + return p.Time().UnixNano() +} + +// Split will attempt to return multiple points with the same timestamp whose +// string representations are no longer than size. Points with a single field or +// a point without a timestamp may exceed the requested size. +func (p *point) Split(size int) []Point { + if p.time.IsZero() || p.StringSize() <= size { + return []Point{p} + } + + // key string, timestamp string, spaces + size -= len(p.key) + len(strconv.FormatInt(p.time.UnixNano(), 10)) + 2 + + var points []Point + var start, cur int + + for cur < len(p.fields) { + end, _ := scanTo(p.fields, cur, '=') + end, _ = scanFieldValue(p.fields, end+1) + + if cur > start && end-start > size { + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start : cur-1], + }) + start = cur + } + + cur = end + 1 + } + + points = append(points, &point{ + key: p.key, + time: p.time, + fields: p.fields[start:], + }) + + return points +} + +// Tag represents a single key/value tag pair. +type Tag struct { + Key []byte + Value []byte +} + +// NewTag returns a new Tag. +func NewTag(key, value []byte) Tag { + return Tag{ + Key: key, + Value: value, + } +} + +// Size returns the size of the key and value. +func (t Tag) Size() int { return len(t.Key) + len(t.Value) } + +// Clone returns a shallow copy of Tag. +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create a Tag with new byte slices that do not refer to the argument to ParsePointsWithPrecision. 
+func (t Tag) Clone() Tag { + other := Tag{ + Key: make([]byte, len(t.Key)), + Value: make([]byte, len(t.Value)), + } + + copy(other.Key, t.Key) + copy(other.Value, t.Value) + + return other +} + +// String returns the string reprsentation of the tag. +func (t *Tag) String() string { + var buf bytes.Buffer + buf.WriteByte('{') + buf.WriteString(string(t.Key)) + buf.WriteByte(' ') + buf.WriteString(string(t.Value)) + buf.WriteByte('}') + return buf.String() +} + +// Tags represents a sorted list of tags. +type Tags []Tag + +// NewTags returns a new Tags from a map. +func NewTags(m map[string]string) Tags { + if len(m) == 0 { + return nil + } + a := make(Tags, 0, len(m)) + for k, v := range m { + a = append(a, NewTag([]byte(k), []byte(v))) + } + sort.Sort(a) + return a +} + +// HashKey hashes all of a tag's keys. +func (a Tags) HashKey() []byte { + return a.AppendHashKey(nil) +} + +func (a Tags) needsEscape() bool { + for i := range a { + t := &a[i] + for j := range tagEscapeCodes { + c := &tagEscapeCodes[j] + if bytes.IndexByte(t.Key, c.k[0]) != -1 || bytes.IndexByte(t.Value, c.k[0]) != -1 { + return true + } + } + } + return false +} + +// AppendHashKey appends the result of hashing all of a tag's keys and values to dst and returns the extended buffer. +func (a Tags) AppendHashKey(dst []byte) []byte { + // Empty maps marshal to empty bytes. + if len(a) == 0 { + return dst + } + + // Type invariant: Tags are sorted + + sz := 0 + var escaped Tags + if a.needsEscape() { + var tmp [20]Tag + if len(a) < len(tmp) { + escaped = tmp[:len(a)] + } else { + escaped = make(Tags, len(a)) + } + + for i := range a { + t := &a[i] + nt := &escaped[i] + nt.Key = escapeTag(t.Key) + nt.Value = escapeTag(t.Value) + sz += len(nt.Key) + len(nt.Value) + } + } else { + sz = a.Size() + escaped = a + } + + sz += len(escaped) + (len(escaped) * 2) // separators + + // Generate marshaled bytes. 
+ if cap(dst)-len(dst) < sz { + nd := make([]byte, len(dst), len(dst)+sz) + copy(nd, dst) + dst = nd + } + buf := dst[len(dst) : len(dst)+sz] + idx := 0 + for i := range escaped { + k := &escaped[i] + if len(k.Value) == 0 { + continue + } + buf[idx] = ',' + idx++ + copy(buf[idx:], k.Key) + idx += len(k.Key) + buf[idx] = '=' + idx++ + copy(buf[idx:], k.Value) + idx += len(k.Value) + } + return dst[:len(dst)+idx] +} + +// String returns the string representation of the tags. +func (a Tags) String() string { + var buf bytes.Buffer + buf.WriteByte('[') + for i := range a { + buf.WriteString(a[i].String()) + if i < len(a)-1 { + buf.WriteByte(' ') + } + } + buf.WriteByte(']') + return buf.String() +} + +// Size returns the number of bytes needed to store all tags. Note, this is +// the number of bytes needed to store all keys and values and does not account +// for data structures or delimiters for example. +func (a Tags) Size() int { + var total int + for i := range a { + total += a[i].Size() + } + return total +} + +// Clone returns a copy of the slice where the elements are a result of calling `Clone` on the original elements +// +// Tags associated with a Point created by ParsePointsWithPrecision will hold references to the byte slice that was parsed. +// Use Clone to create Tags with new byte slices that do not refer to the argument to ParsePointsWithPrecision. +func (a Tags) Clone() Tags { + if len(a) == 0 { + return nil + } + + others := make(Tags, len(a)) + for i := range a { + others[i] = a[i].Clone() + } + + return others +} + +func (a Tags) Len() int { return len(a) } +func (a Tags) Less(i, j int) bool { return bytes.Compare(a[i].Key, a[j].Key) == -1 } +func (a Tags) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +// Equal returns true if a equals other. 
+func (a Tags) Equal(other Tags) bool { + if len(a) != len(other) { + return false + } + for i := range a { + if !bytes.Equal(a[i].Key, other[i].Key) || !bytes.Equal(a[i].Value, other[i].Value) { + return false + } + } + return true +} + +// CompareTags returns -1 if a < b, 1 if a > b, and 0 if a == b. +func CompareTags(a, b Tags) int { + // Compare each key & value until a mismatch. + for i := 0; i < len(a) && i < len(b); i++ { + if cmp := bytes.Compare(a[i].Key, b[i].Key); cmp != 0 { + return cmp + } + if cmp := bytes.Compare(a[i].Value, b[i].Value); cmp != 0 { + return cmp + } + } + + // If all tags are equal up to this point then return shorter tagset. + if len(a) < len(b) { + return -1 + } else if len(a) > len(b) { + return 1 + } + + // All tags are equal. + return 0 +} + +// Get returns the value for a key. +func (a Tags) Get(key []byte) []byte { + // OPTIMIZE: Use sort.Search if tagset is large. + + for _, t := range a { + if bytes.Equal(t.Key, key) { + return t.Value + } + } + return nil +} + +// GetString returns the string value for a string key. +func (a Tags) GetString(key string) string { + return string(a.Get([]byte(key))) +} + +// Set sets the value for a key. +func (a *Tags) Set(key, value []byte) { + for i, t := range *a { + if bytes.Equal(t.Key, key) { + (*a)[i].Value = value + return + } + } + *a = append(*a, Tag{Key: key, Value: value}) + sort.Sort(*a) +} + +// SetString sets the string value for a string key. +func (a *Tags) SetString(key, value string) { + a.Set([]byte(key), []byte(value)) +} + +// Map returns a map representation of the tags. +func (a Tags) Map() map[string]string { + m := make(map[string]string, len(a)) + for _, t := range a { + m[string(t.Key)] = string(t.Value) + } + return m +} + +// CopyTags returns a shallow copy of tags. +func CopyTags(a Tags) Tags { + other := make(Tags, len(a)) + copy(other, a) + return other +} + +// DeepCopyTags returns a deep copy of tags. 
+func DeepCopyTags(a Tags) Tags { + // Calculate size of keys/values in bytes. + var n int + for _, t := range a { + n += len(t.Key) + len(t.Value) + } + + // Build single allocation for all key/values. + buf := make([]byte, n) + + // Copy tags to new set. + other := make(Tags, len(a)) + for i, t := range a { + copy(buf, t.Key) + other[i].Key, buf = buf[:len(t.Key)], buf[len(t.Key):] + + copy(buf, t.Value) + other[i].Value, buf = buf[:len(t.Value)], buf[len(t.Value):] + } + + return other +} + +// Fields represents a mapping between a Point's field names and their +// values. +type Fields map[string]interface{} + +// FieldIterator retuns a FieldIterator that can be used to traverse the +// fields of a point without constructing the in-memory map. +func (p *point) FieldIterator() FieldIterator { + p.Reset() + return p +} + +type fieldIterator struct { + start, end int + key, keybuf []byte + valueBuf []byte + fieldType FieldType +} + +// Next indicates whether there any fields remaining. +func (p *point) Next() bool { + p.it.start = p.it.end + if p.it.start >= len(p.fields) { + return false + } + + p.it.end, p.it.key = scanTo(p.fields, p.it.start, '=') + if escape.IsEscaped(p.it.key) { + p.it.keybuf = escape.AppendUnescaped(p.it.keybuf[:0], p.it.key) + p.it.key = p.it.keybuf + } + + p.it.end, p.it.valueBuf = scanFieldValue(p.fields, p.it.end+1) + p.it.end++ + + if len(p.it.valueBuf) == 0 { + p.it.fieldType = Empty + return true + } + + c := p.it.valueBuf[0] + + if c == '"' { + p.it.fieldType = String + return true + } + + if strings.IndexByte(`0123456789-.nNiIu`, c) >= 0 { + if p.it.valueBuf[len(p.it.valueBuf)-1] == 'i' { + p.it.fieldType = Integer + p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] + } else if p.it.valueBuf[len(p.it.valueBuf)-1] == 'u' { + p.it.fieldType = Unsigned + p.it.valueBuf = p.it.valueBuf[:len(p.it.valueBuf)-1] + } else { + p.it.fieldType = Float + } + return true + } + + // to keep the same behavior that currently exists, default to 
boolean + p.it.fieldType = Boolean + return true +} + +// FieldKey returns the key of the current field. +func (p *point) FieldKey() []byte { + return p.it.key +} + +// Type returns the FieldType of the current field. +func (p *point) Type() FieldType { + return p.it.fieldType +} + +// StringValue returns the string value of the current field. +func (p *point) StringValue() string { + return unescapeStringField(string(p.it.valueBuf[1 : len(p.it.valueBuf)-1])) +} + +// IntegerValue returns the integer value of the current field. +func (p *point) IntegerValue() (int64, error) { + n, err := parseIntBytes(p.it.valueBuf, 10, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse integer value %q: %v", p.it.valueBuf, err) + } + return n, nil +} + +// UnsignedValue returns the unsigned value of the current field. +func (p *point) UnsignedValue() (uint64, error) { + n, err := parseUintBytes(p.it.valueBuf, 10, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse unsigned value %q: %v", p.it.valueBuf, err) + } + return n, nil +} + +// BooleanValue returns the boolean value of the current field. +func (p *point) BooleanValue() (bool, error) { + b, err := parseBoolBytes(p.it.valueBuf) + if err != nil { + return false, fmt.Errorf("unable to parse bool value %q: %v", p.it.valueBuf, err) + } + return b, nil +} + +// FloatValue returns the float value of the current field. +func (p *point) FloatValue() (float64, error) { + f, err := parseFloatBytes(p.it.valueBuf, 64) + if err != nil { + return 0, fmt.Errorf("unable to parse floating point value %q: %v", p.it.valueBuf, err) + } + return f, nil +} + +// Reset resets the iterator to its initial state. 
+func (p *point) Reset() { + p.it.fieldType = Empty + p.it.key = nil + p.it.valueBuf = nil + p.it.start = 0 + p.it.end = 0 +} + +// MarshalBinary encodes all the fields to their proper type and returns the binary +// represenation +// NOTE: uint64 is specifically not supported due to potential overflow when we decode +// again later to an int64 +// NOTE2: uint is accepted, and may be 64 bits, and is for some reason accepted... +func (p Fields) MarshalBinary() []byte { + var b []byte + keys := make([]string, 0, len(p)) + + for k := range p { + keys = append(keys, k) + } + + // Not really necessary, can probably be removed. + sort.Strings(keys) + + for i, k := range keys { + if i > 0 { + b = append(b, ',') + } + b = appendField(b, k, p[k]) + } + + return b +} + +func appendField(b []byte, k string, v interface{}) []byte { + b = append(b, []byte(escape.String(k))...) + b = append(b, '=') + + // check popular types first + switch v := v.(type) { + case float64: + b = strconv.AppendFloat(b, v, 'f', -1, 64) + case int64: + b = strconv.AppendInt(b, v, 10) + b = append(b, 'i') + case string: + b = append(b, '"') + b = append(b, []byte(EscapeStringField(v))...) + b = append(b, '"') + case bool: + b = strconv.AppendBool(b, v) + case int32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case int: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint64: + b = strconv.AppendUint(b, v, 10) + b = append(b, 'u') + case uint32: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint16: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint8: + b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case uint: + // TODO: 'uint' should be converted to writing as an unsigned integer, + // but we cannot since that would break backwards compatibility. 
+ b = strconv.AppendInt(b, int64(v), 10) + b = append(b, 'i') + case float32: + b = strconv.AppendFloat(b, float64(v), 'f', -1, 32) + case []byte: + b = append(b, v...) + case nil: + // skip + default: + // Can't determine the type, so convert to string + b = append(b, '"') + b = append(b, []byte(EscapeStringField(fmt.Sprintf("%v", v)))...) + b = append(b, '"') + + } + + return b +} + +// ValidKeyToken returns true if the token used for measurement, tag key, or tag +// value is a valid unicode string and only contains printable, non-replacement characters. +func ValidKeyToken(s string) bool { + if !utf8.ValidString(s) { + return false + } + for _, r := range s { + if !unicode.IsPrint(r) || r == unicode.ReplacementChar { + return false + } + } + return true +} + +// ValidKeyTokens returns true if the measurement name and all tags are valid. +func ValidKeyTokens(name string, tags Tags) bool { + if !ValidKeyToken(name) { + return false + } + for _, tag := range tags { + if !ValidKeyToken(string(tag.Key)) || !ValidKeyToken(string(tag.Value)) { + return false + } + } + return true +} diff --git a/pkg/influxdb/influxdb-client/models/rows.go b/pkg/influxdb/influxdb-client/models/rows.go new file mode 100644 index 0000000..c087a48 --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/rows.go @@ -0,0 +1,62 @@ +package models + +import ( + "sort" +) + +// Row represents a single row returned from the execution of a statement. +type Row struct { + Name string `json:"name,omitempty"` + Tags map[string]string `json:"tags,omitempty"` + Columns []string `json:"columns,omitempty"` + Values [][]interface{} `json:"values,omitempty"` + Partial bool `json:"partial,omitempty"` +} + +// SameSeries returns true if r contains values for the same series as o. +func (r *Row) SameSeries(o *Row) bool { + return r.tagsHash() == o.tagsHash() && r.Name == o.Name +} + +// tagsHash returns a hash of tag key/value pairs. 
+func (r *Row) tagsHash() uint64 { + h := NewInlineFNV64a() + keys := r.tagsKeys() + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte(r.Tags[k])) + } + return h.Sum64() +} + +// tagKeys returns a sorted list of tag keys. +func (r *Row) tagsKeys() []string { + a := make([]string, 0, len(r.Tags)) + for k := range r.Tags { + a = append(a, k) + } + sort.Strings(a) + return a +} + +// Rows represents a collection of rows. Rows implements sort.Interface. +type Rows []*Row + +// Len implements sort.Interface. +func (p Rows) Len() int { return len(p) } + +// Less implements sort.Interface. +func (p Rows) Less(i, j int) bool { + // Sort by name first. + if p[i].Name != p[j].Name { + return p[i].Name < p[j].Name + } + + // Sort by tag set hash. Tags don't have a meaningful sort order so we + // just compute a hash and sort by that instead. This allows the tests + // to receive rows in a predictable order every time. + return p[i].tagsHash() < p[j].tagsHash() +} + +// Swap implements sort.Interface. +func (p Rows) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/pkg/influxdb/influxdb-client/models/statistic.go b/pkg/influxdb/influxdb-client/models/statistic.go new file mode 100644 index 0000000..553e9d0 --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/statistic.go @@ -0,0 +1,42 @@ +package models + +// Statistic is the representation of a statistic used by the monitoring service. +type Statistic struct { + Name string `json:"name"` + Tags map[string]string `json:"tags"` + Values map[string]interface{} `json:"values"` +} + +// NewStatistic returns an initialized Statistic. +func NewStatistic(name string) Statistic { + return Statistic{ + Name: name, + Tags: make(map[string]string), + Values: make(map[string]interface{}), + } +} + +// StatisticTags is a map that can be merged with others without causing +// mutations to either map. +type StatisticTags map[string]string + +// Merge creates a new map containing the merged contents of tags and t. 
+// If both tags and the receiver map contain the same key, the value in tags +// is used in the resulting map. +// +// Merge always returns a usable map. +func (t StatisticTags) Merge(tags map[string]string) map[string]string { + // Add everything in tags to the result. + out := make(map[string]string, len(tags)) + for k, v := range tags { + out[k] = v + } + + // Only add values from t that don't appear in tags. + for k, v := range t { + if _, ok := tags[k]; !ok { + out[k] = v + } + } + return out +} diff --git a/pkg/influxdb/influxdb-client/models/time.go b/pkg/influxdb/influxdb-client/models/time.go new file mode 100644 index 0000000..e98f2cb --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/time.go @@ -0,0 +1,74 @@ +package models + +// Helper time methods since parsing time can easily overflow and we only support a +// specific time range. + +import ( + "fmt" + "math" + "time" +) + +const ( + // MinNanoTime is the minumum time that can be represented. + // + // 1677-09-21 00:12:43.145224194 +0000 UTC + // + // The two lowest minimum integers are used as sentinel values. The + // minimum value needs to be used as a value lower than any other value for + // comparisons and another separate value is needed to act as a sentinel + // default value that is unusable by the user, but usable internally. + // Because these two values need to be used for a special purpose, we do + // not allow users to write points at these two times. + MinNanoTime = int64(math.MinInt64) + 2 + + // MaxNanoTime is the maximum time that can be represented. + // + // 2262-04-11 23:47:16.854775806 +0000 UTC + // + // The highest time represented by a nanosecond needs to be used for an + // exclusive range in the shard group, so the maximum time needs to be one + // less than the possible maximum number of nanoseconds representable by an + // int64 so that we don't lose a point at that one time. 
+ MaxNanoTime = int64(math.MaxInt64) - 1 +) + +var ( + minNanoTime = time.Unix(0, MinNanoTime).UTC() + maxNanoTime = time.Unix(0, MaxNanoTime).UTC() + + // ErrTimeOutOfRange gets returned when time is out of the representable range using int64 nanoseconds since the epoch. + ErrTimeOutOfRange = fmt.Errorf("time outside range %d - %d", MinNanoTime, MaxNanoTime) +) + +// SafeCalcTime safely calculates the time given. Will return error if the time is outside the +// supported range. +func SafeCalcTime(timestamp int64, precision string) (time.Time, error) { + mult := GetPrecisionMultiplier(precision) + if t, ok := safeSignedMult(timestamp, mult); ok { + tme := time.Unix(0, t).UTC() + return tme, CheckTime(tme) + } + + return time.Time{}, ErrTimeOutOfRange +} + +// CheckTime checks that a time is within the safe range. +func CheckTime(t time.Time) error { + if t.Before(minNanoTime) || t.After(maxNanoTime) { + return ErrTimeOutOfRange + } + return nil +} + +// Perform the multiplication and check to make sure it didn't overflow. +func safeSignedMult(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == MinNanoTime || b == MaxNanoTime { + return 0, false + } + c := a * b + return c, c/b == a +} diff --git a/pkg/influxdb/influxdb-client/models/uint_support.go b/pkg/influxdb/influxdb-client/models/uint_support.go new file mode 100644 index 0000000..f6d31ae --- /dev/null +++ b/pkg/influxdb/influxdb-client/models/uint_support.go @@ -0,0 +1,8 @@ +//go:build uint || uint64 +// +build uint uint64 + +package models + +func init() { + EnableUintSupport() +} diff --git a/pkg/influxdb/influxdb-client/pkg/escape/bytes.go b/pkg/influxdb/influxdb-client/pkg/escape/bytes.go new file mode 100644 index 0000000..a69e524 --- /dev/null +++ b/pkg/influxdb/influxdb-client/pkg/escape/bytes.go @@ -0,0 +1,115 @@ +// Package escape contains utilities for escaping parts of InfluxQL +// and InfluxDB line protocol. 
+package escape // import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/pkg/escape" + +import ( + "bytes" + "strings" +) + +// Codes is a map of bytes to be escaped. +var Codes = map[byte][]byte{ + ',': []byte(`\,`), + '"': []byte(`\"`), + ' ': []byte(`\ `), + '=': []byte(`\=`), +} + +// Bytes escapes characters on the input slice, as defined by Codes. +func Bytes(in []byte) []byte { + for b, esc := range Codes { + in = bytes.Replace(in, []byte{b}, esc, -1) + } + return in +} + +const escapeChars = `," =` + +// IsEscaped returns whether b has any escaped characters, +// i.e. whether b seems to have been processed by Bytes. +func IsEscaped(b []byte) bool { + for len(b) > 0 { + i := bytes.IndexByte(b, '\\') + if i < 0 { + return false + } + + if i+1 < len(b) && strings.IndexByte(escapeChars, b[i+1]) >= 0 { + return true + } + b = b[i+1:] + } + return false +} + +// AppendUnescaped appends the unescaped version of src to dst +// and returns the resulting slice. +func AppendUnescaped(dst, src []byte) []byte { + var pos int + for len(src) > 0 { + next := bytes.IndexByte(src[pos:], '\\') + if next < 0 || pos+next+1 >= len(src) { + return append(dst, src...) + } + + if pos+next+1 < len(src) && strings.IndexByte(escapeChars, src[pos+next+1]) >= 0 { + if pos+next > 0 { + dst = append(dst, src[:pos+next]...) + } + src = src[pos+next+1:] + pos = 0 + } else { + pos += next + 1 + } + } + + return dst +} + +// Unescape returns a new slice containing the unescaped version of in. +func Unescape(in []byte) []byte { + if len(in) == 0 { + return nil + } + + if bytes.IndexByte(in, '\\') == -1 { + return in + } + + i := 0 + inLen := len(in) + + // The output size will be no more than inLen. Preallocating the + // capacity of the output is faster and uses less memory than + // letting append() do its own (over)allocation. 
+ out := make([]byte, 0, inLen) + + for { + if i >= inLen { + break + } + if in[i] == '\\' && i+1 < inLen { + switch in[i+1] { + case ',': + out = append(out, ',') + i += 2 + continue + case '"': + out = append(out, '"') + i += 2 + continue + case ' ': + out = append(out, ' ') + i += 2 + continue + case '=': + out = append(out, '=') + i += 2 + continue + } + } + out = append(out, in[i]) + i += 1 + } + return out +} diff --git a/pkg/influxdb/influxdb-client/pkg/escape/strings.go b/pkg/influxdb/influxdb-client/pkg/escape/strings.go new file mode 100644 index 0000000..db98033 --- /dev/null +++ b/pkg/influxdb/influxdb-client/pkg/escape/strings.go @@ -0,0 +1,21 @@ +package escape + +import "strings" + +var ( + escaper = strings.NewReplacer(`,`, `\,`, `"`, `\"`, ` `, `\ `, `=`, `\=`) + unescaper = strings.NewReplacer(`\,`, `,`, `\"`, `"`, `\ `, ` `, `\=`, `=`) +) + +// UnescapeString returns unescaped version of in. +func UnescapeString(in string) string { + if strings.IndexByte(in, '\\') == -1 { + return in + } + return unescaper.Replace(in) +} + +// String returns the escaped version of in. +func String(in string) string { + return escaper.Replace(in) +} diff --git a/pkg/influxdb/influxdb-client/v2/client.go b/pkg/influxdb/influxdb-client/v2/client.go new file mode 100644 index 0000000..4e8c2ee --- /dev/null +++ b/pkg/influxdb/influxdb-client/v2/client.go @@ -0,0 +1,811 @@ +// Package client (v2) is the current official Go client for InfluxDB. 
+package client // import "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/v2" + +import ( + "bytes" + "compress/gzip" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "mime" + "net" + "net/http" + "net/url" + "path" + "strconv" + "strings" + "time" + + "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/models" +) + +type ContentEncoding string + +const ( + DefaultEncoding ContentEncoding = "" + GzipEncoding ContentEncoding = "gzip" + DefaultMaxIdleConns = 30 +) + +// HTTPConfig is the config data needed to create an HTTP Client. +type HTTPConfig struct { + // Addr should be of the form "http://host:port" + // or "http://[ipv6-host%zone]:port". + Addr string + + // Username is the influxdb username, optional. + Username string + + // Password is the influxdb password, optional. + Password string + + // UserAgent is the http User Agent, defaults to "InfluxDBClient". + UserAgent string + + // Timeout for influxdb writes, defaults to no timeout. + Timeout time.Duration + + // InsecureSkipVerify gets passed to the http client, if true, it will + // skip https certificate verification. Defaults to false. + InsecureSkipVerify bool + + // TLSConfig allows the user to set their own TLS config for the HTTP + // Client. If set, this option overrides InsecureSkipVerify. + TLSConfig *tls.Config + + // Proxy configures the Proxy function on the HTTP client. + Proxy func(req *http.Request) (*url.URL, error) + + // WriteEncoding specifies the encoding of write request + WriteEncoding ContentEncoding + + // MaxIdleConns controls the maximum number of idle (keep-alive) + MaxIdleConns int + + // MaxIdleConnsPerHost, if non-zero, controls the maximum idle + MaxIdleConnsPerHost int + + // IdleConnTimeout is the maximum amount of time an idle + IdleConnTimeout time.Duration +} + +// BatchPointsConfig is the config data needed to create an instance of the BatchPoints struct. 
+type BatchPointsConfig struct { + // Precision is the write precision of the points, defaults to "ns". + Precision string + + // Database is the database to write points to. + Database string + + // RetentionPolicy is the retention policy of the points. + RetentionPolicy string + + // Write consistency is the number of servers required to confirm write. + WriteConsistency string +} + +// Client is a client interface for writing & querying the database. +type Client interface { + // Ping checks that status of cluster, and will always return 0 time and no + // error for UDP clients. + Ping(timeout time.Duration) (time.Duration, string, error) + + // Write takes a BatchPoints object and writes all Points to InfluxDB. + Write(bp BatchPoints) error + + // Query makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + Query(q Query) (*Response, error) + + // QueryAsChunk makes an InfluxDB Query on the database. This will fail if using + // the UDP client. + QueryAsChunk(q Query) (*ChunkedResponse, error) + + // Close releases any resources a Client may be using. + Close() error +} + +// NewHTTPClient returns a new Client from the provided config. +// Client is safe for concurrent use by multiple goroutines. 
+func NewHTTPClient(conf HTTPConfig) (Client, error) { + if conf.UserAgent == "" { + conf.UserAgent = "InfluxDBClient" + } + + u, err := url.Parse(conf.Addr) + if err != nil { + return nil, err + } else if u.Scheme != "http" && u.Scheme != "https" { + m := fmt.Sprintf("Unsupported protocol scheme: %s, your address"+ + " must start with http:// or https://", u.Scheme) + return nil, errors.New(m) + } + + switch conf.WriteEncoding { + case DefaultEncoding, GzipEncoding: + default: + return nil, fmt.Errorf("unsupported encoding %s", conf.WriteEncoding) + } + + if conf.MaxIdleConns == 0 { + conf.MaxIdleConns = DefaultMaxIdleConns + } + if conf.MaxIdleConnsPerHost == 0 { + conf.MaxIdleConnsPerHost = conf.MaxIdleConns + } + + tr := &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: conf.InsecureSkipVerify, + }, + Proxy: conf.Proxy, + MaxIdleConns: conf.MaxIdleConns, + MaxIdleConnsPerHost: conf.MaxIdleConnsPerHost, + IdleConnTimeout: conf.IdleConnTimeout, + DialContext: (&net.Dialer{ + KeepAlive: time.Second * 60, + }).DialContext, + } + if conf.TLSConfig != nil { + tr.TLSClientConfig = conf.TLSConfig + } + return &client{ + url: *u, + username: conf.Username, + password: conf.Password, + useragent: conf.UserAgent, + httpClient: &http.Client{ + Timeout: conf.Timeout, + Transport: tr, + }, + transport: tr, + encoding: conf.WriteEncoding, + }, nil +} + +// Ping will check to see if the server is up with an optional timeout on waiting for leader. +// Ping returns how long the request took, the version of the server it connected to, and an error if one occurred. 
+func (c *client) Ping(timeout time.Duration) (time.Duration, string, error) { + now := time.Now() + + u := c.url + u.Path = path.Join(u.Path, "ping") + + req, err := http.NewRequest("GET", u.String(), nil) + if err != nil { + return 0, "", err + } + + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + if timeout > 0 { + params := req.URL.Query() + params.Set("wait_for_leader", fmt.Sprintf("%.0fs", timeout.Seconds())) + req.URL.RawQuery = params.Encode() + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return 0, "", err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return 0, "", err + } + + if resp.StatusCode != http.StatusNoContent { + var err = errors.New(string(body)) + return 0, "", err + } + + version := resp.Header.Get("X-Influxdb-Version") + return time.Since(now), version, nil +} + +// Close releases the client's resources. +func (c *client) Close() error { + c.transport.CloseIdleConnections() + return nil +} + +// client is safe for concurrent use as the fields are all read-only +// once the client is instantiated. +type client struct { + // N.B - if url.UserInfo is accessed in future modifications to the + // methods on client, you will need to synchronize access to url. + url url.URL + username string + password string + useragent string + httpClient *http.Client + transport *http.Transport + encoding ContentEncoding +} + +// BatchPoints is an interface into a batched grouping of points to write into +// InfluxDB together. BatchPoints is NOT thread-safe, you must create a separate +// batch for each goroutine. +type BatchPoints interface { + // AddPoint adds the given point to the Batch of points. + AddPoint(p *Point) + // AddPoints adds the given points to the Batch of points. + AddPoints(ps []*Point) + // Points lists the points in the Batch. 
+ Points() []*Point + // ClearPoints clear all points + ClearPoints() + + //ClearPoints get the number of Point + GetPointsNum() int + + // Precision returns the currently set precision of this Batch. + Precision() string + // SetPrecision sets the precision of this batch. + SetPrecision(s string) error + + // Database returns the currently set database of this Batch. + Database() string + // SetDatabase sets the database of this Batch. + SetDatabase(s string) + + // WriteConsistency returns the currently set write consistency of this Batch. + WriteConsistency() string + // SetWriteConsistency sets the write consistency of this Batch. + SetWriteConsistency(s string) + + // RetentionPolicy returns the currently set retention policy of this Batch. + RetentionPolicy() string + // SetRetentionPolicy sets the retention policy of this Batch. + SetRetentionPolicy(s string) +} + +// NewBatchPoints returns a BatchPoints interface based on the given config. +func NewBatchPoints(conf BatchPointsConfig) (BatchPoints, error) { + if conf.Precision == "" { + conf.Precision = "ns" + } + if _, err := time.ParseDuration("1" + conf.Precision); err != nil { + return nil, err + } + bp := &batchpoints{ + database: conf.Database, + precision: conf.Precision, + retentionPolicy: conf.RetentionPolicy, + writeConsistency: conf.WriteConsistency, + } + return bp, nil +} + +type batchpoints struct { + points []*Point + database string + precision string + retentionPolicy string + writeConsistency string +} + +func (bp *batchpoints) AddPoint(p *Point) { + bp.points = append(bp.points, p) +} + +func (bp *batchpoints) AddPoints(ps []*Point) { + bp.points = append(bp.points, ps...) 
+} + +func (bp *batchpoints) Points() []*Point { + return bp.points +} + +func (bp *batchpoints) ClearPoints() { + bp.points = bp.points[0:0] +} + +func (bp *batchpoints) GetPointsNum() int { + return len(bp.points) +} + +func (bp *batchpoints) Precision() string { + return bp.precision +} + +func (bp *batchpoints) Database() string { + return bp.database +} + +func (bp *batchpoints) WriteConsistency() string { + return bp.writeConsistency +} + +func (bp *batchpoints) RetentionPolicy() string { + return bp.retentionPolicy +} + +func (bp *batchpoints) SetPrecision(p string) error { + if _, err := time.ParseDuration("1" + p); err != nil { + return err + } + bp.precision = p + return nil +} + +func (bp *batchpoints) SetDatabase(db string) { + bp.database = db +} + +func (bp *batchpoints) SetWriteConsistency(wc string) { + bp.writeConsistency = wc +} + +func (bp *batchpoints) SetRetentionPolicy(rp string) { + bp.retentionPolicy = rp +} + +// Point represents a single data point. +type Point struct { + pt models.Point +} + +// NewPoint returns a point with the given timestamp. If a timestamp is not +// given, then data is sent to the database without a timestamp, in which case +// the server will assign local time upon reception. NOTE: it is recommended to +// send data with a timestamp. +func NewPoint( + name string, + tags map[string]string, + fields map[string]interface{}, + t ...time.Time, +) (*Point, error) { + var T time.Time + if len(t) > 0 { + T = t[0] + } + + pt, err := models.NewPoint(name, models.NewTags(tags), fields, T) + if err != nil { + return nil, err + } + return &Point{ + pt: pt, + }, nil +} + +// String returns a line-protocol string of the Point. +func (p *Point) String() string { + return p.pt.String() +} + +// PrecisionString returns a line-protocol string of the Point, +// with the timestamp formatted for the given precision. 
+func (p *Point) PrecisionString(precision string) string { + return p.pt.PrecisionString(precision) +} + +// Name returns the measurement name of the point. +func (p *Point) Name() string { + return string(p.pt.Name()) +} + +// Tags returns the tags associated with the point. +func (p *Point) Tags() map[string]string { + return p.pt.Tags().Map() +} + +// Time return the timestamp for the point. +func (p *Point) Time() time.Time { + return p.pt.Time() +} + +// UnixNano returns timestamp of the point in nanoseconds since Unix epoch. +func (p *Point) UnixNano() int64 { + return p.pt.UnixNano() +} + +// Fields returns the fields for the point. +func (p *Point) Fields() (map[string]interface{}, error) { + return p.pt.Fields() +} + +// NewPointFrom returns a point from the provided models.Point. +func NewPointFrom(pt models.Point) *Point { + return &Point{pt: pt} +} + +func (c *client) Write(bp BatchPoints) error { + var b bytes.Buffer + + var w io.Writer + if c.encoding == GzipEncoding { + w = gzip.NewWriter(&b) + } else { + w = &b + } + + for _, p := range bp.Points() { + if p == nil { + continue + } + if _, err := io.WriteString(w, p.pt.PrecisionString(bp.Precision())); err != nil && err != io.EOF { + return err + } + + if _, err := w.Write([]byte{'\n'}); err != nil && err != io.EOF { + return err + } + } + + // gzip writer should be closed to flush data into underlying buffer + if c, ok := w.(io.Closer); ok { + if err := c.Close(); err != nil && err != io.EOF { + return err + } + } + + u := c.url + u.Path = path.Join(u.Path, "write") + + req, err := http.NewRequest("POST", u.String(), &b) + if err == io.EOF { + err = nil + } + if err != nil { + return err + } + if c.encoding != DefaultEncoding { + req.Header.Set("Content-Encoding", string(c.encoding)) + } + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("db", 
bp.Database()) + params.Set("rp", bp.RetentionPolicy()) + params.Set("precision", bp.Precision()) + params.Set("consistency", bp.WriteConsistency()) + req.URL.RawQuery = params.Encode() + + resp, err := c.httpClient.Do(req) + if err == io.EOF { + err = nil + } + if err != nil { + return err + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err == io.EOF { + err = nil + } + if err != nil { + return err + } + + if resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + var err = errors.New(string(body)) + if err == io.EOF { + err = nil + } + return err + } + + return nil +} + +// Query defines a query to send to the server. +type Query struct { + Command string + Database string + RetentionPolicy string + Precision string + Chunked bool + ChunkSize int + Parameters map[string]interface{} +} + +// Params is a type alias to the query parameters. +type Params map[string]interface{} + +// NewQuery returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. +func NewQuery(command, database, precision string) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithRP returns a query object. +// The database, retention policy, and precision arguments can be empty strings if they are not needed +// for the query. Setting the retention policy only works on InfluxDB versions 1.6 or greater. +func NewQueryWithRP(command, database, retentionPolicy, precision string) Query { + return Query{ + Command: command, + Database: database, + RetentionPolicy: retentionPolicy, + Precision: precision, + Parameters: make(map[string]interface{}), + } +} + +// NewQueryWithParameters returns a query object. +// The database and precision arguments can be empty strings if they are not needed for the query. 
+// parameters is a map of the parameter names used in the command to their values. +func NewQueryWithParameters(command, database, precision string, parameters map[string]interface{}) Query { + return Query{ + Command: command, + Database: database, + Precision: precision, + Parameters: parameters, + } +} + +// Response represents a list of statement results. +type Response struct { + Results []Result + Err string `json:"error,omitempty"` +} + +// Error returns the first error from any statement. +// It returns nil if no errors occurred on any statements. +func (r *Response) Error() error { + if r.Err != "" { + return errors.New(r.Err) + } + for _, result := range r.Results { + if result.Err != "" { + return errors.New(result.Err) + } + } + return nil +} + +// Message represents a user message. +type Message struct { + Level string + Text string +} + +// Result represents a resultset returned from a single statement. +type Result struct { + StatementId int `json:"statement_id"` + Series []models.Row + Messages []*Message + Err string `json:"error,omitempty"` +} + +// Query sends a command to the server and returns the Response. +func (c *client) Query(q Query) (*Response, error) { + req, err := c.createDefaultRequest(q) + if err != nil { + return nil, err + } + params := req.URL.Query() + if q.Chunked { + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + req.URL.RawQuery = params.Encode() + } + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if err := checkResponse(resp); err != nil { + return nil, err + } + + var response Response + if q.Chunked { + cr := NewChunkedResponse(resp.Body) + for { + r, err := cr.NextResponse() + if err != nil { + if err == io.EOF { + break + } + // If we got an error while decoding the response, send that back. 
+ return nil, err + } + + if r == nil { + break + } + + response.Results = append(response.Results, r.Results...) + if r.Err != "" { + response.Err = r.Err + break + } + } + } else { + dec := json.NewDecoder(resp.Body) + dec.UseNumber() + decErr := dec.Decode(&response) + + // ignore this error if we got an invalid status code + if decErr != nil && decErr.Error() == "EOF" && resp.StatusCode != http.StatusOK { + decErr = nil + } + // If we got a valid decode error, send that back + if decErr != nil { + return nil, fmt.Errorf("unable to decode json: received status code %d err: %s", resp.StatusCode, decErr) + } + } + + // If we don't have an error in our json response, and didn't get statusOK + // then send back an error + if resp.StatusCode != http.StatusOK && response.Error() == nil { + return &response, fmt.Errorf("received status code %d from server", resp.StatusCode) + } + return &response, nil +} + +// QueryAsChunk sends a command to the server and returns the Response. +func (c *client) QueryAsChunk(q Query) (*ChunkedResponse, error) { + req, err := c.createDefaultRequest(q) + if err != nil { + return nil, err + } + params := req.URL.Query() + params.Set("chunked", "true") + if q.ChunkSize > 0 { + params.Set("chunk_size", strconv.Itoa(q.ChunkSize)) + } + req.URL.RawQuery = params.Encode() + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, err + } + + if err := checkResponse(resp); err != nil { + return nil, err + } + return NewChunkedResponse(resp.Body), nil +} + +func checkResponse(resp *http.Response) error { + // If we lack a X-Influxdb-Version header, then we didn't get a response from influxdb + // but instead some other service. If the error code is also a 500+ code, then some + // downstream loadbalancer/proxy/etc had an issue and we should report that. 
+ if resp.Header.Get("X-Influxdb-Version") == "" && resp.StatusCode >= http.StatusInternalServerError { + body, err := ioutil.ReadAll(resp.Body) + if err != nil || len(body) == 0 { + return fmt.Errorf("received status code %d from downstream server", resp.StatusCode) + } + + return fmt.Errorf("received status code %d from downstream server, with response body: %q", resp.StatusCode, body) + } + + // If we get an unexpected content type, then it is also not from influx direct and therefore + // we want to know what we received and what status code was returned for debugging purposes. + if cType, _, _ := mime.ParseMediaType(resp.Header.Get("Content-Type")); cType != "application/json" { + // Read up to 1kb of the body to help identify downstream errors and limit the impact of things + // like downstream serving a large file + body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1024)) + if err != nil || len(body) == 0 { + return fmt.Errorf("expected json response, got empty body, with status: %v", resp.StatusCode) + } + + return fmt.Errorf("expected json response, got %q, with status: %v and response body: %q", cType, resp.StatusCode, body) + } + return nil +} + +func (c *client) createDefaultRequest(q Query) (*http.Request, error) { + u := c.url + u.Path = path.Join(u.Path, "query") + + jsonParameters, err := json.Marshal(q.Parameters) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "") + req.Header.Set("User-Agent", c.useragent) + + if c.username != "" { + req.SetBasicAuth(c.username, c.password) + } + + params := req.URL.Query() + params.Set("q", q.Command) + params.Set("db", q.Database) + if q.RetentionPolicy != "" { + params.Set("rp", q.RetentionPolicy) + } + params.Set("params", string(jsonParameters)) + + if q.Precision != "" { + params.Set("epoch", q.Precision) + } + req.URL.RawQuery = params.Encode() + + return req, nil + +} + +// 
duplexReader reads responses and writes it to another writer while +// satisfying the reader interface. +type duplexReader struct { + r io.ReadCloser + w io.Writer +} + +func (r *duplexReader) Read(p []byte) (n int, err error) { + n, err = r.r.Read(p) + if err == nil { + r.w.Write(p[:n]) + } + return n, err +} + +// Close closes the response. +func (r *duplexReader) Close() error { + return r.r.Close() +} + +// ChunkedResponse represents a response from the server that +// uses chunking to stream the output. +type ChunkedResponse struct { + dec *json.Decoder + duplex *duplexReader + buf bytes.Buffer +} + +// NewChunkedResponse reads a stream and produces responses from the stream. +func NewChunkedResponse(r io.Reader) *ChunkedResponse { + rc, ok := r.(io.ReadCloser) + if !ok { + rc = ioutil.NopCloser(r) + } + resp := &ChunkedResponse{} + resp.duplex = &duplexReader{r: rc, w: &resp.buf} + resp.dec = json.NewDecoder(resp.duplex) + resp.dec.UseNumber() + return resp +} + +// NextResponse reads the next line of the stream and returns a response. +func (r *ChunkedResponse) NextResponse() (*Response, error) { + var response Response + if err := r.dec.Decode(&response); err != nil { + if err == io.EOF { + return nil, err + } + // A decoding error happened. This probably means the server crashed + // and sent a last-ditch error message to us. Ensure we have read the + // entirety of the connection to get any remaining error text. + io.Copy(ioutil.Discard, r.duplex) + return nil, errors.New(strings.TrimSpace(r.buf.String())) + } + + r.buf.Reset() + return &response, nil +} + +// Close closes the response. 
+func (r *ChunkedResponse) Close() error { + return r.duplex.Close() +} diff --git a/pkg/influxdb/influxdb-client/v2/params.go b/pkg/influxdb/influxdb-client/v2/params.go new file mode 100644 index 0000000..5616bfb --- /dev/null +++ b/pkg/influxdb/influxdb-client/v2/params.go @@ -0,0 +1,73 @@ +package client + +import ( + "encoding/json" + "time" +) + +type ( + // Identifier is an identifier value. + Identifier string + + // StringValue is a string literal. + StringValue string + + // RegexValue is a regexp literal. + RegexValue string + + // NumberValue is a number literal. + NumberValue float64 + + // IntegerValue is an integer literal. + IntegerValue int64 + + // BooleanValue is a boolean literal. + BooleanValue bool + + // TimeValue is a time literal. + TimeValue time.Time + + // DurationValue is a duration literal. + DurationValue time.Duration +) + +func (v Identifier) MarshalJSON() ([]byte, error) { + m := map[string]string{"identifier": string(v)} + return json.Marshal(m) +} + +func (v StringValue) MarshalJSON() ([]byte, error) { + m := map[string]string{"string": string(v)} + return json.Marshal(m) +} + +func (v RegexValue) MarshalJSON() ([]byte, error) { + m := map[string]string{"regex": string(v)} + return json.Marshal(m) +} + +func (v NumberValue) MarshalJSON() ([]byte, error) { + m := map[string]float64{"number": float64(v)} + return json.Marshal(m) +} + +func (v IntegerValue) MarshalJSON() ([]byte, error) { + m := map[string]int64{"integer": int64(v)} + return json.Marshal(m) +} + +func (v BooleanValue) MarshalJSON() ([]byte, error) { + m := map[string]bool{"boolean": bool(v)} + return json.Marshal(m) +} + +func (v TimeValue) MarshalJSON() ([]byte, error) { + t := time.Time(v) + m := map[string]string{"string": t.Format(time.RFC3339Nano)} + return json.Marshal(m) +} + +func (v DurationValue) MarshalJSON() ([]byte, error) { + m := map[string]int64{"duration": int64(v)} + return json.Marshal(m) +} diff --git a/pkg/influxdb/influxdb-client/v2/udp.go 
b/pkg/influxdb/influxdb-client/v2/udp.go new file mode 100644 index 0000000..9867868 --- /dev/null +++ b/pkg/influxdb/influxdb-client/v2/udp.go @@ -0,0 +1,116 @@ +package client + +import ( + "fmt" + "io" + "net" + "time" +) + +const ( + // UDPPayloadSize is a reasonable default payload size for UDP packets that + // could be travelling over the internet. + UDPPayloadSize = 512 +) + +// UDPConfig is the config data needed to create a UDP Client. +type UDPConfig struct { + // Addr should be of the form "host:port" + // or "[ipv6-host%zone]:port". + Addr string + + // PayloadSize is the maximum size of a UDP client message, optional + // Tune this based on your network. Defaults to UDPPayloadSize. + PayloadSize int +} + +// NewUDPClient returns a client interface for writing to an InfluxDB UDP +// service from the given config. +func NewUDPClient(conf UDPConfig) (Client, error) { + var udpAddr *net.UDPAddr + udpAddr, err := net.ResolveUDPAddr("udp", conf.Addr) + if err != nil { + return nil, err + } + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + + payloadSize := conf.PayloadSize + if payloadSize == 0 { + payloadSize = UDPPayloadSize + } + + return &udpclient{ + conn: conn, + payloadSize: payloadSize, + }, nil +} + +// Close releases the udpclient's resources. 
+func (uc *udpclient) Close() error { + return uc.conn.Close() +} + +type udpclient struct { + conn io.WriteCloser + payloadSize int +} + +func (uc *udpclient) Write(bp BatchPoints) error { + var b = make([]byte, 0, uc.payloadSize) // initial buffer size, it will grow as needed + var d, _ = time.ParseDuration("1" + bp.Precision()) + + var delayedError error + + var checkBuffer = func(n int) { + if len(b) > 0 && len(b)+n > uc.payloadSize { + if _, err := uc.conn.Write(b); err != nil { + delayedError = err + } + b = b[:0] + } + } + + for _, p := range bp.Points() { + p.pt.Round(d) + pointSize := p.pt.StringSize() + 1 // include newline in size + //point := p.pt.RoundedString(d) + "\n" + + checkBuffer(pointSize) + + if p.Time().IsZero() || pointSize <= uc.payloadSize { + b = p.pt.AppendString(b) + b = append(b, '\n') + continue + } + + points := p.pt.Split(uc.payloadSize - 1) // account for newline character + for _, sp := range points { + checkBuffer(sp.StringSize() + 1) + b = sp.AppendString(b) + b = append(b, '\n') + } + } + + if len(b) > 0 { + if _, err := uc.conn.Write(b); err != nil { + return err + } + } + return delayedError +} + +func (uc *udpclient) Query(q Query) (*Response, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) QueryAsChunk(q Query) (*ChunkedResponse, error) { + return nil, fmt.Errorf("Querying via UDP is not supported") +} + +func (uc *udpclient) Ping(timeout time.Duration) (time.Duration, string, error) { + return 0, "", nil +} diff --git a/pkg/influxdb/metrics.go b/pkg/influxdb/metrics.go index 7dcca9a..4729e44 100644 --- a/pkg/influxdb/metrics.go +++ b/pkg/influxdb/metrics.go @@ -3,8 +3,8 @@ package influxdb import ( "errors" "fmt" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" - client "gitlab.oneitfarm.com/bifrost/influxdata/influxdb1-client/v2" + client "github.com/ztalab/ZACA/pkg/influxdb/influxdb-client/v2" + "github.com/ztalab/ZACA/pkg/logger" "io" "strings" "sync" @@ -41,14 +41,14 @@ 
type Response struct { func NewMetrics(influxDBHttpClient *HTTPClient, conf *CustomConfig) (metrics *Metrics) { bp, err := client.NewBatchPoints(influxDBHttpClient.BatchPointsConfig) if err != nil { - v2log.Named("metrics").Errorf("custom-influxdb client.NewBatchPoints err: %v", err) + logger.Named("metrics").Errorf("custom-influxdb client.NewBatchPoints err: %v", err) return } metrics = &Metrics{ conf: conf, batchPoints: bp, point: make(chan *client.Point, 16), - flushTimer: time.NewTicker(time.Duration(conf.FlushTime) * time.Second), // 默认定时 30s 发送一次数据 + flushTimer: time.NewTicker(time.Duration(conf.FlushTime) * time.Second), InfluxDBHttpClient: influxDBHttpClient, } go metrics.worker() @@ -62,7 +62,7 @@ func (mt *Metrics) AddPoint(metricsData *MetricsData) { //atomic.AddUint64(&mt.counter, 1) pt, err := client.NewPoint(metricsData.Measurement, metricsData.Tags, metricsData.Fields, time.Now()) if err != nil { - v2log.Named("metrics").Errorf("custom-influxdb client.NewPoint err: %s", err) + logger.Named("metrics").Errorf("custom-influxdb client.NewPoint err: %s", err) return } mt.point <- pt @@ -77,14 +77,11 @@ func (mt *Metrics) worker() { return } mt.batchPoints.AddPoint(p) - // 当点数量达到50的时候,发送数据 - //fmt.Println("当前缓存的点的个数: ", mt.batchPoints.GetPointsNum()) - //fmt.Println("当前缓存的点FlushSize: ", mt.conf.FlushSize) + // When the number of points reaches 50, send data if mt.batchPoints.GetPointsNum() >= mt.conf.FlushSize { mt.flush() } case <-mt.flushTimer.C: - //fmt.Println("定时器到,flush数据---------------------") mt.flush() } } @@ -101,15 +98,15 @@ func (mt *Metrics) flush() { if strings.Contains(err.Error(), io.EOF.Error()) { err = nil } else { - v2log.Named("metric").Errorf("custom-influxdb client.Write err: %s", err) + logger.Named("metric").Errorf("custom-influxdb client.Write err: %s", err) } } defer mt.InfluxDBHttpClient.FluxDBHttpClose() - // 清空所有的点 + // Clear all points mt.batchPoints.ClearPoints() } -// 写入数据超时处理 +// Write data timeout processing func (mt 
*Metrics) Write() error { ch := make(chan error, 1) go func() { @@ -118,7 +115,7 @@ func (mt *Metrics) Write() error { select { case err := <-ch: return err - case <-time.After(800 * time.Millisecond): // 800豪秒超时 + case <-time.After(800 * time.Millisecond): return errors.New("write timeout") } } @@ -126,6 +123,6 @@ func (mt *Metrics) Write() error { func (mt *Metrics) count() { for { time.Sleep(time.Second) - fmt.Println("计数器:", atomic.LoadUint64(&mt.counter)) + fmt.Println("Counter:", atomic.LoadUint64(&mt.counter)) } } diff --git a/pkg/keygen/genkey.go b/pkg/keygen/genkey.go index 9dc0b7e..91eb4e5 100644 --- a/pkg/keygen/genkey.go +++ b/pkg/keygen/genkey.go @@ -13,12 +13,12 @@ import ( "strings" "time" - cf_csr "gitlab.oneitfarm.com/bifrost/cfssl/csr" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" + cf_csr "github.com/ztalab/cfssl/csr" + "github.com/ztalab/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/pkiutil" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/capitalizone/util" + "github.com/ztalab/ZACA/pkg/pkiutil" + "github.com/ztalab/ZACA/pkg/spiffe" + "github.com/ztalab/ZACA/util" ) type SupportedSignatureAlgorithms string @@ -74,7 +74,7 @@ type CertOptions struct { SigAlg SupportedSignatureAlgorithms } -// 生成 Private Key +// Generate Private Key func GenKey(sigAlg SupportedSignatureAlgorithms) (priv interface{}, key []byte, err error) { var block pem.Block switch sigAlg { @@ -112,8 +112,8 @@ func GenKey(sigAlg SupportedSignatureAlgorithms) (priv interface{}, key []byte, return priv, key, nil } -// 通过 Key 生成 CSR -// 支持自定义 CSR 请求 +// Generate CSR through key +// Support custom CSR requests func GenCSR(key []byte, options CertOptions) ([]byte, error) { template, _ := pkiutil.GenCSRTemplate(pkiutil.CertOptions{ Host: options.Host, @@ -157,7 +157,7 @@ func GenWorkloadCSR(key []byte, id *spiffe.IDGIdentity) ([]byte, error) { }) } -// GenExtendWorkloadCSR 支持自定义 CSR 参数 +// GenExtendWorkloadCSR Support custom 
CSR parameters func GenExtendWorkloadCSR(key []byte, id *spiffe.IDGIdentity, csrConf CSRConf) ([]byte, error) { hostnames := make([]string, 0) if len(csrConf.SNIHostnames) > 0 { @@ -181,7 +181,7 @@ func GenExtendWorkloadCSR(key []byte, id *spiffe.IDGIdentity, csrConf CSRConf) ( }) } -// GenCustomExtendCSR 生成业务自定义带扩展字段的 CSR +// GenCustomExtendCSR Generate business custom CSR with extended fields func GenCustomExtendCSR(pemKey []byte, id *spiffe.IDGIdentity, opts *CertOptions, exts []pkix.Extension) ([]byte, error) { if opts.Host == "" { hostname, _ := os.Hostname() diff --git a/pkg/keygen/genkey_test.go b/pkg/keygen/genkey_test.go index 1d2291a..5723a2f 100644 --- a/pkg/keygen/genkey_test.go +++ b/pkg/keygen/genkey_test.go @@ -6,8 +6,8 @@ import ( "fmt" "testing" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/attrmgr" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/ZACA/pkg/attrmgr" + "github.com/ztalab/ZACA/pkg/spiffe" ) var ( diff --git a/pkg/keyprovider/xkey_provider.go b/pkg/keyprovider/xkey_provider.go index 3f9507d..f621ede 100644 --- a/pkg/keyprovider/xkey_provider.go +++ b/pkg/keyprovider/xkey_provider.go @@ -13,12 +13,12 @@ import ( "sync" "github.com/pkg/errors" - "gitlab.oneitfarm.com/bifrost/cfssl/csr" - "gitlab.oneitfarm.com/bifrost/cfssl/helpers" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/cfssl/csr" + "github.com/ztalab/cfssl/helpers" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/ZACA/pkg/keygen" + "github.com/ztalab/ZACA/pkg/spiffe" ) const ( @@ -46,7 +46,6 @@ type xInternal struct { certPEM []byte } -// TODO 加入 file watcher 更新 root 证书 // XKeyProvider provides unencrypted PEM-encoded certificates and // private keys. If paths are provided, the key and certificate will // be stored on disk. 
@@ -55,7 +54,7 @@ type XKeyProvider struct { internal xInternal *spiffe.IDGIdentity `json:"idg_identity"` DiskStore bool - logger *v2log.Logger + logger *logger.Logger CSRConf keygen.CSRConf } @@ -75,7 +74,7 @@ func NewXKeyProvider(id *spiffe.IDGIdentity) (*XKeyProvider, error) { mu: sync.RWMutex{}, }, IDGIdentity: id, - logger: v2log.Named("keyprovider"), + logger: logger.Named("keyprovider"), } err := sp.Check() @@ -155,7 +154,7 @@ func (sp *XKeyProvider) Generate(algo string, size int) (err error) { sp.internal.priv = priv.(crypto.Signer) sp.internal.keyPEM = key - sp.logger.Debugf("创建 Private Key: %v", algo) + sp.logger.Debugf("Create Private Key: %v", algo) return nil } diff --git a/pkg/keyprovider/xkey_provider_test.go b/pkg/keyprovider/xkey_provider_test.go index 46cd7fc..71cee06 100644 --- a/pkg/keyprovider/xkey_provider_test.go +++ b/pkg/keyprovider/xkey_provider_test.go @@ -2,8 +2,8 @@ package keyprovider import ( "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/ZACA/pkg/keygen" + "github.com/ztalab/ZACA/pkg/spiffe" "testing" ) diff --git a/pkg/kutil/clock/clock.go b/pkg/kutil/clock/clock.go deleted file mode 100644 index 6cf13d8..0000000 --- a/pkg/kutil/clock/clock.go +++ /dev/null @@ -1,393 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clock - -import ( - "sync" - "time" -) - -// PassiveClock allows for injecting fake or real clocks into code -// that needs to read the current time but does not support scheduling -// activity in the future. -type PassiveClock interface { - Now() time.Time - Since(time.Time) time.Duration -} - -// Clock allows for injecting fake or real clocks into code that -// needs to do arbitrary things based on time. -type Clock interface { - PassiveClock - After(time.Duration) <-chan time.Time - NewTimer(time.Duration) Timer - Sleep(time.Duration) - NewTicker(time.Duration) Ticker -} - -// RealClock really calls time.Now() -type RealClock struct{} - -// Now returns the current time. -func (RealClock) Now() time.Time { - return time.Now() -} - -// Since returns time since the specified timestamp. -func (RealClock) Since(ts time.Time) time.Duration { - return time.Since(ts) -} - -// After is the same as time.After(d). -func (RealClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -// NewTimer returns a new Timer. -func (RealClock) NewTimer(d time.Duration) Timer { - return &realTimer{ - timer: time.NewTimer(d), - } -} - -// NewTicker returns a new Ticker. -func (RealClock) NewTicker(d time.Duration) Ticker { - return &realTicker{ - ticker: time.NewTicker(d), - } -} - -// Sleep pauses the RealClock for duration d. -func (RealClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -// FakePassiveClock implements PassiveClock, but returns an arbitrary time. -type FakePassiveClock struct { - lock sync.RWMutex - time time.Time -} - -// FakeClock implements Clock, but returns an arbitrary time. -type FakeClock struct { - FakePassiveClock - - // waiters are waiting for the fake time to pass their specified time - waiters []fakeClockWaiter -} - -type fakeClockWaiter struct { - targetTime time.Time - stepInterval time.Duration - skipIfBlocked bool - destChan chan time.Time -} - -// NewFakePassiveClock returns a new FakePassiveClock. 
-func NewFakePassiveClock(t time.Time) *FakePassiveClock { - return &FakePassiveClock{ - time: t, - } -} - -// NewFakeClock returns a new FakeClock -func NewFakeClock(t time.Time) *FakeClock { - return &FakeClock{ - FakePassiveClock: *NewFakePassiveClock(t), - } -} - -// Now returns f's time. -func (f *FakePassiveClock) Now() time.Time { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time -} - -// Since returns time since the time in f. -func (f *FakePassiveClock) Since(ts time.Time) time.Duration { - f.lock.RLock() - defer f.lock.RUnlock() - return f.time.Sub(ts) -} - -// SetTime sets the time on the FakePassiveClock. -func (f *FakePassiveClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.time = t -} - -// After is the Fake version of time.After(d). -func (f *FakeClock) After(d time.Duration) <-chan time.Time { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }) - return ch -} - -// NewTimer is the Fake version of time.NewTimer(d). -func (f *FakeClock) NewTimer(d time.Duration) Timer { - f.lock.Lock() - defer f.lock.Unlock() - stopTime := f.time.Add(d) - ch := make(chan time.Time, 1) // Don't block! - timer := &fakeTimer{ - fakeClock: f, - waiter: fakeClockWaiter{ - targetTime: stopTime, - destChan: ch, - }, - } - f.waiters = append(f.waiters, timer.waiter) - return timer -} - -// NewTicker returns a new Ticker. 
-func (f *FakeClock) NewTicker(d time.Duration) Ticker { - f.lock.Lock() - defer f.lock.Unlock() - tickTime := f.time.Add(d) - ch := make(chan time.Time, 1) // hold one tick - f.waiters = append(f.waiters, fakeClockWaiter{ - targetTime: tickTime, - stepInterval: d, - skipIfBlocked: true, - destChan: ch, - }) - - return &fakeTicker{ - c: ch, - } -} - -// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer -func (f *FakeClock) Step(d time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(f.time.Add(d)) -} - -// SetTime sets the time on a FakeClock. -func (f *FakeClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.setTimeLocked(t) -} - -// Actually changes the time and checks any waiters. f must be write-locked. -func (f *FakeClock) setTimeLocked(t time.Time) { - f.time = t - newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) - for i := range f.waiters { - w := &f.waiters[i] - if !w.targetTime.After(t) { - - if w.skipIfBlocked { - select { - case w.destChan <- t: - default: - } - } else { - w.destChan <- t - } - - if w.stepInterval > 0 { - for !w.targetTime.After(t) { - w.targetTime = w.targetTime.Add(w.stepInterval) - } - newWaiters = append(newWaiters, *w) - } - - } else { - newWaiters = append(newWaiters, f.waiters[i]) - } - } - f.waiters = newWaiters -} - -// HasWaiters returns true if After has been called on f but not yet satisfied (so you can -// write race-free tests). -func (f *FakeClock) HasWaiters() bool { - f.lock.RLock() - defer f.lock.RUnlock() - return len(f.waiters) > 0 -} - -// Sleep pauses the FakeClock for duration d. -func (f *FakeClock) Sleep(d time.Duration) { - f.Step(d) -} - -// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration -type IntervalClock struct { - Time time.Time - Duration time.Duration -} - -// Now returns i's time. 
-func (i *IntervalClock) Now() time.Time { - i.Time = i.Time.Add(i.Duration) - return i.Time -} - -// Since returns time since the time in i. -func (i *IntervalClock) Since(ts time.Time) time.Duration { - return i.Time.Sub(ts) -} - -// After is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) After(d time.Duration) <-chan time.Time { - panic("IntervalClock doesn't implement After") -} - -// NewTimer is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTimer(d time.Duration) Timer { - panic("IntervalClock doesn't implement NewTimer") -} - -// NewTicker is currently unimplemented, will panic. -// TODO: make interval clock use FakeClock so this can be implemented. -func (*IntervalClock) NewTicker(d time.Duration) Ticker { - panic("IntervalClock doesn't implement NewTicker") -} - -// Sleep is currently unimplemented; will panic. -func (*IntervalClock) Sleep(d time.Duration) { - panic("IntervalClock doesn't implement Sleep") -} - -// Timer allows for injecting fake or real timers into code that -// needs to do arbitrary things based on time. -type Timer interface { - C() <-chan time.Time - Stop() bool - Reset(d time.Duration) bool -} - -// realTimer is backed by an actual time.Timer. -type realTimer struct { - timer *time.Timer -} - -// C returns the underlying timer's channel. -func (r *realTimer) C() <-chan time.Time { - return r.timer.C -} - -// Stop calls Stop() on the underlying timer. -func (r *realTimer) Stop() bool { - return r.timer.Stop() -} - -// Reset calls Reset() on the underlying timer. -func (r *realTimer) Reset(d time.Duration) bool { - return r.timer.Reset(d) -} - -// fakeTimer implements Timer based on a FakeClock. -type fakeTimer struct { - fakeClock *FakeClock - waiter fakeClockWaiter -} - -// C returns the channel that notifies when this timer has fired. 
-func (f *fakeTimer) C() <-chan time.Time { - return f.waiter.destChan -} - -// Stop conditionally stops the timer. If the timer has neither fired -// nor been stopped then this call stops the timer and returns true, -// otherwise this call returns false. This is like time.Timer::Stop. -func (f *fakeTimer) Stop() bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - // The timer has already fired or been stopped, unless it is found - // among the clock's waiters. - stopped := false - oldWaiters := f.fakeClock.waiters - newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) - seekChan := f.waiter.destChan - for i := range oldWaiters { - // Identify the timer's fakeClockWaiter by the identity of the - // destination channel, nothing else is necessarily unique and - // constant since the timer's creation. - if oldWaiters[i].destChan == seekChan { - stopped = true - } else { - newWaiters = append(newWaiters, oldWaiters[i]) - } - } - - f.fakeClock.waiters = newWaiters - - return stopped -} - -// Reset conditionally updates the firing time of the timer. If the -// timer has neither fired nor been stopped then this call resets the -// timer to the fake clock's "now" + d and returns true, otherwise -// this call returns false. This is like time.Timer::Reset. 
-func (f *fakeTimer) Reset(d time.Duration) bool { - f.fakeClock.lock.Lock() - defer f.fakeClock.lock.Unlock() - waiters := f.fakeClock.waiters - seekChan := f.waiter.destChan - for i := range waiters { - if waiters[i].destChan == seekChan { - waiters[i].targetTime = f.fakeClock.time.Add(d) - return true - } - } - return false -} - -// Ticker defines the Ticker interface -type Ticker interface { - C() <-chan time.Time - Stop() -} - -type realTicker struct { - ticker *time.Ticker -} - -func (t *realTicker) C() <-chan time.Time { - return t.ticker.C -} - -func (t *realTicker) Stop() { - t.ticker.Stop() -} - -type fakeTicker struct { - c <-chan time.Time -} - -func (t *fakeTicker) C() <-chan time.Time { - return t.c -} - -func (t *fakeTicker) Stop() { -} diff --git a/pkg/kutil/errors/doc.go b/pkg/kutil/errors/doc.go deleted file mode 100644 index 5d4d625..0000000 --- a/pkg/kutil/errors/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package errors implements various utility functions and types around errors. -package errors // import "k8s.io/apimachinery/pkg/util/errors" diff --git a/pkg/kutil/errors/errors.go b/pkg/kutil/errors/errors.go deleted file mode 100644 index 5bafc21..0000000 --- a/pkg/kutil/errors/errors.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package errors - -import ( - "errors" - "fmt" - - "k8s.io/apimachinery/pkg/util/sets" -) - -// MessageCountMap contains occurrence for each error message. -type MessageCountMap map[string]int - -// Aggregate represents an object that contains multiple errors, but does not -// necessarily have singular semantic meaning. -// The aggregate can be used with `errors.Is()` to check for the occurrence of -// a specific error type. -// Errors.As() is not supported, because the caller presumably cares about a -// specific error of potentially multiple that match the given type. -type Aggregate interface { - error - Errors() []error - Is(error) bool -} - -// NewAggregate converts a slice of errors into an Aggregate interface, which -// is itself an implementation of the error interface. If the slice is empty, -// this returns nil. -// It will check if any of the element of input error list is nil, to avoid -// nil pointer panic when call Error(). -func NewAggregate(errlist []error) Aggregate { - if len(errlist) == 0 { - return nil - } - // In case of input error list contains nil - var errs []error - for _, e := range errlist { - if e != nil { - errs = append(errs, e) - } - } - if len(errs) == 0 { - return nil - } - return aggregate(errs) -} - -// This helper implements the error and Errors interfaces. Keeping it private -// prevents people from making an aggregate of 0 errors, which is not -// an error, but does satisfy the error interface. 
-type aggregate []error - -// Error is part of the error interface. -func (agg aggregate) Error() string { - if len(agg) == 0 { - // This should never happen, really. - return "" - } - if len(agg) == 1 { - return agg[0].Error() - } - seenerrs := sets.NewString() - result := "" - agg.visit(func(err error) bool { - msg := err.Error() - if seenerrs.Has(msg) { - return false - } - seenerrs.Insert(msg) - if len(seenerrs) > 1 { - result += ", " - } - result += msg - return false - }) - if len(seenerrs) == 1 { - return result - } - return "[" + result + "]" -} - -func (agg aggregate) Is(target error) bool { - return agg.visit(func(err error) bool { - return errors.Is(err, target) - }) -} - -func (agg aggregate) visit(f func(err error) bool) bool { - for _, err := range agg { - switch err := err.(type) { - case aggregate: - if match := err.visit(f); match { - return match - } - case Aggregate: - for _, nestedErr := range err.Errors() { - if match := f(nestedErr); match { - return match - } - } - default: - if match := f(err); match { - return match - } - } - } - - return false -} - -// Errors is part of the Aggregate interface. -func (agg aggregate) Errors() []error { - return []error(agg) -} - -// Matcher is used to match errors. Returns true if the error matches. -type Matcher func(error) bool - -// FilterOut removes all errors that match any of the matchers from the input -// error. If the input is a singular error, only that error is tested. If the -// input implements the Aggregate interface, the list of errors will be -// processed recursively. -// -// This can be used, for example, to remove known-OK errors (such as io.EOF or -// os.PathNotFound) from a list of errors. -func FilterOut(err error, fns ...Matcher) error { - if err == nil { - return nil - } - if agg, ok := err.(Aggregate); ok { - return NewAggregate(filterErrors(agg.Errors(), fns...)) - } - if !matchesError(err, fns...) 
{ - return err - } - return nil -} - -// matchesError returns true if any Matcher returns true -func matchesError(err error, fns ...Matcher) bool { - for _, fn := range fns { - if fn(err) { - return true - } - } - return false -} - -// filterErrors returns any errors (or nested errors, if the list contains -// nested Errors) for which all fns return false. If no errors -// remain a nil list is returned. The resulting silec will have all -// nested slices flattened as a side effect. -func filterErrors(list []error, fns ...Matcher) []error { - result := []error{} - for _, err := range list { - r := FilterOut(err, fns...) - if r != nil { - result = append(result, r) - } - } - return result -} - -// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary -// nesting, and flattens them all into a single Aggregate, recursively. -func Flatten(agg Aggregate) Aggregate { - result := []error{} - if agg == nil { - return nil - } - for _, err := range agg.Errors() { - if a, ok := err.(Aggregate); ok { - r := Flatten(a) - if r != nil { - result = append(result, r.Errors()...) - } - } else { - if err != nil { - result = append(result, err) - } - } - } - return NewAggregate(result) -} - -// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate -func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { - if m == nil { - return nil - } - result := make([]error, 0, len(m)) - for errStr, count := range m { - var countStr string - if count > 1 { - countStr = fmt.Sprintf(" (repeated %v times)", count) - } - result = append(result, fmt.Errorf("%v%v", errStr, countStr)) - } - return NewAggregate(result) -} - -// Reduce will return err or, if err is an Aggregate and only has one item, -// the first item in the aggregate. 
-func Reduce(err error) error { - if agg, ok := err.(Aggregate); ok && err != nil { - switch len(agg.Errors()) { - case 1: - return agg.Errors()[0] - case 0: - return nil - } - } - return err -} - -// AggregateGoroutines runs the provided functions in parallel, stuffing all -// non-nil errors into the returned Aggregate. -// Returns nil if all the functions complete successfully. -func AggregateGoroutines(funcs ...func() error) Aggregate { - errChan := make(chan error, len(funcs)) - for _, f := range funcs { - go func(f func() error) { errChan <- f() }(f) - } - errs := make([]error, 0) - for i := 0; i < cap(errChan); i++ { - if err := <-errChan; err != nil { - errs = append(errs, err) - } - } - return NewAggregate(errs) -} - -// ErrPreconditionViolated is returned when the precondition is violated -var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/pkg/kutil/framer/framer.go b/pkg/kutil/framer/framer.go deleted file mode 100644 index 066680f..0000000 --- a/pkg/kutil/framer/framer.go +++ /dev/null @@ -1,167 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Package framer implements simple frame decoding techniques for an io.ReadCloser -package framer - -import ( - "encoding/binary" - "encoding/json" - "io" -) - -type lengthDelimitedFrameWriter struct { - w io.Writer - h [4]byte -} - -func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { - return &lengthDelimitedFrameWriter{w: w} -} - -// Write writes a single frame to the nested writer, prepending it with the length in -// in bytes of data (as a 4 byte, bigendian uint32). -func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { - binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) - n, err := w.w.Write(w.h[:]) - if err != nil { - return 0, err - } - if n != len(w.h) { - return 0, io.ErrShortWrite - } - return w.w.Write(data) -} - -type lengthDelimitedFrameReader struct { - r io.ReadCloser - remaining int -} - -// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed -// frames off of a stream. -// -// The protocol is: -// -// stream: message ... -// message: prefix body -// prefix: 4 byte uint32 in BigEndian order, denotes length of body -// body: bytes (0..prefix) -// -// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead -// will be returned along with the number of bytes read. -func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser { - return &lengthDelimitedFrameReader{r: r} -} - -// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer -// is returned and subsequent calls will attempt to read the last frame. A frame is complete when -// err is nil. 
-func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { - if r.remaining <= 0 { - header := [4]byte{} - n, err := io.ReadAtLeast(r.r, header[:4], 4) - if err != nil { - return 0, err - } - if n != 4 { - return 0, io.ErrUnexpectedEOF - } - frameLength := int(binary.BigEndian.Uint32(header[:])) - r.remaining = frameLength - } - - expect := r.remaining - max := expect - if max > len(data) { - max = len(data) - } - n, err := io.ReadAtLeast(r.r, data[:max], int(max)) - r.remaining -= n - if err == io.ErrShortBuffer || r.remaining > 0 { - return n, io.ErrShortBuffer - } - if err != nil { - return n, err - } - if n != expect { - return n, io.ErrUnexpectedEOF - } - - return n, nil -} - -func (r *lengthDelimitedFrameReader) Close() error { - return r.r.Close() -} - -type jsonFrameReader struct { - r io.ReadCloser - decoder *json.Decoder - remaining []byte -} - -// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off -// of a wire. -// -// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate -// the read. -func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser { - return &jsonFrameReader{ - r: r, - decoder: json.NewDecoder(r), - } -} - -// ReadFrame decodes the next JSON object in the stream, or returns an error. The returned -// byte slice will be modified the next time ReadFrame is invoked and should not be altered. -func (r *jsonFrameReader) Read(data []byte) (int, error) { - // Return whatever remaining data exists from an in progress frame - if n := len(r.remaining); n > 0 { - if n <= len(data) { - data = append(data[0:0], r.remaining...) - r.remaining = nil - return n, nil - } - - n = len(data) - data = append(data[0:0], r.remaining[:n]...) - r.remaining = r.remaining[n:] - return n, io.ErrShortBuffer - } - - // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see - // data written to data, or be larger than data and a different array. 
- n := len(data) - m := json.RawMessage(data[:0]) - if err := r.decoder.Decode(&m); err != nil { - return 0, err - } - - // If capacity of data is less than length of the message, decoder will allocate a new slice - // and set m to it, which means we need to copy the partial result back into data and preserve - // the remaining result for subsequent reads. - if len(m) > n { - data = append(data[0:0], m[:n]...) - r.remaining = m[n:] - return n, io.ErrShortBuffer - } - return len(m), nil -} - -func (r *jsonFrameReader) Close() error { - return r.r.Close() -} diff --git a/pkg/kutil/intstr/generated.pb.go b/pkg/kutil/intstr/generated.pb.go deleted file mode 100644 index ec1cb70..0000000 --- a/pkg/kutil/intstr/generated.pb.go +++ /dev/null @@ -1,372 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - -package intstr - -import ( - fmt "fmt" - - io "io" - math "math" - math_bits "math/bits" - - proto "github.com/gogo/protobuf/proto" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { - return fileDescriptor_94e046ae3ce6121c, []int{0} -} -func (m *IntOrString) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IntOrString) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntOrString.Merge(m, src) -} -func (m *IntOrString) XXX_Size() int { - return m.Size() -} -func (m *IntOrString) XXX_DiscardUnknown() { - xxx_messageInfo_IntOrString.DiscardUnknown(m) -} - -var xxx_messageInfo_IntOrString proto.InternalMessageInfo - -func init() { - proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) -} - -var fileDescriptor_94e046ae3ce6121c = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 
0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} - -func (m *IntOrString) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.StrVal) - copy(dAtA[i:], m.StrVal) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v 
uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *IntOrString) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Type)) - n += 1 + sovGenerated(uint64(m.IntVal)) - l = len(m.StrVal) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *IntOrString) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Type(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) - } - m.IntVal = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IntVal |= int32(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StrVal = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/pkg/kutil/intstr/generated.proto b/pkg/kutil/intstr/generated.proto deleted file mode 100644 index e79fb9e..0000000 --- a/pkg/kutil/intstr/generated.proto +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.apimachinery.pkg.util.intstr; - -// Package-wide variables from generator "generated". -option go_package = "intstr"; - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. 
This allows you to have, for example, a JSON field that can -// accept a name or number. -// TODO: Rename to Int32OrString -// -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +k8s:openapi-gen=true -message IntOrString { - optional int64 type = 1; - - optional int32 intVal = 2; - - optional string strVal = 3; -} - diff --git a/pkg/kutil/intstr/intstr.go b/pkg/kutil/intstr/intstr.go deleted file mode 100644 index 6576def..0000000 --- a/pkg/kutil/intstr/intstr.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package intstr - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "runtime/debug" - "strconv" - "strings" - - "github.com/google/gofuzz" - "k8s.io/klog/v2" -) - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. This allows you to have, for example, a JSON field that can -// accept a name or number. -// TODO: Rename to Int32OrString -// -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +k8s:openapi-gen=true -type IntOrString struct { - Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` - IntVal int32 `protobuf:"varint,2,opt,name=intVal"` - StrVal string `protobuf:"bytes,3,opt,name=strVal"` -} - -// Type represents the stored type of IntOrString. 
-type Type int64 - -const ( - Int Type = iota // The IntOrString holds an int. - String // The IntOrString holds a string. -) - -// FromInt creates an IntOrString object with an int32 value. It is -// your responsibility not to call this method with a value greater -// than int32. -// TODO: convert to (val int32) -func FromInt(val int) IntOrString { - if val > math.MaxInt32 || val < math.MinInt32 { - klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) - } - return IntOrString{Type: Int, IntVal: int32(val)} -} - -// FromString creates an IntOrString object with a string value. -func FromString(val string) IntOrString { - return IntOrString{Type: String, StrVal: val} -} - -// Parse the given string and try to convert it to an integer before -// setting it as a string value. -func Parse(val string) IntOrString { - i, err := strconv.Atoi(val) - if err != nil { - return FromString(val) - } - return FromInt(i) -} - -// UnmarshalJSON implements the json.Unmarshaller interface. -func (intstr *IntOrString) UnmarshalJSON(value []byte) error { - if value[0] == '"' { - intstr.Type = String - return json.Unmarshal(value, &intstr.StrVal) - } - intstr.Type = Int - return json.Unmarshal(value, &intstr.IntVal) -} - -// String returns the string value, or the Itoa of the int value. -func (intstr *IntOrString) String() string { - if intstr.Type == String { - return intstr.StrVal - } - return strconv.Itoa(intstr.IntValue()) -} - -// IntValue returns the IntVal if type Int, or if -// it is a String, will attempt a conversion to int, -// returning 0 if a parsing error occurs. -func (intstr *IntOrString) IntValue() int { - if intstr.Type == String { - i, _ := strconv.Atoi(intstr.StrVal) - return i - } - return int(intstr.IntVal) -} - -// MarshalJSON implements the json.Marshaller interface. 
-func (intstr IntOrString) MarshalJSON() ([]byte, error) { - switch intstr.Type { - case Int: - return json.Marshal(intstr.IntVal) - case String: - return json.Marshal(intstr.StrVal) - default: - return []byte{}, fmt.Errorf("impossible IntOrString.Type") - } -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } - -func (intstr *IntOrString) Fuzz(c fuzz.Continue) { - if intstr == nil { - return - } - if c.RandBool() { - intstr.Type = Int - c.Fuzz(&intstr.IntVal) - intstr.StrVal = "" - } else { - intstr.Type = String - intstr.IntVal = 0 - c.Fuzz(&intstr.StrVal) - } -} - -func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { - if intOrPercent == nil { - return &defaultValue - } - return intOrPercent -} - -func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { - if intOrPercent == nil { - return 0, errors.New("nil value for IntOrString") - } - value, isPercent, err := getIntOrPercentValue(intOrPercent) - if err != nil { - return 0, fmt.Errorf("invalid value for IntOrString: %v", err) - } - if isPercent { - if roundUp { - value = int(math.Ceil(float64(value) * (float64(total)) / 100)) - } else { - value = int(math.Floor(float64(value) * (float64(total)) / 100)) - } - } - return value, nil -} - -func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { - switch intOrStr.Type { - case Int: - return intOrStr.IntValue(), false, nil - case String: - s := strings.Replace(intOrStr.StrVal, "%", "", -1) - v, err := strconv.Atoi(s) - if err != nil { - return 0, false, 
fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) - } - return int(v), true, nil - } - return 0, false, fmt.Errorf("invalid type: neither int nor percentage") -} diff --git a/pkg/kutil/json/json.go b/pkg/kutil/json/json.go deleted file mode 100644 index 2048348..0000000 --- a/pkg/kutil/json/json.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package json - -import ( - "bytes" - "encoding/json" - "fmt" - "io" -) - -// NewEncoder delegates to json.NewEncoder -// It is only here so this package can be a drop-in for common encoding/json uses -func NewEncoder(w io.Writer) *json.Encoder { - return json.NewEncoder(w) -} - -// Marshal delegates to json.Marshal -// It is only here so this package can be a drop-in for common encoding/json uses -func Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -// limit recursive depth to prevent stack overflow errors -const maxDepth = 10000 - -// Unmarshal unmarshals the given data -// If v is a *map[string]interface{}, numbers are converted to int64 or float64 -func Unmarshal(data []byte, v interface{}) error { - switch v := v.(type) { - case *map[string]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode 
succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v, 0) - - case *[]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v, 0) - - case *interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertInterfaceNumbers(v, 0) - - default: - return json.Unmarshal(data, v) - } -} - -func convertInterfaceNumbers(v *interface{}, depth int) error { - var err error - switch v2 := (*v).(type) { - case json.Number: - *v, err = convertNumber(v2) - case map[string]interface{}: - err = convertMapNumbers(v2, depth+1) - case []interface{}: - err = convertSliceNumbers(v2, depth+1) - } - return err -} - -// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. 
-// values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}, depth int) error { - if depth > maxDepth { - return fmt.Errorf("exceeded max depth of %d", maxDepth) - } - - var err error - for k, v := range m { - switch v := v.(type) { - case json.Number: - m[k], err = convertNumber(v) - case map[string]interface{}: - err = convertMapNumbers(v, depth+1) - case []interface{}: - err = convertSliceNumbers(v, depth+1) - } - if err != nil { - return err - } - } - return nil -} - -// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. -// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}, depth int) error { - if depth > maxDepth { - return fmt.Errorf("exceeded max depth of %d", maxDepth) - } - - var err error - for i, v := range s { - switch v := v.(type) { - case json.Number: - s[i], err = convertNumber(v) - case map[string]interface{}: - err = convertMapNumbers(v, depth+1) - case []interface{}: - err = convertSliceNumbers(v, depth+1) - } - if err != nil { - return err - } - } - return nil -} - -// convertNumber converts a json.Number to an int64 or float64, or returns an error -func convertNumber(n json.Number) (interface{}, error) { - // Attempt to convert to an int64 first - if i, err := n.Int64(); err == nil { - return i, nil - } - // Return a float64 (default json.Decode() behavior) - // An overflow will return an error - return n.Float64() -} diff --git a/pkg/kutil/mergepatch/OWNERS b/pkg/kutil/mergepatch/OWNERS deleted file mode 100644 index 3f72c69..0000000 --- a/pkg/kutil/mergepatch/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- pwittrock -reviewers: -- mengqiy -- apelisse diff --git a/pkg/kutil/mergepatch/errors.go b/pkg/kutil/mergepatch/errors.go deleted file mode 100644 index 16501d5..0000000 --- 
a/pkg/kutil/mergepatch/errors.go +++ /dev/null @@ -1,102 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mergepatch - -import ( - "errors" - "fmt" - "reflect" -) - -var ( - ErrBadJSONDoc = errors.New("invalid JSON document") - ErrNoListOfLists = errors.New("lists of lists are not supported") - ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") - ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") - ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") - ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") - ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") -) - -func ErrNoMergeKey(m map[string]interface{}, k string) error { - return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) -} - -func ErrBadArgType(expected, actual interface{}) error { - return fmt.Errorf("expected a %s, but received a %s", - reflect.TypeOf(expected), - reflect.TypeOf(actual)) -} - -func ErrBadArgKind(expected, actual interface{}) error { - var expectedKindString, actualKindString string - if expected == nil { - expectedKindString = "nil" - } else { - expectedKindString = reflect.TypeOf(expected).Kind().String() - } - if actual == nil { - actualKindString = "nil" - } else { - actualKindString = 
reflect.TypeOf(actual).Kind().String() - } - return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) -} - -func ErrBadPatchType(t interface{}, m map[string]interface{}) error { - return fmt.Errorf("unknown patch type: %s in map: %v", t, m) -} - -// IsPreconditionFailed returns true if the provided error indicates -// a precondition failed. -func IsPreconditionFailed(err error) bool { - _, ok := err.(ErrPreconditionFailed) - return ok -} - -type ErrPreconditionFailed struct { - message string -} - -func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { - s := fmt.Sprintf("precondition failed for: %v", target) - return ErrPreconditionFailed{s} -} - -func (err ErrPreconditionFailed) Error() string { - return err.message -} - -type ErrConflict struct { - message string -} - -func NewErrConflict(patch, current string) ErrConflict { - s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) - return ErrConflict{s} -} - -func (err ErrConflict) Error() string { - return err.message -} - -// IsConflict returns true if the provided error indicates -// a conflict between the patch and the current configuration. -func IsConflict(err error) bool { - _, ok := err.(ErrConflict) - return ok -} diff --git a/pkg/kutil/mergepatch/util.go b/pkg/kutil/mergepatch/util.go deleted file mode 100644 index 990fa0d..0000000 --- a/pkg/kutil/mergepatch/util.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mergepatch - -import ( - "fmt" - "reflect" - - "github.com/davecgh/go-spew/spew" - "sigs.k8s.io/yaml" -) - -// PreconditionFunc asserts that an incompatible change is not present within a patch. -type PreconditionFunc func(interface{}) bool - -// RequireKeyUnchanged returns a precondition function that fails if the provided key -// is present in the patch (indicating that its value has changed). -func RequireKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - - // The presence of key means that its value has been changed, so the test fails. - _, ok = patchMap[key] - return !ok - } -} - -// RequireMetadataKeyUnchanged creates a precondition function that fails -// if the metadata.key is present in the patch (indicating its value -// has changed). -func RequireMetadataKeyUnchanged(key string) PreconditionFunc { - return func(patch interface{}) bool { - patchMap, ok := patch.(map[string]interface{}) - if !ok { - return true - } - patchMap1, ok := patchMap["metadata"] - if !ok { - return true - } - patchMap2, ok := patchMap1.(map[string]interface{}) - if !ok { - return true - } - _, ok = patchMap2[key] - return !ok - } -} - -func ToYAMLOrError(v interface{}) string { - y, err := toYAML(v) - if err != nil { - return err.Error() - } - - return y -} - -func toYAML(v interface{}) (string, error) { - y, err := yaml.Marshal(v) - if err != nil { - return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) - } - - return string(y), nil -} - -// HasConflicts returns true if the left and right JSON interface objects overlap with -// different values in any key. All keys are required to be strings. Since patches of the -// same Type have congruent keys, this is valid for multiple patch types. 
This method -// supports JSON merge patch semantics. -// -// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. -// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). -func HasConflicts(left, right interface{}) (bool, error) { - switch typedLeft := left.(type) { - case map[string]interface{}: - switch typedRight := right.(type) { - case map[string]interface{}: - for key, leftValue := range typedLeft { - rightValue, ok := typedRight[key] - if !ok { - continue - } - if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { - return conflict, err - } - } - - return false, nil - default: - return true, nil - } - case []interface{}: - switch typedRight := right.(type) { - case []interface{}: - if len(typedLeft) != len(typedRight) { - return true, nil - } - - for i := range typedLeft { - if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { - return conflict, err - } - } - - return false, nil - default: - return true, nil - } - case string, float64, bool, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} diff --git a/pkg/kutil/naming/from_stack.go b/pkg/kutil/naming/from_stack.go deleted file mode 100644 index d69bf32..0000000 --- a/pkg/kutil/naming/from_stack.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package naming - -import ( - "fmt" - "regexp" - goruntime "runtime" - "runtime/debug" - "strconv" - "strings" -) - -// GetNameFromCallsite walks back through the call stack until we find a caller from outside of the ignoredPackages -// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging -func GetNameFromCallsite(ignoredPackages ...string) string { - name := "????" - const maxStack = 10 - for i := 1; i < maxStack; i++ { - _, file, line, ok := goruntime.Caller(i) - if !ok { - file, line, ok = extractStackCreator() - if !ok { - break - } - i += maxStack - } - if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) { - continue - } - - file = trimPackagePrefix(file) - name = fmt.Sprintf("%s:%d", file, line) - break - } - return name -} - -// hasPackage returns true if the file is in one of the ignored packages. -func hasPackage(file string, ignoredPackages []string) bool { - for _, ignoredPackage := range ignoredPackages { - if strings.Contains(file, ignoredPackage) { - return true - } - } - return false -} - -// trimPackagePrefix reduces duplicate values off the front of a package name. -func trimPackagePrefix(file string) string { - if l := strings.LastIndex(file, "/vendor/"); l >= 0 { - return file[l+len("/vendor/"):] - } - if l := strings.LastIndex(file, "/src/"); l >= 0 { - return file[l+5:] - } - if l := strings.LastIndex(file, "/pkg/"); l >= 0 { - return file[l+1:] - } - return file -} - -var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`) - -// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false -// if the creator cannot be located. 
-// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440 -func extractStackCreator() (string, int, bool) { - stack := debug.Stack() - matches := stackCreator.FindStringSubmatch(string(stack)) - if len(matches) != 4 { - return "", 0, false - } - line, err := strconv.Atoi(matches[3]) - if err != nil { - return "", 0, false - } - return matches[2], line, true -} diff --git a/pkg/kutil/net/http.go b/pkg/kutil/net/http.go deleted file mode 100644 index 945886c..0000000 --- a/pkg/kutil/net/http.go +++ /dev/null @@ -1,724 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package net - -import ( - "bufio" - "bytes" - "context" - "crypto/tls" - "errors" - "fmt" - "io" - "mime" - "net" - "net/http" - "net/url" - "os" - "path" - "regexp" - "strconv" - "strings" - "unicode" - "unicode/utf8" - - "golang.org/x/net/http2" - "k8s.io/klog/v2" -) - -// JoinPreservingTrailingSlash does a path.Join of the specified elements, -// preserving any trailing slash on the last non-empty segment -func JoinPreservingTrailingSlash(elem ...string) string { - // do the basic path join - result := path.Join(elem...) 
- - // find the last non-empty segment - for i := len(elem) - 1; i >= 0; i-- { - if len(elem[i]) > 0 { - // if the last segment ended in a slash, ensure our result does as well - if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") { - result += "/" - } - break - } - } - - return result -} - -// IsTimeout returns true if the given error is a network timeout error -func IsTimeout(err error) bool { - var neterr net.Error - if errors.As(err, &neterr) { - return neterr != nil && neterr.Timeout() - } - return false -} - -// IsProbableEOF returns true if the given error resembles a connection termination -// scenario that would justify assuming that the watch is empty. -// These errors are what the Go http stack returns back to us which are general -// connection closure errors (strongly correlated) and callers that need to -// differentiate probable errors in connection behavior between normal "this is -// disconnected" should use the method. -func IsProbableEOF(err error) bool { - if err == nil { - return false - } - var uerr *url.Error - if errors.As(err, &uerr) { - err = uerr.Err - } - msg := err.Error() - switch { - case err == io.EOF: - return true - case err == io.ErrUnexpectedEOF: - return true - case msg == "http: can't write HTTP request on broken connection": - return true - case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): - return true - case strings.Contains(msg, "connection reset by peer"): - return true - case strings.Contains(strings.ToLower(msg), "use of closed network connection"): - return true - } - return false -} - -var defaultTransport = http.DefaultTransport.(*http.Transport) - -// SetOldTransportDefaults applies the defaults from http.DefaultTransport -// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset -func SetOldTransportDefaults(t *http.Transport) *http.Transport { - if t.Proxy == nil || isDefault(t.Proxy) { - // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it 
impossible to exclude things like pod and service IPs from proxy settings - // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY - t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) - } - // If no custom dialer is set, use the default context dialer - if t.DialContext == nil && t.Dial == nil { - t.DialContext = defaultTransport.DialContext - } - if t.TLSHandshakeTimeout == 0 { - t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout - } - if t.IdleConnTimeout == 0 { - t.IdleConnTimeout = defaultTransport.IdleConnTimeout - } - return t -} - -// SetTransportDefaults applies the defaults from http.DefaultTransport -// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset -func SetTransportDefaults(t *http.Transport) *http.Transport { - t = SetOldTransportDefaults(t) - // Allow clients to disable http2 if needed. - if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - klog.Infof("HTTP2 has been explicitly disabled") - } else if allowsHTTP2(t) { - if err := http2.ConfigureTransport(t); err != nil { - klog.Warningf("Transport failed http2 configuration: %v", err) - } - } - return t -} - -func allowsHTTP2(t *http.Transport) bool { - if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { - // the transport expressed no NextProto preference, allow - return true - } - for _, p := range t.TLSClientConfig.NextProtos { - if p == http2.NextProtoTLS { - // the transport explicitly allowed http/2 - return true - } - } - // the transport explicitly set NextProtos and excluded http/2 - return false -} - -type RoundTripperWrapper interface { - http.RoundTripper - WrappedRoundTripper() http.RoundTripper -} - -type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error) - -func DialerFor(transport http.RoundTripper) (DialFunc, error) { - if transport == nil { - return nil, nil - } - - switch transport := transport.(type) { - case *http.Transport: - // transport.DialContext takes precedence over transport.Dial - if 
transport.DialContext != nil { - return transport.DialContext, nil - } - // adapt transport.Dial to the DialWithContext signature - if transport.Dial != nil { - return func(ctx context.Context, net, addr string) (net.Conn, error) { - return transport.Dial(net, addr) - }, nil - } - // otherwise return nil - return nil, nil - case RoundTripperWrapper: - return DialerFor(transport.WrappedRoundTripper()) - default: - return nil, fmt.Errorf("unknown transport type: %T", transport) - } -} - -type TLSClientConfigHolder interface { - TLSClientConfig() *tls.Config -} - -func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { - if transport == nil { - return nil, nil - } - - switch transport := transport.(type) { - case *http.Transport: - return transport.TLSClientConfig, nil - case TLSClientConfigHolder: - return transport.TLSClientConfig(), nil - case RoundTripperWrapper: - return TLSClientConfig(transport.WrappedRoundTripper()) - default: - return nil, fmt.Errorf("unknown transport type: %T", transport) - } -} - -func FormatURL(scheme string, host string, port int, path string) *url.URL { - return &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(host, strconv.Itoa(port)), - Path: path, - } -} - -func GetHTTPClient(req *http.Request) string { - if ua := req.UserAgent(); len(ua) != 0 { - return ua - } - return "unknown" -} - -// SourceIPs splits the comma separated X-Forwarded-For header and joins it with -// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs. -// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain. -// The req.RemoteAddr is always the last IP in the returned list. -// It returns nil if all of these are empty or invalid. -func SourceIPs(req *http.Request) []net.IP { - var srcIPs []net.IP - - hdr := req.Header - // First check the X-Forwarded-For header for requests via proxy. 
- hdrForwardedFor := hdr.Get("X-Forwarded-For") - if hdrForwardedFor != "" { - // X-Forwarded-For can be a csv of IPs in case of multiple proxies. - // Use the first valid one. - parts := strings.Split(hdrForwardedFor, ",") - for _, part := range parts { - ip := net.ParseIP(strings.TrimSpace(part)) - if ip != nil { - srcIPs = append(srcIPs, ip) - } - } - } - - // Try the X-Real-Ip header. - hdrRealIp := hdr.Get("X-Real-Ip") - if hdrRealIp != "" { - ip := net.ParseIP(hdrRealIp) - // Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain. - if ip != nil && !containsIP(srcIPs, ip) { - srcIPs = append(srcIPs, ip) - } - } - - // Always include the request Remote Address as it cannot be easily spoofed. - var remoteIP net.IP - // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. - host, _, err := net.SplitHostPort(req.RemoteAddr) - if err == nil { - remoteIP = net.ParseIP(host) - } - // Fallback if Remote Address was just IP. - if remoteIP == nil { - remoteIP = net.ParseIP(req.RemoteAddr) - } - - // Don't duplicate remote IP if it's already the last address in the chain. - if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) { - srcIPs = append(srcIPs, remoteIP) - } - - return srcIPs -} - -// Checks whether the given IP address is contained in the list of IPs. -func containsIP(ips []net.IP, ip net.IP) bool { - for _, v := range ips { - if v.Equal(ip) { - return true - } - } - return false -} - -// Extracts and returns the clients IP from the given request. -// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. -// Returns nil if none of them are set or is set to an invalid value. 
-func GetClientIP(req *http.Request) net.IP { - ips := SourceIPs(req) - if len(ips) == 0 { - return nil - } - return ips[0] -} - -// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's -// IP address to the X-Forwarded-For chain. -func AppendForwardedForHeader(req *http.Request) { - // Copied from net/http/httputil/reverseproxy.go: - if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - // If we aren't the first proxy retain prior - // X-Forwarded-For information as a comma+space - // separated list and fold multiple headers into one. - if prior, ok := req.Header["X-Forwarded-For"]; ok { - clientIP = strings.Join(prior, ", ") + ", " + clientIP - } - req.Header.Set("X-Forwarded-For", clientIP) - } -} - -var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) - -// isDefault checks to see if the transportProxierFunc is pointing to the default one -func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { - transportProxierPointer := fmt.Sprintf("%p", transportProxier) - return transportProxierPointer == defaultProxyFuncPointer -} - -// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if -// no matching CIDRs are found -func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { - // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it - noProxyEnv := os.Getenv("NO_PROXY") - if noProxyEnv == "" { - noProxyEnv = os.Getenv("no_proxy") - } - noProxyRules := strings.Split(noProxyEnv, ",") - - cidrs := []*net.IPNet{} - for _, noProxyRule := range noProxyRules { - _, cidr, _ := net.ParseCIDR(noProxyRule) - if cidr != nil { - cidrs = append(cidrs, cidr) - } - } - - if len(cidrs) == 0 { - return delegate - } - - return func(req *http.Request) (*url.URL, error) { - ip := 
net.ParseIP(req.URL.Hostname()) - if ip == nil { - return delegate(req) - } - - for _, cidr := range cidrs { - if cidr.Contains(ip) { - return nil, nil - } - } - - return delegate(req) - } -} - -// DialerFunc implements Dialer for the provided function. -type DialerFunc func(req *http.Request) (net.Conn, error) - -func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) { - return fn(req) -} - -// Dialer dials a host and writes a request to it. -type Dialer interface { - // Dial connects to the host specified by req's URL, writes the request to the connection, and - // returns the opened net.Conn. - Dial(req *http.Request) (net.Conn, error) -} - -// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to -// originalLocation). It returns the opened net.Conn and the raw response bytes. -// If requireSameHostRedirects is true, only redirects to the same host are permitted. -func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { - const ( - maxRedirects = 9 // Fail on the 10th redirect - maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers - ) - - var ( - location = originalLocation - method = originalMethod - intermediateConn net.Conn - rawResponse = bytes.NewBuffer(make([]byte, 0, 256)) - body = originalBody - ) - - defer func() { - if intermediateConn != nil { - intermediateConn.Close() - } - }() - -redirectLoop: - for redirects := 0; ; redirects++ { - if redirects > maxRedirects { - return nil, nil, fmt.Errorf("too many redirects (%d)", redirects) - } - - req, err := http.NewRequest(method, location.String(), body) - if err != nil { - return nil, nil, err - } - - req.Header = header - - intermediateConn, err = dialer.Dial(req) - if err != nil { - return nil, nil, err - } - - // Peek at the backend response. 
- rawResponse.Reset() - respReader := bufio.NewReader(io.TeeReader( - io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes. - rawResponse)) // Save the raw response. - resp, err := http.ReadResponse(respReader, nil) - if err != nil { - // Unable to read the backend response; let the client handle it. - klog.Warningf("Error reading backend response: %v", err) - break redirectLoop - } - - switch resp.StatusCode { - case http.StatusFound: - // Redirect, continue. - default: - // Don't redirect. - break redirectLoop - } - - // Redirected requests switch to "GET" according to the HTTP spec: - // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 - method = "GET" - // don't send a body when following redirects - body = nil - - resp.Body.Close() // not used - - // Prepare to follow the redirect. - redirectStr := resp.Header.Get("Location") - if redirectStr == "" { - return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode) - } - // We have to parse relative to the current location, NOT originalLocation. For example, - // if we request http://foo.com/a and get back "http://bar.com/b", the result should be - // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result - // should be http://bar.com/c, not http://foo.com/c. - location, err = location.Parse(redirectStr) - if err != nil { - return nil, nil, fmt.Errorf("malformed Location header: %v", err) - } - - // Only follow redirects to the same host. Otherwise, propagate the redirect response back. - if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) - } - - // Reset the connection. - intermediateConn.Close() - intermediateConn = nil - } - - connToReturn := intermediateConn - intermediateConn = nil // Don't close the connection when we return it. 
- return connToReturn, rawResponse.Bytes(), nil -} - -// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers. -func CloneRequest(req *http.Request) *http.Request { - r := new(http.Request) - - // shallow clone - *r = *req - - // deep copy headers - r.Header = CloneHeader(req.Header) - - return r -} - -// CloneHeader creates a deep copy of an http.Header. -func CloneHeader(in http.Header) http.Header { - out := make(http.Header, len(in)) - for key, values := range in { - newValues := make([]string, len(values)) - copy(newValues, values) - out[key] = newValues - } - return out -} - -// WarningHeader contains a single RFC2616 14.46 warnings header -type WarningHeader struct { - // Codeindicates the type of warning. 299 is a miscellaneous persistent warning - Code int - // Agent contains the name or pseudonym of the server adding the Warning header. - // A single "-" is recommended when agent is unknown. - Agent string - // Warning text - Text string -} - -// ParseWarningHeaders extract RFC2616 14.46 warnings headers from the specified set of header values. -// Multiple comma-separated warnings per header are supported. -// If errors are encountered on a header, the remainder of that header are skipped and subsequent headers are parsed. -// Returns successfully parsed warnings and any errors encountered. 
-func ParseWarningHeaders(headers []string) ([]WarningHeader, []error) { - var ( - results []WarningHeader - errs []error - ) - for _, header := range headers { - for len(header) > 0 { - result, remainder, err := ParseWarningHeader(header) - if err != nil { - errs = append(errs, err) - break - } - results = append(results, result) - header = remainder - } - } - return results, errs -} - -var ( - codeMatcher = regexp.MustCompile(`^[0-9]{3}$`) - wordDecoder = &mime.WordDecoder{} -) - -// ParseWarningHeader extracts one RFC2616 14.46 warning from the specified header, -// returning an error if the header does not contain a correctly formatted warning. -// Any remaining content in the header is returned. -func ParseWarningHeader(header string) (result WarningHeader, remainder string, err error) { - // https://tools.ietf.org/html/rfc2616#section-14.46 - // updated by - // https://tools.ietf.org/html/rfc7234#section-5.5 - // https://tools.ietf.org/html/rfc7234#appendix-A - // Some requirements regarding production and processing of the Warning - // header fields have been relaxed, as it is not widely implemented. - // Furthermore, the Warning header field no longer uses RFC 2047 - // encoding, nor does it allow multiple languages, as these aspects were - // not implemented. - // - // Format is one of: - // warn-code warn-agent "warn-text" - // warn-code warn-agent "warn-text" "warn-date" - // - // warn-code is a three digit number - // warn-agent is unquoted and contains no spaces - // warn-text is quoted with backslash escaping (RFC2047-encoded according to RFC2616, not encoded according to RFC7234) - // warn-date is optional, quoted, and in HTTP-date format (no embedded or escaped quotes) - // - // additional warnings can optionally be included in the same header by comma-separating them: - // warn-code warn-agent "warn-text" "warn-date"[, warn-code warn-agent "warn-text" "warn-date", ...] 
- - // tolerate leading whitespace - header = strings.TrimSpace(header) - - parts := strings.SplitN(header, " ", 3) - if len(parts) != 3 { - return WarningHeader{}, "", errors.New("invalid warning header: fewer than 3 segments") - } - code, agent, textDateRemainder := parts[0], parts[1], parts[2] - - // verify code format - if !codeMatcher.Match([]byte(code)) { - return WarningHeader{}, "", errors.New("invalid warning header: code segment is not 3 digits between 100-299") - } - codeInt, _ := strconv.ParseInt(code, 10, 64) - - // verify agent presence - if len(agent) == 0 { - return WarningHeader{}, "", errors.New("invalid warning header: empty agent segment") - } - if !utf8.ValidString(agent) || hasAnyRunes(agent, unicode.IsControl) { - return WarningHeader{}, "", errors.New("invalid warning header: invalid agent") - } - - // verify textDateRemainder presence - if len(textDateRemainder) == 0 { - return WarningHeader{}, "", errors.New("invalid warning header: empty text segment") - } - - // extract text - text, dateAndRemainder, err := parseQuotedString(textDateRemainder) - if err != nil { - return WarningHeader{}, "", fmt.Errorf("invalid warning header: %v", err) - } - // tolerate RFC2047-encoded text from warnings produced according to RFC2616 - if decodedText, err := wordDecoder.DecodeHeader(text); err == nil { - text = decodedText - } - if !utf8.ValidString(text) || hasAnyRunes(text, unicode.IsControl) { - return WarningHeader{}, "", errors.New("invalid warning header: invalid text") - } - result = WarningHeader{Code: int(codeInt), Agent: agent, Text: text} - - if len(dateAndRemainder) > 0 { - if dateAndRemainder[0] == '"' { - // consume date - foundEndQuote := false - for i := 1; i < len(dateAndRemainder); i++ { - if dateAndRemainder[i] == '"' { - foundEndQuote = true - remainder = strings.TrimSpace(dateAndRemainder[i+1:]) - break - } - } - if !foundEndQuote { - return WarningHeader{}, "", errors.New("invalid warning header: unterminated date segment") - } - } 
else { - remainder = dateAndRemainder - } - } - if len(remainder) > 0 { - if remainder[0] == ',' { - // consume comma if present - remainder = strings.TrimSpace(remainder[1:]) - } else { - return WarningHeader{}, "", errors.New("invalid warning header: unexpected token after warn-date") - } - } - - return result, remainder, nil -} - -func parseQuotedString(quotedString string) (string, string, error) { - if len(quotedString) == 0 { - return "", "", errors.New("invalid quoted string: 0-length") - } - - if quotedString[0] != '"' { - return "", "", errors.New("invalid quoted string: missing initial quote") - } - - quotedString = quotedString[1:] - var remainder string - escaping := false - closedQuote := false - result := &bytes.Buffer{} -loop: - for i := 0; i < len(quotedString); i++ { - b := quotedString[i] - switch b { - case '"': - if escaping { - result.WriteByte(b) - escaping = false - } else { - closedQuote = true - remainder = strings.TrimSpace(quotedString[i+1:]) - break loop - } - case '\\': - if escaping { - result.WriteByte(b) - escaping = false - } else { - escaping = true - } - default: - result.WriteByte(b) - escaping = false - } - } - - if !closedQuote { - return "", "", errors.New("invalid quoted string: missing closing quote") - } - return result.String(), remainder, nil -} - -func NewWarningHeader(code int, agent, text string) (string, error) { - if code < 0 || code > 999 { - return "", errors.New("code must be between 0 and 999") - } - if len(agent) == 0 { - agent = "-" - } else if !utf8.ValidString(agent) || strings.ContainsAny(agent, `\"`) || hasAnyRunes(agent, unicode.IsSpace, unicode.IsControl) { - return "", errors.New("agent must be valid UTF-8 and must not contain spaces, quotes, backslashes, or control characters") - } - if !utf8.ValidString(text) || hasAnyRunes(text, unicode.IsControl) { - return "", errors.New("text must be valid UTF-8 and must not contain control characters") - } - return fmt.Sprintf("%03d %s %s", code, agent, 
makeQuotedString(text)), nil -} - -func hasAnyRunes(s string, runeCheckers ...func(rune) bool) bool { - for _, r := range s { - for _, checker := range runeCheckers { - if checker(r) { - return true - } - } - } - return false -} - -func makeQuotedString(s string) string { - result := &bytes.Buffer{} - // opening quote - result.WriteRune('"') - for _, c := range s { - switch c { - case '"', '\\': - // escape " and \ - result.WriteRune('\\') - result.WriteRune(c) - default: - // write everything else as-is - result.WriteRune(c) - } - } - // closing quote - result.WriteRune('"') - return result.String() -} diff --git a/pkg/kutil/net/interface.go b/pkg/kutil/net/interface.go deleted file mode 100644 index 204e223..0000000 --- a/pkg/kutil/net/interface.go +++ /dev/null @@ -1,457 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package net - -import ( - "bufio" - "encoding/hex" - "fmt" - "io" - "net" - "os" - - "strings" - - "k8s.io/klog/v2" -) - -type AddressFamily uint - -const ( - familyIPv4 AddressFamily = 4 - familyIPv6 AddressFamily = 6 -) - -type AddressFamilyPreference []AddressFamily - -var ( - preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6} - preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4} -) - -const ( - // LoopbackInterfaceName is the default name of the loopback interface - LoopbackInterfaceName = "lo" -) - -const ( - ipv4RouteFile = "/proc/net/route" - ipv6RouteFile = "/proc/net/ipv6_route" -) - -type Route struct { - Interface string - Destination net.IP - Gateway net.IP - Family AddressFamily -} - -type RouteFile struct { - name string - parse func(input io.Reader) ([]Route, error) -} - -// noRoutesError can be returned in case of no routes -type noRoutesError struct { - message string -} - -func (e noRoutesError) Error() string { - return e.message -} - -// IsNoRoutesError checks if an error is of type noRoutesError -func IsNoRoutesError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case noRoutesError: - return true - default: - return false - } -} - -var ( - v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes} - v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes} -) - -func (rf RouteFile) extract() ([]Route, error) { - file, err := os.Open(rf.name) - if err != nil { - return nil, err - } - defer file.Close() - return rf.parse(file) -} - -// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes. 
-func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) { - routes := []Route{} - scanner := bufio.NewReader(input) - for { - line, err := scanner.ReadString('\n') - if err == io.EOF { - break - } - //ignore the headers in the route info - if strings.HasPrefix(line, "Iface") { - continue - } - fields := strings.Fields(line) - // Interested in fields: - // 0 - interface name - // 1 - destination address - // 2 - gateway - dest, err := parseIP(fields[1], familyIPv4) - if err != nil { - return nil, err - } - gw, err := parseIP(fields[2], familyIPv4) - if err != nil { - return nil, err - } - if !dest.Equal(net.IPv4zero) { - continue - } - routes = append(routes, Route{ - Interface: fields[0], - Destination: dest, - Gateway: gw, - Family: familyIPv4, - }) - } - return routes, nil -} - -func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) { - routes := []Route{} - scanner := bufio.NewReader(input) - for { - line, err := scanner.ReadString('\n') - if err == io.EOF { - break - } - fields := strings.Fields(line) - // Interested in fields: - // 0 - destination address - // 4 - gateway - // 9 - interface name - dest, err := parseIP(fields[0], familyIPv6) - if err != nil { - return nil, err - } - gw, err := parseIP(fields[4], familyIPv6) - if err != nil { - return nil, err - } - if !dest.Equal(net.IPv6zero) { - continue - } - if gw.Equal(net.IPv6zero) { - continue // loopback - } - routes = append(routes, Route{ - Interface: fields[9], - Destination: dest, - Gateway: gw, - Family: familyIPv6, - }) - } - return routes, nil -} - -// parseIP takes the hex IP address string from route file and converts it -// to a net.IP address. For IPv4, the value must be converted to big endian. 
-func parseIP(str string, family AddressFamily) (net.IP, error) { - if str == "" { - return nil, fmt.Errorf("input is nil") - } - bytes, err := hex.DecodeString(str) - if err != nil { - return nil, err - } - if family == familyIPv4 { - if len(bytes) != net.IPv4len { - return nil, fmt.Errorf("invalid IPv4 address in route") - } - return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil - } - // Must be IPv6 - if len(bytes) != net.IPv6len { - return nil, fmt.Errorf("invalid IPv6 address in route") - } - return net.IP(bytes), nil -} - -func isInterfaceUp(intf *net.Interface) bool { - if intf == nil { - return false - } - if intf.Flags&net.FlagUp != 0 { - klog.V(4).Infof("Interface %v is up", intf.Name) - return true - } - return false -} - -func isLoopbackOrPointToPoint(intf *net.Interface) bool { - return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 -} - -// getMatchingGlobalIP returns the first valid global unicast address of the given -// 'family' from the list of 'addrs'. -func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { - if len(addrs) > 0 { - for i := range addrs { - klog.V(4).Infof("Checking addr %s.", addrs[i].String()) - ip, _, err := net.ParseCIDR(addrs[i].String()) - if err != nil { - return nil, err - } - if memberOf(ip, family) { - if ip.IsGlobalUnicast() { - klog.V(4).Infof("IP found %v", ip) - return ip, nil - } else { - klog.V(4).Infof("Non-global unicast address found %v", ip) - } - } else { - klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) - } - - } - } - return nil, nil -} - -// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The -// interface must be up, the IP must in the family requested, and the IP must be a global unicast address. 
-func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) { - intf, err := nw.InterfaceByName(intfName) - if err != nil { - return nil, err - } - if isInterfaceUp(intf) { - addrs, err := nw.Addrs(intf) - if err != nil { - return nil, err - } - klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) - matchingIP, err := getMatchingGlobalIP(addrs, forFamily) - if err != nil { - return nil, err - } - if matchingIP != nil { - klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) - return matchingIP, nil - } - } - return nil, nil -} - -// memberOf tells if the IP is of the desired family. Used for checking interface addresses. -func memberOf(ip net.IP, family AddressFamily) bool { - if ip.To4() != nil { - return family == familyIPv4 - } else { - return family == familyIPv6 - } -} - -// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that -// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. 
-// addressFamilies determines whether it prefers IPv4 or IPv6 -func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { - intfs, err := nw.Interfaces() - if err != nil { - return nil, err - } - if len(intfs) == 0 { - return nil, fmt.Errorf("no interfaces found on host.") - } - for _, family := range addressFamilies { - klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) - for _, intf := range intfs { - if !isInterfaceUp(&intf) { - klog.V(4).Infof("Skipping: down interface %q", intf.Name) - continue - } - if isLoopbackOrPointToPoint(&intf) { - klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) - continue - } - addrs, err := nw.Addrs(&intf) - if err != nil { - return nil, err - } - if len(addrs) == 0 { - klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) - continue - } - for _, addr := range addrs { - ip, _, err := net.ParseCIDR(addr.String()) - if err != nil { - return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) - } - if !memberOf(ip, family) { - klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) - continue - } - // TODO: Decide if should open up to allow IPv6 LLAs in future. - if !ip.IsGlobalUnicast() { - klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) - continue - } - klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) - return ip, nil - } - } - } - return nil, fmt.Errorf("no acceptable interface with global unicast address found on host") -} - -// ChooseHostInterface is a method used fetch an IP for a daemon. -// If there is no routing info file, it will choose a global IP from the system -// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the -// IP of the interface with a gateway on it (with priority given to IPv4). 
For a node -// with no internet connection, it returns error. -func ChooseHostInterface() (net.IP, error) { - return chooseHostInterface(preferIPv4) -} - -func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) { - var nw networkInterfacer = networkInterface{} - if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { - return chooseIPFromHostInterfaces(nw, addressFamilies) - } - routes, err := getAllDefaultRoutes() - if err != nil { - return nil, err - } - return chooseHostInterfaceFromRoute(routes, nw, addressFamilies) -} - -// networkInterfacer defines an interface for several net library functions. Production -// code will forward to net library functions, and unit tests will override the methods -// for testing purposes. -type networkInterfacer interface { - InterfaceByName(intfName string) (*net.Interface, error) - Addrs(intf *net.Interface) ([]net.Addr, error) - Interfaces() ([]net.Interface, error) -} - -// networkInterface implements the networkInterfacer interface for production code, just -// wrapping the underlying net library function calls. -type networkInterface struct{} - -func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { - return net.InterfaceByName(intfName) -} - -func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { - return intf.Addrs() -} - -func (_ networkInterface) Interfaces() ([]net.Interface, error) { - return net.Interfaces() -} - -// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable -// to read the IPv4 routing info file, we return an error. If unable to read the IPv6 -// routing info file (which is optional), we'll just use the IPv4 route information. -// Using all the routing info, if no default routes are found, an error is returned. -func getAllDefaultRoutes() ([]Route, error) { - routes, err := v4File.extract() - if err != nil { - return nil, err - } - v6Routes, _ := v6File.extract() - routes = append(routes, v6Routes...) 
- if len(routes) == 0 { - return nil, noRoutesError{ - message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name), - } - } - return routes, nil -} - -// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a -// global IP address from the interface for the route. addressFamilies determines whether it -// prefers IPv4 or IPv6 -func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { - for _, family := range addressFamilies { - klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) - for _, route := range routes { - if route.Family != family { - continue - } - klog.V(4).Infof("Default route transits interface %q", route.Interface) - finalIP, err := getIPFromInterface(route.Interface, family, nw) - if err != nil { - return nil, err - } - if finalIP != nil { - klog.V(4).Infof("Found active IP %v ", finalIP) - return finalIP, nil - } - } - } - klog.V(4).Infof("No active IP found by looking at default routes") - return nil, fmt.Errorf("unable to select an IP from default routes.") -} - -// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress: -// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface(). -// If bindAddress is unspecified or loopback, it returns the default IP of the same -// address family as bindAddress. -// Otherwise, it just returns bindAddress. 
-func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { - addressFamilies := preferIPv4 - if bindAddress != nil && memberOf(bindAddress, familyIPv6) { - addressFamilies = preferIPv6 - } - - if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { - hostIP, err := chooseHostInterface(addressFamilies) - if err != nil { - return nil, err - } - bindAddress = hostIP - } - return bindAddress, nil -} - -// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4. -// This is required in case of network setups where default routes are present, but network -// interfaces use only link-local addresses (e.g. as described in RFC5549). -// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface. -func ChooseBindAddressForInterface(intfName string) (net.IP, error) { - var nw networkInterfacer = networkInterface{} - for _, family := range preferIPv4 { - ip, err := getIPFromInterface(intfName, family, nw) - if err != nil { - return nil, err - } - if ip != nil { - return ip, nil - } - } - return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName) -} diff --git a/pkg/kutil/net/port_range.go b/pkg/kutil/net/port_range.go deleted file mode 100644 index 7b6eca8..0000000 --- a/pkg/kutil/net/port_range.go +++ /dev/null @@ -1,149 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package net - -import ( - "fmt" - "strconv" - "strings" -) - -// PortRange represents a range of TCP/UDP ports. To represent a single port, -// set Size to 1. -type PortRange struct { - Base int - Size int -} - -// Contains tests whether a given port falls within the PortRange. -func (pr *PortRange) Contains(p int) bool { - return (p >= pr.Base) && ((p - pr.Base) < pr.Size) -} - -// String converts the PortRange to a string representation, which can be -// parsed by PortRange.Set or ParsePortRange. -func (pr PortRange) String() string { - if pr.Size == 0 { - return "" - } - return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1) -} - -// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and -// sets the PortRange from it. This is part of the flag.Value and pflag.Value -// interfaces. -func (pr *PortRange) Set(value string) error { - const ( - SinglePortNotation = 1 << iota - HyphenNotation - PlusNotation - ) - - value = strings.TrimSpace(value) - hyphenIndex := strings.Index(value, "-") - plusIndex := strings.Index(value, "+") - - if value == "" { - pr.Base = 0 - pr.Size = 0 - return nil - } - - var err error - var low, high int - var notation int - - if plusIndex == -1 && hyphenIndex == -1 { - notation |= SinglePortNotation - } - if hyphenIndex != -1 { - notation |= HyphenNotation - } - if plusIndex != -1 { - notation |= PlusNotation - } - - switch notation { - case SinglePortNotation: - var port int - port, err = strconv.Atoi(value) - if err != nil { - return err - } - low = port - high = port - case HyphenNotation: - low, err = strconv.Atoi(value[:hyphenIndex]) - if err != nil { - return err - } - high, err = strconv.Atoi(value[hyphenIndex+1:]) - if err != nil { - return err - } - case PlusNotation: - var offset int - low, err = strconv.Atoi(value[:plusIndex]) - if err != nil { - return err - } - offset, err = strconv.Atoi(value[plusIndex+1:]) - if err != nil { - return err - } - high = low + offset - default: - 
return fmt.Errorf("unable to parse port range: %s", value) - } - - if low > 65535 || high > 65535 { - return fmt.Errorf("the port range cannot be greater than 65535: %s", value) - } - - if high < low { - return fmt.Errorf("end port cannot be less than start port: %s", value) - } - - pr.Base = low - pr.Size = 1 + high - low - return nil -} - -// Type returns a descriptive string about this type. This is part of the -// pflag.Value interface. -func (*PortRange) Type() string { - return "portRange" -} - -// ParsePortRange parses a string of the form "min-max", inclusive at both -// ends, and initializs a new PortRange from it. -func ParsePortRange(value string) (*PortRange, error) { - pr := &PortRange{} - err := pr.Set(value) - if err != nil { - return nil, err - } - return pr, nil -} - -func ParsePortRangeOrDie(value string) *PortRange { - pr, err := ParsePortRange(value) - if err != nil { - panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err)) - } - return pr -} diff --git a/pkg/kutil/net/port_split.go b/pkg/kutil/net/port_split.go deleted file mode 100644 index c0fd4e2..0000000 --- a/pkg/kutil/net/port_split.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package net - -import ( - "strings" - - "k8s.io/apimachinery/pkg/util/sets" -) - -var validSchemes = sets.NewString("http", "https", "") - -// SplitSchemeNamePort takes a string of the following forms: -// * "", returns "", "","", true -// * ":", returns "", "","",true -// * "::", returns "","","",true -// -// Name must be non-empty or valid will be returned false. -// Scheme must be "http" or "https" if specified -// Port is returned as a string, and it is not required to be numeric (could be -// used for a named port, for example). -func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) { - parts := strings.Split(id, ":") - switch len(parts) { - case 1: - name = parts[0] - case 2: - name = parts[0] - port = parts[1] - case 3: - scheme = parts[0] - name = parts[1] - port = parts[2] - default: - return "", "", "", false - } - - if len(name) > 0 && validSchemes.Has(scheme) { - return scheme, name, port, true - } else { - return "", "", "", false - } -} - -// JoinSchemeNamePort returns a string that specifies the scheme, name, and port: -// * "" -// * ":" -// * "::" -// None of the parameters may contain a ':' character -// Name is required -// Scheme must be "", "http", or "https" -func JoinSchemeNamePort(scheme, name, port string) string { - if len(scheme) > 0 { - // Must include three segments to specify scheme - return scheme + ":" + name + ":" + port - } - if len(port) > 0 { - // Must include two segments to specify port - return name + ":" + port - } - // Return name alone - return name -} diff --git a/pkg/kutil/net/util.go b/pkg/kutil/net/util.go deleted file mode 100644 index 5950087..0000000 --- a/pkg/kutil/net/util.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package net - -import ( - "errors" - "net" - "reflect" - "syscall" -) - -// IPNetEqual checks if the two input IPNets are representing the same subnet. -// For example, -// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. -// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. -func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { - if ipnet1 == nil || ipnet2 == nil { - return false - } - if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { - return true - } - return false -} - -// Returns if the given err is "connection reset by peer" error. -func IsConnectionReset(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - return errno == syscall.ECONNRESET - } - return false -} - -// Returns if the given err is "connection refused" error -func IsConnectionRefused(err error) bool { - var errno syscall.Errno - if errors.As(err, &errno) { - return errno == syscall.ECONNREFUSED - } - return false -} diff --git a/pkg/kutil/runtime/runtime.go b/pkg/kutil/runtime/runtime.go deleted file mode 100644 index 7b53f13..0000000 --- a/pkg/kutil/runtime/runtime.go +++ /dev/null @@ -1,172 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - "net/http" - "os" - "runtime" - "sync" - "time" -) - -var ( - // ReallyCrash controls the behavior of HandleCrash and now defaults - // true. It's still exposed so components can optionally set to false - // to restore prior behavior. - ReallyCrash = true -) - -// PanicHandlers is a list of functions which will be invoked when a panic happens. -var PanicHandlers = []func(interface{}){logPanic} - -// HandleCrash simply catches a crash and logs an error. Meant to be called via -// defer. Additional context-specific handlers can be provided, and will be -// called in case of panic. HandleCrash actually crashes, after calling the -// handlers and logging the panic message. -// -// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. -func HandleCrash(additionalHandlers ...func(interface{})) { - if r := recover(); r != nil { - for _, fn := range PanicHandlers { - fn(r) - } - for _, fn := range additionalHandlers { - fn(r) - } - if ReallyCrash { - // Actually proceed to panic. - panic(r) - } - } -} - -// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). -func logPanic(r interface{}) { - if r == http.ErrAbortHandler { - // honor the http.ErrAbortHandler sentinel panic value: - // ErrAbortHandler is a sentinel panic value to abort a handler. - // While any panic from ServeHTTP aborts the response to the client, - // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. 
- return - } - - // Same as stdlib http server code. Manually allocate stack trace buffer size - // to prevent excessively large logs - const size = 64 << 10 - stacktrace := make([]byte, size) - stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] - if _, ok := r.(string); ok { - fmt.Fprintf(os.Stderr,"Observed a panic: %s\n%s", r, stacktrace) - } else { - fmt.Fprintf(os.Stderr,"Observed a panic: %#v (%v)\n%s", r, r, stacktrace) - } -} - -// ErrorHandlers is a list of functions which will be invoked when an unreturnable -// error occurs. -// TODO(lavalamp): for testability, this and the below HandleError function -// should be packaged up into a testable and reusable object. -var ErrorHandlers = []func(error){ - logError, - (&rudimentaryErrorBackoff{ - lastErrorTime: time.Now(), - // 1ms was the number folks were able to stomach as a global rate limit. - // If you need to log errors more than 1000 times a second you - // should probably consider fixing your code instead. :) - minPeriod: time.Millisecond, - }).OnError, -} - -// HandlerError is a method to invoke when a non-user facing piece of code cannot -// return an error and needs to indicate it has been ignored. Invoking this method -// is preferable to logging the error - the default behavior is to log but the -// errors may be sent to a remote server for analysis. -func HandleError(err error) { - // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead - if err == nil { - return - } - - for _, fn := range ErrorHandlers { - fn(err) - } -} - -// logError prints an error with the call stack of the location it was reported -func logError(err error) { - fmt.Fprint(os.Stderr, err) -} - -type rudimentaryErrorBackoff struct { - minPeriod time.Duration // immutable - // TODO(lavalamp): use the clock for testability. Need to move that - // package for that to be accessible here. 
- lastErrorTimeLock sync.Mutex - lastErrorTime time.Time -} - -// OnError will block if it is called more often than the embedded period time. -// This will prevent overly tight hot error loops. -func (r *rudimentaryErrorBackoff) OnError(error) { - r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } - r.lastErrorTime = time.Now() -} - -// GetCaller returns the caller of the function that calls it. -func GetCaller() string { - var pc [1]uintptr - runtime.Callers(3, pc[:]) - f := runtime.FuncForPC(pc[0]) - if f == nil { - return fmt.Sprintf("Unable to find caller") - } - return f.Name() -} - -// RecoverFromPanic replaces the specified error with an error containing the -// original error, and the call tree when a panic occurs. This enables error -// handlers to handle errors and panics the same way. -func RecoverFromPanic(err *error) { - if r := recover(); r != nil { - // Same as stdlib http server code. Manually allocate stack trace buffer size - // to prevent excessively large logs - const size = 64 << 10 - stacktrace := make([]byte, size) - stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] - - *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%s", - r, - *err, - stacktrace) - } -} - -// Must panics on non-nil errors. Useful to handling programmer level errors. -func Must(err error) { - if err != nil { - panic(err) - } -} diff --git a/pkg/kutil/sets/byte.go b/pkg/kutil/sets/byte.go deleted file mode 100644 index 9bfa85d..0000000 --- a/pkg/kutil/sets/byte.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. -type Byte map[byte]Empty - -// NewByte creates a Byte from a list of values. -func NewByte(items ...byte) Byte { - ss := Byte{} - ss.Insert(items...) - return ss -} - -// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func ByteKeySet(theMap interface{}) Byte { - v := reflect.ValueOf(theMap) - ret := Byte{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(byte)) - } - return ret -} - -// Insert adds items to the set. -func (s Byte) Insert(items ...byte) Byte { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Byte) Delete(items ...byte) Byte { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Byte) Has(item byte) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Byte) HasAll(items ...byte) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. 
-func (s Byte) HasAny(items ...byte) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s Byte) Difference(s2 Byte) Byte { - result := NewByte() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Byte) Union(s2 Byte) Byte { - result := NewByte() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Byte) Intersection(s2 Byte) Byte { - var walk, other Byte - result := NewByte() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Byte) IsSuperset(s2 Byte) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. 
-// (In practice, this means same elements, order doesn't matter) -func (s1 Byte) Equal(s2 Byte) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfByte []byte - -func (s sortableSliceOfByte) Len() int { return len(s) } -func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } -func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted byte slice. -func (s Byte) List() []byte { - res := make(sortableSliceOfByte, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []byte(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Byte) UnsortedList() []byte { - res := make([]byte, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Byte) PopAny() (byte, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue byte - return zeroValue, false -} - -// Len returns the size of the set. -func (s Byte) Len() int { - return len(s) -} - -func lessByte(lhs, rhs byte) bool { - return lhs < rhs -} diff --git a/pkg/kutil/sets/doc.go b/pkg/kutil/sets/doc.go deleted file mode 100644 index b152a0b..0000000 --- a/pkg/kutil/sets/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. 
- -// Package sets has auto-generated set types. -package sets diff --git a/pkg/kutil/sets/empty.go b/pkg/kutil/sets/empty.go deleted file mode 100644 index e11e622..0000000 --- a/pkg/kutil/sets/empty.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -// Empty is public since it is used by some internal API objects for conversions between external -// string arrays and internal sets, and conversion logic requires public types today. -type Empty struct{} diff --git a/pkg/kutil/sets/int.go b/pkg/kutil/sets/int.go deleted file mode 100644 index 88bd709..0000000 --- a/pkg/kutil/sets/int.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. 
- -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. -type Int map[int]Empty - -// NewInt creates a Int from a list of values. -func NewInt(items ...int) Int { - ss := Int{} - ss.Insert(items...) - return ss -} - -// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func IntKeySet(theMap interface{}) Int { - v := reflect.ValueOf(theMap) - ret := Int{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int)) - } - return ret -} - -// Insert adds items to the set. -func (s Int) Insert(items ...int) Int { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Int) Delete(items ...int) Int { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Int) Has(item int) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int) HasAll(items ...int) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Int) HasAny(items ...int) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s Int) Difference(s2 Int) Int { - result := NewInt() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. 
-// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int) Union(s2 Int) Int { - result := NewInt() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int) Intersection(s2 Int) Int { - var walk, other Int - result := NewInt() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Int) IsSuperset(s2 Int) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Int) Equal(s2 Int) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt []int - -func (s sortableSliceOfInt) Len() int { return len(s) } -func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } -func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int slice. -func (s Int) List() []int { - res := make(sortableSliceOfInt, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int) UnsortedList() []int { - res := make([]int, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. 
-func (s Int) PopAny() (int, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int - return zeroValue, false -} - -// Len returns the size of the set. -func (s Int) Len() int { - return len(s) -} - -func lessInt(lhs, rhs int) bool { - return lhs < rhs -} diff --git a/pkg/kutil/sets/int32.go b/pkg/kutil/sets/int32.go deleted file mode 100644 index 96a4855..0000000 --- a/pkg/kutil/sets/int32.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. -type Int32 map[int32]Empty - -// NewInt32 creates a Int32 from a list of values. -func NewInt32(items ...int32) Int32 { - ss := Int32{} - ss.Insert(items...) - return ss -} - -// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func Int32KeySet(theMap interface{}) Int32 { - v := reflect.ValueOf(theMap) - ret := Int32{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int32)) - } - return ret -} - -// Insert adds items to the set. -func (s Int32) Insert(items ...int32) Int32 { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. 
-func (s Int32) Delete(items ...int32) Int32 { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Int32) Has(item int32) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int32) HasAll(items ...int32) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Int32) HasAny(items ...int32) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s Int32) Difference(s2 Int32) Int32 { - result := NewInt32() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int32) Union(s2 Int32) Int32 { - result := NewInt32() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int32) Intersection(s2 Int32) Int32 { - var walk, other Int32 - result := NewInt32() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. 
-func (s1 Int32) IsSuperset(s2 Int32) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Int32) Equal(s2 Int32) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt32 []int32 - -func (s sortableSliceOfInt32) Len() int { return len(s) } -func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } -func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int32 slice. -func (s Int32) List() []int32 { - res := make(sortableSliceOfInt32, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int32(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int32) UnsortedList() []int32 { - res := make([]int32, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Int32) PopAny() (int32, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int32 - return zeroValue, false -} - -// Len returns the size of the set. -func (s Int32) Len() int { - return len(s) -} - -func lessInt32(lhs, rhs int32) bool { - return lhs < rhs -} diff --git a/pkg/kutil/sets/int64.go b/pkg/kutil/sets/int64.go deleted file mode 100644 index b375a1b..0000000 --- a/pkg/kutil/sets/int64.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. -type Int64 map[int64]Empty - -// NewInt64 creates a Int64 from a list of values. -func NewInt64(items ...int64) Int64 { - ss := Int64{} - ss.Insert(items...) - return ss -} - -// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func Int64KeySet(theMap interface{}) Int64 { - v := reflect.ValueOf(theMap) - ret := Int64{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int64)) - } - return ret -} - -// Insert adds items to the set. -func (s Int64) Insert(items ...int64) Int64 { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Int64) Delete(items ...int64) Int64 { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Int64) Has(item int64) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int64) HasAll(items ...int64) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. 
-func (s Int64) HasAny(items ...int64) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s Int64) Difference(s2 Int64) Int64 { - result := NewInt64() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int64) Union(s2 Int64) Int64 { - result := NewInt64() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int64) Intersection(s2 Int64) Int64 { - var walk, other Int64 - result := NewInt64() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 Int64) IsSuperset(s2 Int64) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. 
-// (In practice, this means same elements, order doesn't matter) -func (s1 Int64) Equal(s2 Int64) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt64 []int64 - -func (s sortableSliceOfInt64) Len() int { return len(s) } -func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } -func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int64 slice. -func (s Int64) List() []int64 { - res := make(sortableSliceOfInt64, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int64(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int64) UnsortedList() []int64 { - res := make([]int64, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Int64) PopAny() (int64, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int64 - return zeroValue, false -} - -// Len returns the size of the set. -func (s Int64) Len() int { - return len(s) -} - -func lessInt64(lhs, rhs int64) bool { - return lhs < rhs -} diff --git a/pkg/kutil/sets/string.go b/pkg/kutil/sets/string.go deleted file mode 100644 index e6f37db..0000000 --- a/pkg/kutil/sets/string.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. 
DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. -type String map[string]Empty - -// NewString creates a String from a list of values. -func NewString(items ...string) String { - ss := String{} - ss.Insert(items...) - return ss -} - -// StringKeySet creates a String from a keys of a map[string](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func StringKeySet(theMap interface{}) String { - v := reflect.ValueOf(theMap) - ret := String{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(string)) - } - return ret -} - -// Insert adds items to the set. -func (s String) Insert(items ...string) String { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s String) Delete(items ...string) String { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s String) Has(item string) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s String) HasAll(items ...string) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. 
-func (s String) HasAny(items ...string) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s String) Difference(s2 String) String { - result := NewString() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 String) Union(s2 String) String { - result := NewString() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 String) Intersection(s2 String) String { - var walk, other String - result := NewString() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. -func (s1 String) IsSuperset(s2 String) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. 
-// (In practice, this means same elements, order doesn't matter) -func (s1 String) Equal(s2 String) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfString []string - -func (s sortableSliceOfString) Len() int { return len(s) } -func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } -func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted string slice. -func (s String) List() []string { - res := make(sortableSliceOfString, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []string(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s String) UnsortedList() []string { - res := make([]string, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s String) PopAny() (string, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue string - return zeroValue, false -} - -// Len returns the size of the set. -func (s String) Len() int { - return len(s) -} - -func lessString(lhs, rhs string) bool { - return lhs < rhs -} diff --git a/pkg/kutil/strategicpatch/OWNERS b/pkg/kutil/strategicpatch/OWNERS deleted file mode 100644 index cfee199..0000000 --- a/pkg/kutil/strategicpatch/OWNERS +++ /dev/null @@ -1,8 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- pwittrock -- mengqiy -reviewers: -- mengqiy -- apelisse diff --git a/pkg/kutil/strategicpatch/errors.go b/pkg/kutil/strategicpatch/errors.go deleted file mode 100644 index ab66d04..0000000 --- a/pkg/kutil/strategicpatch/errors.go +++ /dev/null @@ -1,49 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "fmt" -) - -type LookupPatchMetaError struct { - Path string - Err error -} - -func (e LookupPatchMetaError) Error() string { - return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) -} - -type FieldNotFoundError struct { - Path string - Field string -} - -func (e FieldNotFoundError) Error() string { - return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) -} - -type InvalidTypeError struct { - Path string - Expected string - Actual string -} - -func (e InvalidTypeError) Error() string { - return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) -} diff --git a/pkg/kutil/strategicpatch/meta.go b/pkg/kutil/strategicpatch/meta.go deleted file mode 100644 index c31de15..0000000 --- a/pkg/kutil/strategicpatch/meta.go +++ /dev/null @@ -1,194 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package strategicpatch - -import ( - "errors" - "fmt" - "reflect" - - "k8s.io/apimachinery/pkg/util/mergepatch" - forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" - openapi "k8s.io/kube-openapi/pkg/util/proto" -) - -type PatchMeta struct { - patchStrategies []string - patchMergeKey string -} - -func (pm PatchMeta) GetPatchStrategies() []string { - if pm.patchStrategies == nil { - return []string{} - } - return pm.patchStrategies -} - -func (pm PatchMeta) SetPatchStrategies(ps []string) { - pm.patchStrategies = ps -} - -func (pm PatchMeta) GetPatchMergeKey() string { - return pm.patchMergeKey -} - -func (pm PatchMeta) SetPatchMergeKey(pmk string) { - pm.patchMergeKey = pmk -} - -type LookupPatchMeta interface { - // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. - LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) - // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
- LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) - // Get the type name of the field - Name() string -} - -type PatchMetaFromStruct struct { - T reflect.Type -} - -func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { - t, err := getTagStructType(dataStruct) - return PatchMetaFromStruct{T: t}, err -} - -var _ LookupPatchMeta = PatchMetaFromStruct{} - -func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) - if err != nil { - return nil, PatchMeta{}, err - } - - return PatchMetaFromStruct{T: fieldType}, - PatchMeta{ - patchStrategies: fieldPatchStrategies, - patchMergeKey: fieldPatchMergeKey, - }, nil -} - -func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { - subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) - if err != nil { - return nil, PatchMeta{}, err - } - elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) - t := elemPatchMetaFromStruct.T - - var elemType reflect.Type - switch t.Kind() { - // If t is an array or a slice, get the element type. - // If element is still an array or a slice, return an error. - // Otherwise, return element type. - case reflect.Array, reflect.Slice: - elemType = t.Elem() - if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { - return nil, PatchMeta{}, errors.New("unexpected slice of slice") - } - // If t is an pointer, get the underlying element. - // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, - // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 - // If the underlying element is either an array or a slice, return its element type. 
- case reflect.Ptr: - t = t.Elem() - if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { - t = t.Elem() - } - elemType = t - default: - return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) - } - - return PatchMetaFromStruct{T: elemType}, patchMeta, nil -} - -func (s PatchMetaFromStruct) Name() string { - return s.T.Kind().String() -} - -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) - } - - t := reflect.TypeOf(dataStruct) - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) - } - - return t, nil -} - -func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { - t, err := getTagStructType(dataStruct) - if err != nil { - panic(err) - } - return t -} - -type PatchMetaFromOpenAPI struct { - Schema openapi.Schema -} - -func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { - return PatchMetaFromOpenAPI{Schema: s} -} - -var _ LookupPatchMeta = PatchMetaFromOpenAPI{} - -func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { - if s.Schema == nil { - return nil, PatchMeta{}, nil - } - kindItem := NewKindItem(key, s.Schema.GetPath()) - s.Schema.Accept(kindItem) - - err := kindItem.Error() - if err != nil { - return nil, PatchMeta{}, err - } - return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, - kindItem.patchmeta, nil -} - -func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { - if s.Schema == nil { - return nil, PatchMeta{}, nil - } - sliceItem := NewSliceItem(key, s.Schema.GetPath()) - s.Schema.Accept(sliceItem) - - err := sliceItem.Error() - if err != nil { - return nil, PatchMeta{}, err - } - return PatchMetaFromOpenAPI{Schema: 
sliceItem.subschema}, - sliceItem.patchmeta, nil -} - -func (s PatchMetaFromOpenAPI) Name() string { - schema := s.Schema - return schema.GetName() -} diff --git a/pkg/kutil/strategicpatch/patch.go b/pkg/kutil/strategicpatch/patch.go deleted file mode 100644 index 600f3be..0000000 --- a/pkg/kutil/strategicpatch/patch.go +++ /dev/null @@ -1,2172 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package strategicpatch - -import ( - "fmt" - "reflect" - "sort" - "strings" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/util/json" - "k8s.io/apimachinery/pkg/util/mergepatch" -) - -// An alternate implementation of JSON Merge Patch -// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate -// certain fields with metadata that indicates whether the elements of JSON -// lists should be merged or replaced. -// -// For more information, see the PATCH section of docs/devel/api-conventions.md. -// -// Some of the content of this package was borrowed with minor adaptations from -// evanphx/json-patch and openshift/origin. 
- -const ( - directiveMarker = "$patch" - deleteDirective = "delete" - replaceDirective = "replace" - mergeDirective = "merge" - - retainKeysStrategy = "retainKeys" - - deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" - retainKeysDirective = "$" + retainKeysStrategy - setElementOrderDirectivePrefix = "$setElementOrder" -) - -// JSONMap is a representations of JSON object encoded as map[string]interface{} -// where the children can be either map[string]interface{}, []interface{} or -// primitive type). -// Operating on JSONMap representation is much faster as it doesn't require any -// json marshaling and/or unmarshaling operations. -type JSONMap map[string]interface{} - -type DiffOptions struct { - // SetElementOrder determines whether we generate the $setElementOrder parallel list. - SetElementOrder bool - // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. - IgnoreChangesAndAdditions bool - // IgnoreDeletions indicates if we keep the deletions in the patch. - IgnoreDeletions bool - // We introduce a new value retainKeys for patchStrategy. - // It indicates that all fields needing to be preserved must be - // present in the `retainKeys` list. - // And the fields that are present will be merged with live object. - // All the missing fields will be cleared when patching. - BuildRetainKeysDirective bool -} - -type MergeOptions struct { - // MergeParallelList indicates if we are merging the parallel list. - // We don't merge parallel list when calling mergeMap() in CreateThreeWayMergePatch() - // which is called client-side. - // We merge parallel list iff when calling mergeMap() in StrategicMergeMapPatch() - // which is called server-side - MergeParallelList bool - // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. - IgnoreUnmatchedNulls bool -} - -// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. 
-// Instead of defining a Delta that holds an original, a patch and a set of preconditions, -// the reconcile method accepts a set of preconditions as an argument. - -// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original -// document and a modified document, which are passed to the method as json encoded content. It will -// return a patch that yields the modified document when applied to the original document, or an error -// if either of the two documents is invalid. -func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) -} - -func CreateTwoWayMergePatchUsingLookupPatchMeta( - original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) - if err != nil { - return nil, err - } - - return json.Marshal(patchMap) -} - -// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, -// encoded JSONMap. -// The serialized version of the map can then be passed to StrategicMergeMapPatch. 
-func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) -} - -func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - diffOptions := DiffOptions{ - SetElementOrder: true, - } - patchMap, err := diffMaps(original, modified, schema, diffOptions) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. - for _, fn := range fns { - if !fn(patchMap) { - return nil, mergepatch.NewErrPreconditionFailed(patchMap) - } - } - - return patchMap, nil -} - -// Returns a (recursive) strategic merge patch that yields modified when applied to original. -// Including: -// - Adding fields to the patch present in modified, missing from original -// - Setting fields to the patch present in modified and original with different values -// - Delete fields present in original, missing from modified through -// - IFF map field - set to nil in patch -// - IFF list of maps && merge strategy - use deleteDirective for the elements -// - IFF list of primitives && merge strategy - use parallel deletion list -// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified -// - Build $retainKeys directive for fields with retainKeys patch strategy -func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { - patch := map[string]interface{}{} - - // This will be used to build the $retainKeys directive sent in the patch - retainKeysList := make([]interface{}, 0, len(modified)) - - // Compare each value in the modified map against the value in 
the original map - for key, modifiedValue := range modified { - // Get the underlying type for pointers - if diffOptions.BuildRetainKeysDirective && modifiedValue != nil { - retainKeysList = append(retainKeysList, key) - } - - originalValue, ok := original[key] - if !ok { - // Key was added, so add to patch - if !diffOptions.IgnoreChangesAndAdditions { - patch[key] = modifiedValue - } - continue - } - - // The patch may have a patch directive - // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it? - foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch) - if err != nil { - return nil, err - } - if foundDirectiveMarker { - continue - } - - if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { - // Types have changed, so add to patch - if !diffOptions.IgnoreChangesAndAdditions { - patch[key] = modifiedValue - } - continue - } - - // Types are the same, so compare values - switch originalValueTyped := originalValue.(type) { - case map[string]interface{}: - modifiedValueTyped := modifiedValue.(map[string]interface{}) - err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) - case []interface{}: - modifiedValueTyped := modifiedValue.([]interface{}) - err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) - default: - replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) - } - if err != nil { - return nil, err - } - } - - updatePatchIfMissing(original, modified, patch, diffOptions) - // Insert the retainKeysList iff there are values present in the retainKeysList and - // either of the following is true: - // - the patch is not empty - // - there are additional field in original that need to be cleared - if len(retainKeysList) > 0 && - (len(patch) > 0 || hasAdditionalNewField(original, modified)) { - patch[retainKeysDirective] = 
sortScalars(retainKeysList) - } - return patch, nil -} - -// handleDirectiveMarker handles how to diff directive marker between 2 objects -func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) { - if key == directiveMarker { - originalString, ok := originalValue.(string) - if !ok { - return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - modifiedString, ok := modifiedValue.(string) - if !ok { - return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) - } - if modifiedString != originalString { - patch[directiveMarker] = modifiedValue - } - return true, nil - } - return false, nil -} - -// handleMapDiff diff between 2 maps `originalValueTyped` and `modifiedValue`, -// puts the diff in the `patch` associated with `key` -// key is the key associated with originalValue and modifiedValue. -// originalValue, modifiedValue are the old and new value respectively.They are both maps -// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue -// diffOptions contains multiple options to control how we do the diff. 
-func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, - schema LookupPatchMeta, diffOptions DiffOptions) error { - subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) - - if err != nil { - // We couldn't look up metadata for the field - // If the values are identical, this doesn't matter, no patch is needed - if reflect.DeepEqual(originalValue, modifiedValue) { - return nil - } - // Otherwise, return the error - return err - } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return err - } - diffOptions.BuildRetainKeysDirective = retainKeys - switch patchStrategy { - // The patch strategic from metadata tells us to replace the entire object instead of diffing it - case replaceDirective: - if !diffOptions.IgnoreChangesAndAdditions { - patch[key] = modifiedValue - } - default: - patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) - if err != nil { - return err - } - // Maps were not identical, use provided patch value - if len(patchValue) > 0 { - patch[key] = patchValue - } - } - return nil -} - -// handleSliceDiff diff between 2 slices `originalValueTyped` and `modifiedValue`, -// puts the diff in the `patch` associated with `key` -// key is the key associated with originalValue and modifiedValue. -// originalValue, modifiedValue are the old and new value respectively.They are both slices -// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue -// diffOptions contains multiple options to control how we do the diff. 
-func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, - schema LookupPatchMeta, diffOptions DiffOptions) error { - subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) - if err != nil { - // We couldn't look up metadata for the field - // If the values are identical, this doesn't matter, no patch is needed - if reflect.DeepEqual(originalValue, modifiedValue) { - return nil - } - // Otherwise, return the error - return err - } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return err - } - switch patchStrategy { - // Merge the 2 slices using mergePatchKey - case mergeDirective: - diffOptions.BuildRetainKeysDirective = retainKeys - addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) - if err != nil { - return err - } - if len(addList) > 0 { - patch[key] = addList - } - // generate a parallel list for deletion - if len(deletionList) > 0 { - parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key) - patch[parallelDeletionListKey] = deletionList - } - if len(setOrderList) > 0 { - parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key) - patch[parallelSetOrderListKey] = setOrderList - } - default: - replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) - } - return nil -} - -// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal -// if diffOptions.IgnoreChangesAndAdditions is false. 
-// original is the old value, maybe either the live cluster object or the last applied configuration -// modified is the new value, is always the users new config -func replacePatchFieldIfNotEqual(key string, original, modified interface{}, - patch map[string]interface{}, diffOptions DiffOptions) { - if diffOptions.IgnoreChangesAndAdditions { - // Ignoring changes - do nothing - return - } - if reflect.DeepEqual(original, modified) { - // Contents are identical - do nothing - return - } - // Create a patch to replace the old value with the new one - patch[key] = modified -} - -// updatePatchIfMissing iterates over `original` when ignoreDeletions is false. -// Clear the field whose key is not present in `modified`. -// original is the old value, maybe either the live cluster object or the last applied configuration -// modified is the new value, is always the users new config -func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) { - if diffOptions.IgnoreDeletions { - // Ignoring deletion - do nothing - return - } - // Add nils for deleted values - for key := range original { - if _, found := modified[key]; !found { - patch[key] = nil - } - } -} - -// validateMergeKeyInLists checks if each map in the list has the mentryerge key. -func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error { - for _, list := range lists { - for _, item := range list { - m, ok := item.(map[string]interface{}) - if !ok { - return mergepatch.ErrBadArgType(m, item) - } - if _, ok = m[mergeKey]; !ok { - return mergepatch.ErrNoMergeKey(m, mergeKey) - } - } - } - return nil -} - -// normalizeElementOrder sort `patch` list by `patchOrder` and sort `serverOnly` list by `serverOrder`. -// Then it merges the 2 sorted lists. -// It guarantee the relative order in the patch list and in the serverOnly list is kept. -// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object. 
-// `patchOrder` is the order we want `patch` list to have and -// `serverOrder` is the order we want `serverOnly` list to have. -// kind is the kind of each item in the lists `patch` and `serverOnly`. -func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { - patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind) - if err != nil { - return nil, err - } - serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind) - if err != nil { - return nil, err - } - all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind) - - return all, nil -} - -// mergeSortedSlice merges the 2 sorted lists by serverOrder with best effort. -// It will insert each item in `left` list to `right` list. In most cases, the 2 lists will be interleaved. -// The relative order of left and right are guaranteed to be kept. -// They have higher precedence than the order in the live list. -// The place for a item in `left` is found by: -// scan from the place of last insertion in `right` to the end of `right`, -// the place is before the first item that is greater than the item we want to insert. -// example usage: using server-only items as left and patch items as right. We insert server-only items -// to patch list. We use the order of live object as record for comparison. -func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} { - // Returns if l is less than r, and if both have been found. - // If l and r both present and l is in front of r, l is less than r. - less := func(l, r interface{}) (bool, bool) { - li := index(serverOrder, l, mergeKey, kind) - ri := index(serverOrder, r, mergeKey, kind) - if li >= 0 && ri >= 0 { - return li < ri, true - } else { - return false, false - } - } - - // left and right should be non-overlapping. 
- size := len(left) + len(right) - i, j := 0, 0 - s := make([]interface{}, size, size) - - for k := 0; k < size; k++ { - if i >= len(left) && j < len(right) { - // have items left in `right` list - s[k] = right[j] - j++ - } else if j >= len(right) && i < len(left) { - // have items left in `left` list - s[k] = left[i] - i++ - } else { - // compare them if i and j are both in bound - less, foundBoth := less(left[i], right[j]) - if foundBoth && less { - s[k] = left[i] - i++ - } else { - s[k] = right[j] - j++ - } - } - } - return s -} - -// index returns the index of the item in the given items, or -1 if it doesn't exist -// l must NOT be a slice of slices, this should be checked before calling. -func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { - var getValFn func(interface{}) interface{} - // Get the correct `getValFn` based on item `kind`. - // It should return the value of merge key for maps and - // return the item for other kinds. - switch kind { - case reflect.Map: - getValFn = func(item interface{}) interface{} { - typedItem, ok := item.(map[string]interface{}) - if !ok { - return nil - } - val := typedItem[mergeKey] - return val - } - default: - getValFn = func(item interface{}) interface{} { - return item - } - } - - for i, v := range l { - if getValFn(valToLookUp) == getValFn(v) { - return i - } - } - return -1 -} - -// extractToDeleteItems takes a list and -// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. 
-func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { - var nonDelete, toDelete []interface{} - for _, v := range l { - m, ok := v.(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(m, v) - } - - directive, foundDirective := m[directiveMarker] - if foundDirective && directive == deleteDirective { - toDelete = append(toDelete, v) - } else { - nonDelete = append(nonDelete, v) - } - } - return nonDelete, toDelete, nil -} - -// normalizeSliceOrder sort `toSort` list by `order` -func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { - var toDelete []interface{} - if kind == reflect.Map { - // make sure each item in toSort, order has merge key - err := validateMergeKeyInLists(mergeKey, toSort, order) - if err != nil { - return nil, err - } - toSort, toDelete, err = extractToDeleteItems(toSort) - if err != nil { - return nil, err - } - } - - sort.SliceStable(toSort, func(i, j int) bool { - if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { - if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { - return ii < ij - } - } - return true - }) - toSort = append(toSort, toDelete...) - return toSort, nil -} - -// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and -// another list to set the order of the list -// Only list of primitives with merge strategy will generate a parallel deletion list. -// These two lists should yield modified when applied to original, for lists with merge semantics. 
-func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { - if len(original) == 0 { - // Both slices are empty - do nothing - if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { - return nil, nil, nil, nil - } - - // Old slice was empty - add all elements from the new slice - return modified, nil, nil, nil - } - - elementType, err := sliceElementType(original, modified) - if err != nil { - return nil, nil, nil, err - } - - var patchList, deleteList, setOrderList []interface{} - kind := elementType.Kind() - switch kind { - case reflect.Map: - patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) - if err != nil { - return nil, nil, nil, err - } - patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) - if err != nil { - return nil, nil, nil, err - } - orderSame, err := isOrderSame(original, modified, mergeKey) - if err != nil { - return nil, nil, nil, err - } - // append the deletions to the end of the patch list. - patchList = append(patchList, deleteList...) - deleteList = nil - // generate the setElementOrder list when there are content changes or order changes - if diffOptions.SetElementOrder && - ((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) || - (!diffOptions.IgnoreDeletions && len(patchList) > 0)) { - // Generate a list of maps that each item contains only the merge key. 
- setOrderList = make([]interface{}, len(modified)) - for i, v := range modified { - typedV := v.(map[string]interface{}) - setOrderList[i] = map[string]interface{}{ - mergeKey: typedV[mergeKey], - } - } - } - case reflect.Slice: - // Lists of Lists are not permitted by the api - return nil, nil, nil, mergepatch.ErrNoListOfLists - default: - patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions) - if err != nil { - return nil, nil, nil, err - } - patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind) - // generate the setElementOrder list when there are content changes or order changes - if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) || - (!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) { - setOrderList = modified - } - } - return patchList, deleteList, setOrderList, err -} - -// isOrderSame checks if the order in a list has changed -func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) { - if len(original) != len(modified) { - return false, nil - } - for i, modifiedItem := range modified { - equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey) - if err != nil || !equal { - return equal, err - } - } - return true, nil -} - -// diffListsOfScalars returns 2 lists, the first one is addList and the second one is deletionList. -// Argument diffOptions.IgnoreChangesAndAdditions controls if calculate addList. true means not calculate. -// Argument diffOptions.IgnoreDeletions controls if calculate deletionList. true means not calculate. 
-// original may be changed, but modified is guaranteed to not be changed -func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { - modifiedCopy := make([]interface{}, len(modified)) - copy(modifiedCopy, modified) - // Sort the scalars for easier calculating the diff - originalScalars := sortScalars(original) - modifiedScalars := sortScalars(modifiedCopy) - - originalIndex, modifiedIndex := 0, 0 - addList := []interface{}{} - deletionList := []interface{}{} - - for { - originalInBounds := originalIndex < len(originalScalars) - modifiedInBounds := modifiedIndex < len(modifiedScalars) - if !originalInBounds && !modifiedInBounds { - break - } - // we need to compare the string representation of the scalar, - // because the scalar is an interface which doesn't support either < or > - // And that's how func sortScalars compare scalars. - var originalString, modifiedString string - var originalValue, modifiedValue interface{} - if originalInBounds { - originalValue = originalScalars[originalIndex] - originalString = fmt.Sprintf("%v", originalValue) - } - if modifiedInBounds { - modifiedValue = modifiedScalars[modifiedIndex] - modifiedString = fmt.Sprintf("%v", modifiedValue) - } - - originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString) - switch { - case originalV == nil && modifiedV == nil: - originalIndex++ - modifiedIndex++ - case originalV != nil && modifiedV == nil: - if !diffOptions.IgnoreDeletions { - deletionList = append(deletionList, originalValue) - } - originalIndex++ - case originalV == nil && modifiedV != nil: - if !diffOptions.IgnoreChangesAndAdditions { - addList = append(addList, modifiedValue) - } - modifiedIndex++ - default: - return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV) - } - } - - return addList, deduplicateScalars(deletionList), nil -} - -// If 
first return value is non-nil, list1 contains an element not present in list2 -// If second return value is non-nil, list2 contains an element not present in list1 -func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) { - bothInBounds := list1Inbounds && list2Inbounds - switch { - // scalars are identical - case bothInBounds && list1Value == list2Value: - return nil, nil - // only list2 is in bound - case !list1Inbounds: - fallthrough - // list2 has additional scalar - case bothInBounds && list1Value > list2Value: - return nil, list2Value - // only original is in bound - case !list2Inbounds: - fallthrough - // original has additional scalar - case bothInBounds && list1Value < list2Value: - return list1Value, nil - default: - return nil, nil - } -} - -// diffListsOfMaps takes a pair of lists and -// returns a (recursive) strategic merge patch list contains additions and changes and -// a deletion list contains deletions -func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { - patch := make([]interface{}, 0, len(modified)) - deletionList := make([]interface{}, 0, len(original)) - - originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) - if err != nil { - return nil, nil, err - } - modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) - if err != nil { - return nil, nil, err - } - - originalIndex, modifiedIndex := 0, 0 - for { - originalInBounds := originalIndex < len(originalSorted) - modifiedInBounds := modifiedIndex < len(modifiedSorted) - bothInBounds := originalInBounds && modifiedInBounds - if !originalInBounds && !modifiedInBounds { - break - } - - var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string - var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{} - var originalElement, 
modifiedElement map[string]interface{} - if originalInBounds { - originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted) - if err != nil { - return nil, nil, err - } - originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue) - } - if modifiedInBounds { - modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted) - if err != nil { - return nil, nil, err - } - modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue) - } - - switch { - case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): - // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) - if err != nil { - return nil, nil, err - } - if len(patchValue) > 0 { - patchValue[mergeKey] = modifiedElementMergeKeyValue - patch = append(patch, patchValue) - } - originalIndex++ - modifiedIndex++ - // only modified is in bound - case !originalInBounds: - fallthrough - // modified has additional map - case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): - if !diffOptions.IgnoreChangesAndAdditions { - patch = append(patch, modifiedElement) - } - modifiedIndex++ - // only original is in bound - case !modifiedInBounds: - fallthrough - // original has additional map - case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): - if !diffOptions.IgnoreDeletions { - // Item was deleted, so add delete directive - deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue)) - } - originalIndex++ - } - } - - return patch, deletionList, nil -} - -// getMapAndMergeKeyValueByIndex return a map in the list and its merge key value given the 
index of the map. -func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) { - m, ok := listOfMaps[index].(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index]) - } - - val, ok := m[mergeKey] - if !ok { - return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey) - } - return m, val, nil -} - -// StrategicMergePatch applies a strategic merge patch. The patch and the original document -// must be json encoded content. A patch can be created from an original and a modified document -// by calling CreateStrategicMergePatch. -func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) -} - -func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { - originalMap, err := handleUnmarshal(original) - if err != nil { - return nil, err - } - patchMap, err := handleUnmarshal(patch) - if err != nil { - return nil, err - } - - result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) - if err != nil { - return nil, err - } - - return json.Marshal(result) -} - -func handleUnmarshal(j []byte) (map[string]interface{}, error) { - if j == nil { - j = []byte("{}") - } - - m := map[string]interface{}{} - err := json.Unmarshal(j, &m) - if err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - return m, nil -} - -// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents -// must be JSONMap. A patch can be created from an original and modified document by -// calling CreateTwoWayMergeMapPatch. -// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. 
-func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { - schema, err := NewPatchMetaFromStruct(dataStruct) - if err != nil { - return nil, err - } - - // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. - // For native resources, we can easily figure out these tags since we know the fields. - - // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle - // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge - // for custom resources. So we should fail fast and return an error. - if _, ok := dataStruct.(*unstructured.Unstructured); ok { - return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat - } - - return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) -} - -func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { - mergeOptions := MergeOptions{ - MergeParallelList: true, - IgnoreUnmatchedNulls: true, - } - return mergeMap(original, patch, schema, mergeOptions) -} - -// MergeStrategicMergeMapPatchUsingLookupPatchMeta merges strategic merge -// patches retaining `null` fields and parallel lists. If 2 patches change the -// same fields and the latter one will override the former one. If you don't -// want that happen, you need to run func MergingMapsHaveConflicts before -// merging these patches. Applying the resulting merged merge patch to a JSONMap -// yields the same as merging each strategic merge patch to the JSONMap in -// succession. 
-func MergeStrategicMergeMapPatchUsingLookupPatchMeta(schema LookupPatchMeta, patches ...JSONMap) (JSONMap, error) { - mergeOptions := MergeOptions{ - MergeParallelList: false, - IgnoreUnmatchedNulls: false, - } - merged := JSONMap{} - var err error - for _, patch := range patches { - merged, err = mergeMap(merged, patch, schema, mergeOptions) - if err != nil { - return nil, err - } - } - return merged, nil -} - -// handleDirectiveInMergeMap handles the patch directive when merging 2 maps. -func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) { - if directive == replaceDirective { - // If the patch contains "$patch: replace", don't merge it, just use the - // patch directly. Later on, we can add a single level replace that only - // affects the map that the $patch is in. - delete(patch, directiveMarker) - return patch, nil - } - - if directive == deleteDirective { - // If the patch contains "$patch: delete", don't merge it, just return - // an empty map. 
- return map[string]interface{}{}, nil - } - - return nil, mergepatch.ErrBadPatchType(directive, patch) -} - -func containsDirectiveMarker(item interface{}) bool { - m, ok := item.(map[string]interface{}) - if ok { - if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker { - return true - } - } - return false -} - -func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) { - if len(mergeKey) == 0 { - return left == right, nil - } - typedLeft, ok := left.(map[string]interface{}) - if !ok { - return false, mergepatch.ErrBadArgType(typedLeft, left) - } - typedRight, ok := right.(map[string]interface{}) - if !ok { - return false, mergepatch.ErrBadArgType(typedRight, right) - } - mergeKeyLeft, ok := typedLeft[mergeKey] - if !ok { - return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey) - } - mergeKeyRight, ok := typedRight[mergeKey] - if !ok { - return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey) - } - return mergeKeyLeft == mergeKeyRight, nil -} - -// extractKey trims the prefix and return the original key -func extractKey(s, prefix string) (string, error) { - substrings := strings.SplitN(s, "/", 2) - if len(substrings) <= 1 || substrings[0] != prefix { - switch prefix { - case deleteFromPrimitiveListDirectivePrefix: - return "", mergepatch.ErrBadPatchFormatForPrimitiveList - case setElementOrderDirectivePrefix: - return "", mergepatch.ErrBadPatchFormatForSetElementOrderList - default: - return "", fmt.Errorf("fail to find unknown prefix %q in %s\n", prefix, s) - } - } - return substrings[1], nil -} - -// validatePatchUsingSetOrderList verifies: -// the relative order of any two items in the setOrderList list matches that in the patch list. -// the items in the patch list must be a subset or the same as the $setElementOrder list (deletions are ignored). 
-func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error { - typedSetOrderList, ok := setOrderList.([]interface{}) - if !ok { - return mergepatch.ErrBadPatchFormatForSetElementOrderList - } - typedPatchList, ok := patchList.([]interface{}) - if !ok { - return mergepatch.ErrBadPatchFormatForSetElementOrderList - } - if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 { - return nil - } - - var nonDeleteList, toDeleteList []interface{} - var err error - if len(mergeKey) > 0 { - nonDeleteList, toDeleteList, err = extractToDeleteItems(typedPatchList) - if err != nil { - return err - } - } else { - nonDeleteList = typedPatchList - } - - patchIndex, setOrderIndex := 0, 0 - for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) { - if containsDirectiveMarker(nonDeleteList[patchIndex]) { - patchIndex++ - continue - } - mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey) - if err != nil { - return err - } - if mergeKeyEqual { - patchIndex++ - } - setOrderIndex++ - } - // If patchIndex is inbound but setOrderIndex if out of bound mean there are items mismatching between the patch list and setElementOrder list. - // the second check is a sanity check, and should always be true if the first is true. - if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) { - return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList) - } - typedPatchList = append(nonDeleteList, toDeleteList...) - return nil -} - -// preprocessDeletionListForMerging preprocesses the deletion list. 
-// it returns shouldContinue, isDeletionList, noPrefixKey -func preprocessDeletionListForMerging(key string, original map[string]interface{}, - patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) { - // If found a parallel list for deletion and we are going to merge the list, - // overwrite the key to the original key and set flag isDeleteList - foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix) - if foundParallelListPrefix { - if !mergeDeletionList { - original[key] = patchVal - return true, false, "", nil - } - originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix) - return false, true, originalKey, err - } - return false, false, "", nil -} - -// applyRetainKeysDirective looks for a retainKeys directive and applies to original -// - if no directive exists do nothing -// - if directive is found, clear keys in original missing from the directive list -// - validate that all keys present in the patch are present in the retainKeys directive -// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives -func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error { - retainKeysInPatch, foundInPatch := patch[retainKeysDirective] - if !foundInPatch { - return nil - } - // cleanup the directive - delete(patch, retainKeysDirective) - - if !options.MergeParallelList { - // If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both. - // If not present in the original patch, copy from the modified patch. - retainKeysInOriginal, foundInOriginal := original[retainKeysDirective] - if foundInOriginal { - if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) { - // This error actually should never happen. 
- return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) - } - } else { - original[retainKeysDirective] = retainKeysInPatch - } - return nil - } - - retainKeysList, ok := retainKeysInPatch.([]interface{}) - if !ok { - return mergepatch.ErrBadPatchFormatForRetainKeys - } - - // validate patch to make sure all fields in the patch are present in the retainKeysList. - // The map is used only as a set, the value is never referenced - m := map[interface{}]struct{}{} - for _, v := range retainKeysList { - m[v] = struct{}{} - } - for k, v := range patch { - if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || - strings.HasPrefix(k, setElementOrderDirectivePrefix) { - continue - } - // If there is an item present in the patch but not in the retainKeys list, - // the patch is invalid. - if _, found := m[k]; !found { - return mergepatch.ErrBadPatchFormatForRetainKeys - } - } - - // clear not present fields - for k := range original { - if _, found := m[k]; !found { - delete(original, k) - } - } - return nil -} - -// mergePatchIntoOriginal processes $setElementOrder list. -// When not merging the directive, it will make sure $setElementOrder list exist only in original. -// When merging the directive, it will try to find the $setElementOrder list and -// its corresponding patch list, validate it and merge it. -// Then, sort them by the relative order in setElementOrder, patch list and live list. -// The precedence is $setElementOrder > order in patch list > order in live list. -// This function will delete the item after merging it to prevent process it again in the future. 
-// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md -func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { - for key, patchV := range patch { - // Do nothing if there is no ordering directive - if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { - continue - } - - setElementOrderInPatch := patchV - // Copies directive from the second patch (`patch`) to the first patch (`original`) - // and checks they are equal and delete the directive in the second patch - if !mergeOptions.MergeParallelList { - setElementOrderListInOriginal, ok := original[key] - if ok { - // check if the setElementOrder list in original and the one in patch matches - if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) { - return mergepatch.ErrBadPatchFormatForSetElementOrderList - } - } else { - // move the setElementOrder list from patch to original - original[key] = setElementOrderInPatch - } - } - delete(patch, key) - - var ( - ok bool - originalFieldValue, patchFieldValue, merged []interface{} - patchStrategy string - patchMeta PatchMeta - subschema LookupPatchMeta - ) - typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) - if !ok { - return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch) - } - // Trim the setElementOrderDirectivePrefix to get the key of the list field in original. - originalKey, err := extractKey(key, setElementOrderDirectivePrefix) - if err != nil { - return err - } - // try to find the list with `originalKey` in `original` and `modified` and merge them. 
- originalList, foundOriginal := original[originalKey] - patchList, foundPatch := patch[originalKey] - if foundOriginal { - originalFieldValue, ok = originalList.([]interface{}) - if !ok { - return mergepatch.ErrBadArgType(originalFieldValue, originalList) - } - } - if foundPatch { - patchFieldValue, ok = patchList.([]interface{}) - if !ok { - return mergepatch.ErrBadArgType(patchFieldValue, patchList) - } - } - subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey) - if err != nil { - return err - } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return err - } - // Check for consistency between the element order list and the field it applies to - err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) - if err != nil { - return err - } - - switch { - case foundOriginal && !foundPatch: - // no change to list contents - merged = originalFieldValue - case !foundOriginal && foundPatch: - // list was added - merged = patchFieldValue - case foundOriginal && foundPatch: - merged, err = mergeSliceHandler(originalList, patchList, subschema, - patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) - if err != nil { - return err - } - case !foundOriginal && !foundPatch: - continue - } - - // Split all items into patch items and server-only items and then enforce the order. - var patchItems, serverOnlyItems []interface{} - if len(patchMeta.GetPatchMergeKey()) == 0 { - // Primitives doesn't need merge key to do partitioning. - patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList) - - } else { - // Maps need merge key to do partitioning. 
- patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) - if err != nil { - return err - } - } - - elementType, err := sliceElementType(originalFieldValue, patchFieldValue) - if err != nil { - return err - } - kind := elementType.Kind() - // normalize merged list - // typedSetElementOrderList contains all the relative order in typedPatchList, - // so don't need to use typedPatchList - both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) - if err != nil { - return err - } - original[originalKey] = both - // delete patch list from patch to prevent process again in the future - delete(patch, originalKey) - } - return nil -} - -// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. -func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) { - patch := make([]interface{}, 0, len(original)) - serverOnly := make([]interface{}, 0, len(original)) - inPatch := map[interface{}]bool{} - for _, v := range partitionBy { - inPatch[v] = true - } - for _, v := range original { - if !inPatch[v] { - serverOnly = append(serverOnly, v) - } else { - patch = append(patch, v) - } - } - return patch, serverOnly -} - -// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not. 
-func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { - patch := make([]interface{}, 0, len(original)) - serverOnly := make([]interface{}, 0, len(original)) - for _, v := range original { - typedV, ok := v.(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(typedV, v) - } - mergeKeyValue, foundMergeKey := typedV[mergeKey] - if !foundMergeKey { - return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) - } - _, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue) - if err != nil { - return nil, nil, err - } - if !found { - serverOnly = append(serverOnly, v) - } else { - patch = append(patch, v) - } - } - return patch, serverOnly, nil -} - -// Merge fields from a patch map into the original map. Note: This may modify -// both the original map and the patch because getting a deep copy of a map in -// golang is highly non-trivial. -// flag mergeOptions.MergeParallelList controls if using the parallel list to delete or keeping the list. -// If patch contains any null field (e.g. field_1: null) that is not -// present in original, then to propagate it to the end result use -// mergeOptions.IgnoreUnmatchedNulls == false. -func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { - if v, ok := patch[directiveMarker]; ok { - return handleDirectiveInMergeMap(v, patch) - } - - // nil is an accepted value for original to simplify logic in other places. - // If original is nil, replace it with an empty map and then apply the patch. - if original == nil { - original = map[string]interface{}{} - } - - err := applyRetainKeysDirective(original, patch, mergeOptions) - if err != nil { - return nil, err - } - - // Process $setElementOrder list and other lists sharing the same key. 
- // When not merging the directive, it will make sure $setElementOrder list exist only in original. - // When merging the directive, it will process $setElementOrder and its patch list together. - // This function will delete the merged elements from patch so they will not be reprocessed - err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) - if err != nil { - return nil, err - } - - // Start merging the patch into the original. - for k, patchV := range patch { - skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList) - if err != nil { - return nil, err - } - if skipProcessing { - continue - } - if len(noPrefixKey) > 0 { - k = noPrefixKey - } - - // If the value of this key is null, delete the key if it exists in the - // original. Otherwise, check if we want to preserve it or skip it. - // Preserving the null value is useful when we want to send an explicit - // delete to the API server. - if patchV == nil { - delete(original, k) - if mergeOptions.IgnoreUnmatchedNulls { - continue - } - } - - _, ok := original[k] - if !ok { - // If it's not in the original document, just take the patch value. - original[k] = patchV - continue - } - - originalType := reflect.TypeOf(original[k]) - patchType := reflect.TypeOf(patchV) - if originalType != patchType { - original[k] = patchV - continue - } - // If they're both maps or lists, recurse into the value. 
- switch originalType.Kind() { - case reflect.Map: - subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k) - if err2 != nil { - return nil, err2 - } - _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err2 != nil { - return nil, err2 - } - original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) - case reflect.Slice: - subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k) - if err2 != nil { - return nil, err2 - } - _, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err2 != nil { - return nil, err2 - } - original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) - default: - original[k] = patchV - } - if err != nil { - return nil, err - } - } - return original, nil -} - -// mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting -// fieldPatchStrategy and mergeOptions. -func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, - fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { - typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) - if err != nil { - return nil, err - } - - if fieldPatchStrategy != replaceDirective { - return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) - } else { - return typedPatch, nil - } -} - -// mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting -// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. 
-func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, - fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { - typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) - if err != nil { - return nil, err - } - - if fieldPatchStrategy == mergeDirective { - return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) - } else { - return typedPatch, nil - } -} - -// Merge two slices together. Note: This may modify both the original slice and -// the patch because getting a deep copy of a slice in golang is highly -// non-trivial. -func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { - if len(original) == 0 && len(patch) == 0 { - return original, nil - } - - // All the values must be of the same type, but not a list. - t, err := sliceElementType(original, patch) - if err != nil { - return nil, err - } - - var merged []interface{} - kind := t.Kind() - // If the elements are not maps, merge the slices of scalars. - if kind != reflect.Map { - if mergeOptions.MergeParallelList && isDeleteList { - return deleteFromSlice(original, patch), nil - } - // Maybe in the future add a "concat" mode that doesn't - // deduplicate. - both := append(original, patch...) 
- merged = deduplicateScalars(both) - - } else { - if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) - } - - original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) - if err != nil { - return nil, err - } - - merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) - if err != nil { - return nil, err - } - } - - // enforce the order - var patchItems, serverOnlyItems []interface{} - if len(mergeKey) == 0 { - patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch) - } else { - patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey) - if err != nil { - return nil, err - } - } - return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind) -} - -// mergeSliceWithSpecialElements handles special elements with directiveMarker -// before merging the slices. It returns a updated `original` and a patch without special elements. -// original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) { - patchWithoutSpecialElements := []interface{}{} - replace := false - for _, v := range patch { - typedV := v.(map[string]interface{}) - patchType, ok := typedV[directiveMarker] - if !ok { - patchWithoutSpecialElements = append(patchWithoutSpecialElements, v) - } else { - switch patchType { - case deleteDirective: - mergeValue, ok := typedV[mergeKey] - if ok { - var err error - original, err = deleteMatchingEntries(original, mergeKey, mergeValue) - if err != nil { - return nil, nil, err - } - } else { - return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) - } - case replaceDirective: - replace = true - // Continue iterating through the array to prune any other $patch elements. 
- case mergeDirective: - return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch") - default: - return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV) - } - } - } - if replace { - return patchWithoutSpecialElements, nil, nil - } - return original, patchWithoutSpecialElements, nil -} - -// delete all matching entries (based on merge key) from a merging list -func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) { - for { - _, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if !found { - break - } - // Delete the element at originalKey. - original = append(original[:originalKey], original[originalKey+1:]...) - } - return original, nil -} - -// mergeSliceWithoutSpecialElements merges slices with non-special elements. -// original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { - for _, v := range patch { - typedV := v.(map[string]interface{}) - mergeValue, ok := typedV[mergeKey] - if !ok { - return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) - } - - // If we find a value with this merge key value in original, merge the - // maps. Otherwise append onto original. - originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) - if err != nil { - return nil, err - } - - if found { - var mergedMaps interface{} - var err error - // Merge into original. 
- mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) - if err != nil { - return nil, err - } - - original[originalKey] = mergedMaps - } else { - original = append(original, v) - } - } - return original, nil -} - -// deleteFromSlice uses the parallel list to delete the items in a list of scalars -func deleteFromSlice(current, toDelete []interface{}) []interface{} { - toDeleteMap := map[interface{}]interface{}{} - processed := make([]interface{}, 0, len(current)) - for _, v := range toDelete { - toDeleteMap[v] = true - } - for _, v := range current { - if _, found := toDeleteMap[v]; !found { - processed = append(processed, v) - } - } - return processed -} - -// This method no longer panics if any element of the slice is not a map. -func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { - for k, v := range m { - typedV, ok := v.(map[string]interface{}) - if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) - } - - valueToMatch, ok := typedV[key] - if ok && valueToMatch == value { - return typedV, k, true, nil - } - } - - return nil, 0, false, nil -} - -// This function takes a JSON map and sorts all the lists that should be merged -// by key. This is needed by tests because in JSON, list order is significant, -// but in Strategic Merge Patch, merge lists do not have significant order. -// Sorting the lists allows for order-insensitive comparison of patched maps. -func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { - var m map[string]interface{} - err := json.Unmarshal(mapJSON, &m) - if err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - - newM, err := sortMergeListsByNameMap(m, schema) - if err != nil { - return nil, err - } - - return json.Marshal(newM) -} - -// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. 
-func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { - newS := map[string]interface{}{} - for k, v := range s { - if k == retainKeysDirective { - typedV, ok := v.([]interface{}) - if !ok { - return nil, mergepatch.ErrBadPatchFormatForRetainKeys - } - v = sortScalars(typedV) - } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { - typedV, ok := v.([]interface{}) - if !ok { - return nil, mergepatch.ErrBadPatchFormatForPrimitiveList - } - v = sortScalars(typedV) - } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { - _, ok := v.([]interface{}) - if !ok { - return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList - } - } else if k != directiveMarker { - // recurse for map and slice. - switch typedV := v.(type) { - case map[string]interface{}: - subschema, _, err := schema.LookupPatchMetadataForStruct(k) - if err != nil { - return nil, err - } - v, err = sortMergeListsByNameMap(typedV, subschema) - if err != nil { - return nil, err - } - case []interface{}: - subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) - if err != nil { - return nil, err - } - if patchStrategy == mergeDirective { - var err error - v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) - if err != nil { - return nil, err - } - } - } - } - - newS[k] = v - } - - return newS, nil -} - -// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. -func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { - if len(s) == 0 { - return s, nil - } - - // We don't support lists of lists yet. - t, err := sliceElementType(s) - if err != nil { - return nil, err - } - - // If the elements are not maps... 
- if t.Kind() != reflect.Map { - // Sort the elements, because they may have been merged out of order. - return deduplicateAndSortScalars(s), nil - } - - // Elements are maps - if one of the keys of the map is a map or a - // list, we may need to recurse into it. - newS := []interface{}{} - for _, elem := range s { - if recurse { - typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, schema) - if err != nil { - return nil, err - } - - newS = append(newS, newElem) - } else { - newS = append(newS, elem) - } - } - - // Sort the maps. - newS = sortMapsBasedOnField(newS, mergeKey) - return newS, nil -} - -func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} { - mapM := mapSliceFromSlice(m) - ss := SortableSliceOfMaps{mapM, fieldName} - sort.Sort(ss) - newS := sliceFromMapSlice(ss.s) - return newS -} - -func mapSliceFromSlice(m []interface{}) []map[string]interface{} { - newM := []map[string]interface{}{} - for _, v := range m { - vt := v.(map[string]interface{}) - newM = append(newM, vt) - } - - return newM -} - -func sliceFromMapSlice(s []map[string]interface{}) []interface{} { - newS := []interface{}{} - for _, v := range s { - newS = append(newS, v) - } - - return newS -} - -type SortableSliceOfMaps struct { - s []map[string]interface{} - k string // key to sort on -} - -func (ss SortableSliceOfMaps) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfMaps) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i][ss.k]) - jStr := fmt.Sprintf("%v", ss.s[j][ss.k]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfMaps) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -func deduplicateAndSortScalars(s []interface{}) []interface{} { - s = deduplicateScalars(s) - return sortScalars(s) -} - -func sortScalars(s []interface{}) []interface{} { - ss := SortableSliceOfScalars{s} - sort.Sort(ss) - return ss.s -} - -func deduplicateScalars(s 
[]interface{}) []interface{} { - // Clever algorithm to deduplicate. - length := len(s) - 1 - for i := 0; i < length; i++ { - for j := i + 1; j <= length; j++ { - if s[i] == s[j] { - s[j] = s[length] - s = s[0:length] - length-- - j-- - } - } - } - - return s -} - -type SortableSliceOfScalars struct { - s []interface{} -} - -func (ss SortableSliceOfScalars) Len() int { - return len(ss.s) -} - -func (ss SortableSliceOfScalars) Less(i, j int) bool { - iStr := fmt.Sprintf("%v", ss.s[i]) - jStr := fmt.Sprintf("%v", ss.s[j]) - return sort.StringsAreSorted([]string{iStr, jStr}) -} - -func (ss SortableSliceOfScalars) Swap(i, j int) { - tmp := ss.s[i] - ss.s[i] = ss.s[j] - ss.s[j] = tmp -} - -// Returns the type of the elements of N slice(s). If the type is different, -// another slice or undefined, returns an error. -func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { - var prevType reflect.Type - for _, s := range slices { - // Go through elements of all given slices and make sure they are all the same type. - for _, v := range s { - currentType := reflect.TypeOf(v) - if prevType == nil { - prevType = currentType - // We don't support lists of lists yet. - if prevType.Kind() == reflect.Slice { - return nil, mergepatch.ErrNoListOfLists - } - } else { - if prevType != currentType { - return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) - } - prevType = currentType - } - } - } - - if prevType == nil { - return nil, fmt.Errorf("no elements in any of the given slices") - } - - return prevType, nil -} - -// MergingMapsHaveConflicts returns true if the left and right JSON interface -// objects overlap with different values in any key. All keys are required to be -// strings. Since patches of the same Type have congruent keys, this is valid -// for multiple patch types. This method supports strategic merge patch semantics. 
-func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { - return mergingMapFieldsHaveConflicts(left, right, schema, "", "") -} - -func mergingMapFieldsHaveConflicts( - left, right interface{}, - schema LookupPatchMeta, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - switch leftType := left.(type) { - case map[string]interface{}: - rightType, ok := right.(map[string]interface{}) - if !ok { - return true, nil - } - leftMarker, okLeft := leftType[directiveMarker] - rightMarker, okRight := rightType[directiveMarker] - // if one or the other has a directive marker, - // then we need to consider that before looking at the individual keys, - // since a directive operates on the whole map. - if okLeft || okRight { - // if one has a directive marker and the other doesn't, - // then we have a conflict, since one is deleting or replacing the whole map, - // and the other is doing things to individual keys. - if okLeft != okRight { - return true, nil - } - // if they both have markers, but they are not the same directive, - // then we have a conflict because they're doing different things to the map. - if leftMarker != rightMarker { - return true, nil - } - } - if fieldPatchStrategy == replaceDirective { - return false, nil - } - // Check the individual keys. 
- return mapsHaveConflicts(leftType, rightType, schema) - - case []interface{}: - rightType, ok := right.([]interface{}) - if !ok { - return true, nil - } - return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) - case string, float64, bool, int64, nil: - return !reflect.DeepEqual(left, right), nil - default: - return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) - } -} - -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { - for key, leftValue := range typedLeft { - if key != directiveMarker && key != retainKeysDirective { - if rightValue, ok := typedRight[key]; ok { - var subschema LookupPatchMeta - var patchMeta PatchMeta - var patchStrategy string - var err error - switch leftValue.(type) { - case []interface{}: - subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) - if err != nil { - return true, err - } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) - if err != nil { - return true, err - } - case map[string]interface{}: - subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) - if err != nil { - return true, err - } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) - if err != nil { - return true, err - } - } - - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { - return true, err - } - } - } - } - - return false, nil -} - -func slicesHaveConflicts( - typedLeft, typedRight []interface{}, - schema LookupPatchMeta, - fieldPatchStrategy, fieldPatchMergeKey string, -) (bool, error) { - elementType, err := sliceElementType(typedLeft, typedRight) - if err != nil { - return true, err - } - - if fieldPatchStrategy == mergeDirective { - // Merging lists of scalars have no conflicts by definition - // So we only need to check further if the elements are maps - 
if elementType.Kind() != reflect.Map { - return false, nil - } - - // Build a map for each slice and then compare the two maps - leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) - if err != nil { - return true, err - } - - rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) - if err != nil { - return true, err - } - - return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) - } - - // Either we don't have type information, or these are non-merging lists - if len(typedLeft) != len(typedRight) { - return true, nil - } - - // Sort scalar slices to prevent ordering issues - // We have no way to sort non-merging lists of maps - if elementType.Kind() != reflect.Map { - typedLeft = deduplicateAndSortScalars(typedLeft) - typedRight = deduplicateAndSortScalars(typedRight) - } - - // Compare the slices element by element in order - // This test will fail if the slices are not sorted - for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { - return true, err - } - } - - return false, nil -} - -func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { - result := make(map[string]interface{}, len(slice)) - for _, value := range slice { - typedValue, ok := value.(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("invalid element type in merging list:%v", slice) - } - - mergeValue, ok := typedValue[mergeKey] - if !ok { - return nil, fmt.Errorf("cannot find merge key `%s` in merging list element:%v", mergeKey, typedValue) - } - - result[fmt.Sprintf("%s", mergeValue)] = typedValue - } - - return result, nil -} - -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { - for key, leftValue := range typedLeft { - if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", 
""); hasConflicts { - return true, err - } - } - } - - return false, nil -} - -// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration, -// while preserving any changes or deletions made to the original configuration in the interim, -// and not overridden by the current configuration. All three documents must be passed to the -// method as json encoded content. It will return a strategic merge patch, or an error if any -// of the documents is invalid, or if there are any preconditions that fail against the modified -// configuration, or, if overwrite is false and there are conflicts between the modified and current -// configurations. Conflicts are defined as keys changed differently from original to modified -// than from original to current. In other words, a conflict occurs if modified changes any key -// in a way that is different from how it is changed in current (e.g., deleting it, changing its -// value). We also propagate values fields that do not exist in original but are explicitly -// defined in modified. -func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { - originalMap := map[string]interface{}{} - if len(original) > 0 { - if err := json.Unmarshal(original, &originalMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - modifiedMap := map[string]interface{}{} - if len(modified) > 0 { - if err := json.Unmarshal(modified, &modifiedMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - currentMap := map[string]interface{}{} - if len(current) > 0 { - if err := json.Unmarshal(current, ¤tMap); err != nil { - return nil, mergepatch.ErrBadJSONDoc - } - } - - // The patch is the difference from current to modified without deletions, plus deletions - // from original to modified. 
To find it, we compute deletions, which are the deletions from - // original to modified, and delta, which is the difference from current to modified without - // deletions, and then apply delta to deletions as a patch, which should be strictly additive. - deltaMapDiffOptions := DiffOptions{ - IgnoreDeletions: true, - SetElementOrder: true, - } - deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) - if err != nil { - return nil, err - } - deletionsMapDiffOptions := DiffOptions{ - SetElementOrder: true, - IgnoreChangesAndAdditions: true, - } - deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) - if err != nil { - return nil, err - } - - mergeOptions := MergeOptions{} - patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) - if err != nil { - return nil, err - } - - // Apply the preconditions to the patch, and return an error if any of them fail. - for _, fn := range fns { - if !fn(patchMap) { - return nil, mergepatch.NewErrPreconditionFailed(patchMap) - } - } - - // If overwrite is false, and the patch contains any keys that were changed differently, - // then return a conflict error. 
- if !overwrite { - changeMapDiffOptions := DiffOptions{} - changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) - if err != nil { - return nil, err - } - - hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) - if err != nil { - return nil, err - } - - if hasConflicts { - return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap)) - } - } - - return json.Marshal(patchMap) -} - -func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified } - -func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified } - -func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified } - -func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} { - return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective} -} - -func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) { - typedOriginal, ok := original.(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) - } - typedPatch, ok := patch.(map[string]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) - } - return typedOriginal, typedPatch, nil -} - -func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) { - typedOriginal, ok := original.([]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original) - } - typedPatch, ok := patch.([]interface{}) - if !ok { - return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch) - } - return typedOriginal, typedPatch, nil -} - -// extractRetainKeysPatchStrategy process patch strategy, which is a string may contains multiple -// patch strategies separated by ",". 
It returns a boolean var indicating if it has -// retainKeys strategies and a string for the other strategy. -func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) { - switch len(strategies) { - case 0: - return false, "", nil - case 1: - singleStrategy := strategies[0] - switch singleStrategy { - case retainKeysStrategy: - return true, "", nil - default: - return false, singleStrategy, nil - } - case 2: - switch { - case strategies[0] == retainKeysStrategy: - return true, strategies[1], nil - case strategies[1] == retainKeysStrategy: - return true, strategies[0], nil - default: - return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) - } - default: - return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies) - } -} - -// hasAdditionalNewField returns if original map has additional key with non-nil value than modified. -func hasAdditionalNewField(original, modified map[string]interface{}) bool { - for k, v := range original { - if v == nil { - continue - } - if _, found := modified[k]; !found { - return true - } - } - return false -} diff --git a/pkg/kutil/strategicpatch/types.go b/pkg/kutil/strategicpatch/types.go deleted file mode 100644 index f84d65a..0000000 --- a/pkg/kutil/strategicpatch/types.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package strategicpatch - -import ( - "errors" - "strings" - - "k8s.io/apimachinery/pkg/util/mergepatch" - openapi "k8s.io/kube-openapi/pkg/util/proto" -) - -const ( - patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" - patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" -) - -type LookupPatchItem interface { - openapi.SchemaVisitor - - Error() error - Path() *openapi.Path -} - -type kindItem struct { - key string - path *openapi.Path - err error - patchmeta PatchMeta - subschema openapi.Schema - hasVisitKind bool -} - -func NewKindItem(key string, path *openapi.Path) *kindItem { - return &kindItem{ - key: key, - path: path, - } -} - -var _ LookupPatchItem = &kindItem{} - -func (item *kindItem) Error() error { - return item.err -} - -func (item *kindItem) Path() *openapi.Path { - return item.path -} - -func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { - item.err = errors.New("expected kind, but got primitive") -} - -func (item *kindItem) VisitArray(schema *openapi.Array) { - item.err = errors.New("expected kind, but got slice") -} - -func (item *kindItem) VisitMap(schema *openapi.Map) { - item.err = errors.New("expected kind, but got map") -} - -func (item *kindItem) VisitReference(schema openapi.Reference) { - if !item.hasVisitKind { - schema.SubSchema().Accept(item) - } -} - -func (item *kindItem) VisitKind(schema *openapi.Kind) { - subschema, ok := schema.Fields[item.key] - if !ok { - item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} - return - } - - mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) - if err != nil { - item.err = err - return - } - item.patchmeta = PatchMeta{ - patchStrategies: patchStrategies, - patchMergeKey: mergeKey, - } - item.subschema = subschema -} - -type sliceItem struct { - key string - path *openapi.Path - err error - patchmeta PatchMeta - subschema openapi.Schema - hasVisitKind bool -} - -func NewSliceItem(key string, path 
*openapi.Path) *sliceItem { - return &sliceItem{ - key: key, - path: path, - } -} - -var _ LookupPatchItem = &sliceItem{} - -func (item *sliceItem) Error() error { - return item.err -} - -func (item *sliceItem) Path() *openapi.Path { - return item.path -} - -func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { - item.err = errors.New("expected slice, but got primitive") -} - -func (item *sliceItem) VisitArray(schema *openapi.Array) { - if !item.hasVisitKind { - item.err = errors.New("expected visit kind first, then visit array") - } - subschema := schema.SubType - item.subschema = subschema -} - -func (item *sliceItem) VisitMap(schema *openapi.Map) { - item.err = errors.New("expected slice, but got map") -} - -func (item *sliceItem) VisitReference(schema openapi.Reference) { - if !item.hasVisitKind { - schema.SubSchema().Accept(item) - } else { - item.subschema = schema.SubSchema() - } -} - -func (item *sliceItem) VisitKind(schema *openapi.Kind) { - subschema, ok := schema.Fields[item.key] - if !ok { - item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} - return - } - - mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) - if err != nil { - item.err = err - return - } - item.patchmeta = PatchMeta{ - patchStrategies: patchStrategies, - patchMergeKey: mergeKey, - } - item.hasVisitKind = true - subschema.Accept(item) -} - -func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { - ps, foundPS := extensions[patchStrategyOpenapiextensionKey] - var patchStrategies []string - var mergeKey, patchStrategy string - var ok bool - if foundPS { - patchStrategy, ok = ps.(string) - if ok { - patchStrategies = strings.Split(patchStrategy, ",") - } else { - return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) - } - } - mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] - if foundMK { - mergeKey, ok = mk.(string) - if !ok { - return "", nil, 
mergepatch.ErrBadArgType(mergeKey, mk) - } - } - return mergeKey, patchStrategies, nil -} diff --git a/pkg/kutil/validation/field/errors.go b/pkg/kutil/validation/field/errors.go deleted file mode 100644 index 0cd5d65..0000000 --- a/pkg/kutil/validation/field/errors.go +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "fmt" - "reflect" - "strconv" - "strings" - - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/sets" -) - -// Error is an implementation of the 'error' interface, which represents a -// field-level validation error. -type Error struct { - Type ErrorType - Field string - BadValue interface{} - Detail string -} - -var _ error = &Error{} - -// Error implements the error interface. -func (v *Error) Error() string { - return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) -} - -// ErrorBody returns the error message without the field name. This is useful -// for building nice-looking higher-level error reporting. 
-func (v *Error) ErrorBody() string { - var s string - switch v.Type { - case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: - s = v.Type.String() - default: - value := v.BadValue - valueType := reflect.TypeOf(value) - if value == nil || valueType == nil { - value = "null" - } else if valueType.Kind() == reflect.Ptr { - if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { - value = "null" - } else { - value = reflectValue.Elem().Interface() - } - } - switch t := value.(type) { - case int64, int32, float64, float32, bool: - // use simple printer for simple types - s = fmt.Sprintf("%s: %v", v.Type, value) - case string: - s = fmt.Sprintf("%s: %q", v.Type, t) - case fmt.Stringer: - // anything that defines String() is better than raw struct - s = fmt.Sprintf("%s: %s", v.Type, t.String()) - default: - // fallback to raw struct - // TODO: internal types have panic guards against json.Marshalling to prevent - // accidental use of internal types in external serialized form. For now, use - // %#v, although it would be better to show a more expressive output in the future - s = fmt.Sprintf("%s: %#v", v.Type, value) - } - } - if len(v.Detail) != 0 { - s += fmt.Sprintf(": %s", v.Detail) - } - return s -} - -// ErrorType is a machine readable value providing more detail about why -// a field is invalid. These values are expected to match 1-1 with -// CauseType in api/types.go. -type ErrorType string - -// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. -const ( - // ErrorTypeNotFound is used to report failure to find a requested value - // (e.g. looking up an ID). See NotFound(). - ErrorTypeNotFound ErrorType = "FieldValueNotFound" - // ErrorTypeRequired is used to report required values that are not - // provided (e.g. empty strings, null values, or empty arrays). See - // Required(). 
- ErrorTypeRequired ErrorType = "FieldValueRequired" - // ErrorTypeDuplicate is used to report collisions of values that must be - // unique (e.g. unique IDs). See Duplicate(). - ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" - // ErrorTypeInvalid is used to report malformed values (e.g. failed regex - // match, too long, out of bounds). See Invalid(). - ErrorTypeInvalid ErrorType = "FieldValueInvalid" - // ErrorTypeNotSupported is used to report unknown values for enumerated - // fields (e.g. a list of valid values). See NotSupported(). - ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" - // ErrorTypeForbidden is used to report valid (as per formatting rules) - // values which would be accepted under some conditions, but which are not - // permitted by the current conditions (such as security policy). See - // Forbidden(). - ErrorTypeForbidden ErrorType = "FieldValueForbidden" - // ErrorTypeTooLong is used to report that the given value is too long. - // This is similar to ErrorTypeInvalid, but the error will not include the - // too-long value. See TooLong(). - ErrorTypeTooLong ErrorType = "FieldValueTooLong" - // ErrorTypeTooMany is used to report "too many". This is used to - // report that a given list has too many items. This is similar to FieldValueTooLong, - // but the error indicates quantity instead of length. - ErrorTypeTooMany ErrorType = "FieldValueTooMany" - // ErrorTypeInternal is used to report other errors that are not related - // to user input. See InternalError(). - ErrorTypeInternal ErrorType = "InternalError" -) - -// String converts a ErrorType into its corresponding canonical error message. 
-func (t ErrorType) String() string { - switch t { - case ErrorTypeNotFound: - return "Not found" - case ErrorTypeRequired: - return "Required value" - case ErrorTypeDuplicate: - return "Duplicate value" - case ErrorTypeInvalid: - return "Invalid value" - case ErrorTypeNotSupported: - return "Unsupported value" - case ErrorTypeForbidden: - return "Forbidden" - case ErrorTypeTooLong: - return "Too long" - case ErrorTypeTooMany: - return "Too many" - case ErrorTypeInternal: - return "Internal error" - default: - panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) - } -} - -// NotFound returns a *Error indicating "value not found". This is -// used to report failure to find a requested value (e.g. looking up an ID). -func NotFound(field *Path, value interface{}) *Error { - return &Error{ErrorTypeNotFound, field.String(), value, ""} -} - -// Required returns a *Error indicating "value required". This is used -// to report required values that are not provided (e.g. empty strings, null -// values, or empty arrays). -func Required(field *Path, detail string) *Error { - return &Error{ErrorTypeRequired, field.String(), "", detail} -} - -// Duplicate returns a *Error indicating "duplicate value". This is -// used to report collisions of values that must be unique (e.g. names or IDs). -func Duplicate(field *Path, value interface{}) *Error { - return &Error{ErrorTypeDuplicate, field.String(), value, ""} -} - -// Invalid returns a *Error indicating "invalid value". This is used -// to report malformed values (e.g. failed regex match, too long, out of bounds). -func Invalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeInvalid, field.String(), value, detail} -} - -// NotSupported returns a *Error indicating "unsupported value". -// This is used to report unknown values for enumerated fields (e.g. a list of -// valid values). 
-func NotSupported(field *Path, value interface{}, validValues []string) *Error { - detail := "" - if validValues != nil && len(validValues) > 0 { - quotedValues := make([]string, len(validValues)) - for i, v := range validValues { - quotedValues[i] = strconv.Quote(v) - } - detail = "supported values: " + strings.Join(quotedValues, ", ") - } - return &Error{ErrorTypeNotSupported, field.String(), value, detail} -} - -// Forbidden returns a *Error indicating "forbidden". This is used to -// report valid (as per formatting rules) values which would be accepted under -// some conditions, but which are not permitted by current conditions (e.g. -// security policy). -func Forbidden(field *Path, detail string) *Error { - return &Error{ErrorTypeForbidden, field.String(), "", detail} -} - -// TooLong returns a *Error indicating "too long". This is used to -// report that the given value is too long. This is similar to -// Invalid, but the returned error will not include the too-long -// value. -func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} -} - -// TooMany returns a *Error indicating "too many". This is used to -// report that a given list has too many items. This is similar to TooLong, -// but the returned error indicates quantity instead of length. -func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { - return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} -} - -// InternalError returns a *Error indicating "internal error". This is used -// to signal that an error was found that was not directly related to user -// input. The err argument must be non-nil. -func InternalError(field *Path, err error) *Error { - return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} -} - -// ErrorList holds a set of Errors. 
It is plausible that we might one day have -// non-field errors in this same umbrella package, but for now we don't, so -// we can keep it simple and leave ErrorList here. -type ErrorList []*Error - -// NewErrorTypeMatcher returns an errors.Matcher that returns true -// if the provided error is a Error and has the provided ErrorType. -func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { - return func(err error) bool { - if e, ok := err.(*Error); ok { - return e.Type == t - } - return false - } -} - -// ToAggregate converts the ErrorList into an errors.Aggregate. -func (list ErrorList) ToAggregate() utilerrors.Aggregate { - errs := make([]error, 0, len(list)) - errorMsgs := sets.NewString() - for _, err := range list { - msg := fmt.Sprintf("%v", err) - if errorMsgs.Has(msg) { - continue - } - errorMsgs.Insert(msg) - errs = append(errs, err) - } - return utilerrors.NewAggregate(errs) -} - -func fromAggregate(agg utilerrors.Aggregate) ErrorList { - errs := agg.Errors() - list := make(ErrorList, len(errs)) - for i := range errs { - list[i] = errs[i].(*Error) - } - return list -} - -// Filter removes items from the ErrorList that match the provided fns. -func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { - err := utilerrors.FilterOut(list.ToAggregate(), fns...) - if err == nil { - return nil - } - // FilterOut takes an Aggregate and returns an Aggregate - return fromAggregate(err.(utilerrors.Aggregate)) -} diff --git a/pkg/kutil/validation/field/path.go b/pkg/kutil/validation/field/path.go deleted file mode 100644 index 2efc8ee..0000000 --- a/pkg/kutil/validation/field/path.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package field - -import ( - "bytes" - "fmt" - "strconv" -) - -// Path represents the path from some root to a particular field. -type Path struct { - name string // the name of this field or "" if this is an index - index string // if name == "", this is a subscript (index or map key) of the previous element - parent *Path // nil if this is the root element -} - -// NewPath creates a root Path object. -func NewPath(name string, moreNames ...string) *Path { - r := &Path{name: name, parent: nil} - for _, anotherName := range moreNames { - r = &Path{name: anotherName, parent: r} - } - return r -} - -// Root returns the root element of this Path. -func (p *Path) Root() *Path { - for ; p.parent != nil; p = p.parent { - // Do nothing. - } - return p -} - -// Child creates a new Path that is a child of the method receiver. -func (p *Path) Child(name string, moreNames ...string) *Path { - r := NewPath(name, moreNames...) - r.Root().parent = p - return r -} - -// Index indicates that the previous Path is to be subscripted by an int. -// This sets the same underlying value as Key. -func (p *Path) Index(index int) *Path { - return &Path{index: strconv.Itoa(index), parent: p} -} - -// Key indicates that the previous Path is to be subscripted by a string. -// This sets the same underlying value as Index. -func (p *Path) Key(key string) *Path { - return &Path{index: key, parent: p} -} - -// String produces a string representation of the Path. 
-func (p *Path) String() string { - // make a slice to iterate - elems := []*Path{} - for ; p != nil; p = p.parent { - elems = append(elems, p) - } - - // iterate, but it has to be backwards - buf := bytes.NewBuffer(nil) - for i := range elems { - p := elems[len(elems)-1-i] - if p.parent != nil && len(p.name) > 0 { - // This is either the root or it is a subscript. - buf.WriteString(".") - } - if len(p.name) > 0 { - buf.WriteString(p.name) - } else { - fmt.Fprintf(buf, "[%s]", p.index) - } - } - return buf.String() -} diff --git a/pkg/kutil/validation/validation.go b/pkg/kutil/validation/validation.go deleted file mode 100644 index 4752b29..0000000 --- a/pkg/kutil/validation/validation.go +++ /dev/null @@ -1,503 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package validation - -import ( - "fmt" - "math" - "net" - "regexp" - "strconv" - "strings" - - "k8s.io/apimachinery/pkg/util/validation/field" -) - -const qnameCharFmt string = "[A-Za-z0-9]" -const qnameExtCharFmt string = "[-A-Za-z0-9_.]" -const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" 
+ qnameCharFmt -const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" -const qualifiedNameMaxLength int = 63 - -var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") - -// IsQualifiedName tests whether the value passed is what Kubernetes calls a -// "qualified name". This is a format used in various places throughout the -// system. If the value is not valid, a list of error strings is returned. -// Otherwise an empty list (or nil) is returned. -func IsQualifiedName(value string) []string { - var errs []string - parts := strings.Split(value, "/") - var name string - switch len(parts) { - case 1: - name = parts[0] - case 2: - var prefix string - prefix, name = parts[0], parts[1] - if len(prefix) == 0 { - errs = append(errs, "prefix part "+EmptyError()) - } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { - errs = append(errs, prefixEach(msgs, "prefix part ")...) - } - default: - return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ - " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") - } - - if len(name) == 0 { - errs = append(errs, "name part "+EmptyError()) - } else if len(name) > qualifiedNameMaxLength { - errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) - } - if !qualifiedNameRegexp.MatchString(name) { - errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) - } - return errs -} - -// IsFullyQualifiedName checks if the name is fully qualified. This is similar -// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of -// 2 and does not accept a trailing . as valid. -// TODO: This function is deprecated and preserved until all callers migrate to -// IsFullyQualifiedDomainName; please don't add new callers. 
-func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { - var allErrors field.ErrorList - if len(name) == 0 { - return append(allErrors, field.Required(fldPath, "")) - } - if errs := IsDNS1123Subdomain(name); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) - } - if len(strings.Split(name, ".")) < 3 { - return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) - } - return allErrors -} - -// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This -// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments -// instead of 3 and accepts a trailing . as valid. -func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { - var allErrors field.ErrorList - if len(name) == 0 { - return append(allErrors, field.Required(fldPath, "")) - } - if strings.HasSuffix(name, ".") { - name = name[:len(name)-1] - } - if errs := IsDNS1123Subdomain(name); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) - } - if len(strings.Split(name, ".")) < 2 { - return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) - } - for _, label := range strings.Split(name, ".") { - if errs := IsDNS1123Label(label); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, label, strings.Join(errs, ","))) - } - } - return allErrors -} - -// Allowed characters in an HTTP Path as defined by RFC 3986. 
A HTTP path may -// contain: -// * unreserved characters (alphanumeric, '-', '.', '_', '~') -// * percent-encoded octets -// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") -// * a colon character (":") -const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` - -var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") - -// IsDomainPrefixedPath checks if the given string is a domain-prefixed path -// (e.g. acme.io/foo). All characters before the first "/" must be a valid -// subdomain as defined by RFC 1123. All characters trailing the first "/" must -// be valid HTTP Path characters as defined by RFC 3986. -func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { - var allErrs field.ErrorList - if len(dpPath) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - - segments := strings.SplitN(dpPath, "/", 2) - if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { - return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) - } - - host := segments[0] - for _, err := range IsDNS1123Subdomain(host) { - allErrs = append(allErrs, field.Invalid(fldPath, host, err)) - } - - path := segments[1] - if !httpPathRegexp.MatchString(path) { - return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) - } - - return allErrs -} - -const labelValueFmt string = "(" + qualifiedNameFmt + ")?" -const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" - -// LabelValueMaxLength is a label's max length -const LabelValueMaxLength int = 63 - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// IsValidLabelValue tests whether the value passed is a valid label value. If -// the value is not valid, a list of error strings is returned. 
Otherwise an -// empty list (or nil) is returned. -func IsValidLabelValue(value string) []string { - var errs []string - if len(value) > LabelValueMaxLength { - errs = append(errs, MaxLenError(LabelValueMaxLength)) - } - if !labelValueRegexp.MatchString(value) { - errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) - } - return errs -} - -const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" -const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) -const DNS1123LabelMaxLength int = 63 - -var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") - -// IsDNS1123Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1123). -func IsDNS1123Label(value string) []string { - var errs []string - if len(value) > DNS1123LabelMaxLength { - errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) - } - if !dns1123LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) - } - return errs -} - -const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" -const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" - -// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) -const DNS1123SubdomainMaxLength int = 253 - -var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") - -// IsDNS1123Subdomain tests for a string that conforms to the definition of a -// subdomain in DNS (RFC 1123). 
-func IsDNS1123Subdomain(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !dns1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) - } - return errs -} - -const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" -const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" - -// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) -const DNS1035LabelMaxLength int = 63 - -var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") - -// IsDNS1035Label tests for a string that conforms to the definition of a label in -// DNS (RFC 1035). -func IsDNS1035Label(value string) []string { - var errs []string - if len(value) > DNS1035LabelMaxLength { - errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) - } - if !dns1035LabelRegexp.MatchString(value) { - errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) - } - return errs -} - -// wildcard definition - RFC 1034 section 4.3.3. -// examples: -// - valid: *.bar.com, *.foo.bar.com -// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * -const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt -const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" - -// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a -// wildcard subdomain in DNS (RFC 1034 section 4.3.3). 
-func IsWildcardDNS1123Subdomain(value string) []string { - wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") - - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !wildcardDNS1123SubdomainRegexp.MatchString(value) { - errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) - } - return errs -} - -const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" -const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" - -var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") - -// IsCIdentifier tests for a string that conforms the definition of an identifier -// in C. This checks the format, but not the length. -func IsCIdentifier(value string) []string { - if !cIdentifierRegexp.MatchString(value) { - return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} - } - return nil -} - -// IsValidPortNum tests that the argument is a valid, non-zero port number. -func IsValidPortNum(port int) []string { - if 1 <= port && port <= 65535 { - return nil - } - return []string{InclusiveRangeError(1, 65535)} -} - -// IsInRange tests that the argument is in an inclusive range. -func IsInRange(value int, min int, max int) []string { - if value >= min && value <= max { - return nil - } - return []string{InclusiveRangeError(min, max)} -} - -// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 -// TODO: once we have a type for UID/GID we should make these that type. -const ( - minUserID = 0 - maxUserID = math.MaxInt32 - minGroupID = 0 - maxGroupID = math.MaxInt32 -) - -// IsValidGroupID tests that the argument is a valid Unix GID. 
-func IsValidGroupID(gid int64) []string { - if minGroupID <= gid && gid <= maxGroupID { - return nil - } - return []string{InclusiveRangeError(minGroupID, maxGroupID)} -} - -// IsValidUserID tests that the argument is a valid Unix UID. -func IsValidUserID(uid int64) []string { - if minUserID <= uid && uid <= maxUserID { - return nil - } - return []string{InclusiveRangeError(minUserID, maxUserID)} -} - -var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") -var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") - -// IsValidPortName check that the argument is valid syntax. It must be -// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] -// and must contain at least one letter [a-z]. It must not start or end with a -// hyphen, nor contain adjacent hyphens. -// -// Note: We only allow lower-case characters, even though RFC 6335 is case -// insensitive. -func IsValidPortName(port string) []string { - var errs []string - if len(port) > 15 { - errs = append(errs, MaxLenError(15)) - } - if !portNameCharsetRegex.MatchString(port) { - errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") - } - if !portNameOneLetterRegexp.MatchString(port) { - errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") - } - if strings.Contains(port, "--") { - errs = append(errs, "must not contain consecutive hyphens") - } - if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { - errs = append(errs, "must not begin or end with a hyphen") - } - return errs -} - -// IsValidIP tests that the argument is a valid IP address. -func IsValidIP(value string) []string { - if net.ParseIP(value) == nil { - return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} - } - return nil -} - -// IsValidIPv4Address tests that the argument is a valid IPv4 address. 
-func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := net.ParseIP(value) - if ip == nil || ip.To4() == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) - } - return allErrors -} - -// IsValidIPv6Address tests that the argument is a valid IPv6 address. -func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := net.ParseIP(value) - if ip == nil || ip.To4() != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) - } - return allErrors -} - -const percentFmt string = "[0-9]+%" -const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" - -var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") - -// IsValidPercent checks that string is in the form of a percentage -func IsValidPercent(percent string) []string { - if !percentRegexp.MatchString(percent) { - return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} - } - return nil -} - -const httpHeaderNameFmt string = "[-A-Za-z0-9]+" -const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" - -var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") - -// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's -// definition of a valid header field name (a stricter subset than RFC7230). 
-func IsHTTPHeaderName(value string) []string { - if !httpHeaderNameRegexp.MatchString(value) { - return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} - } - return nil -} - -const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" -const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" - -var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") - -// IsEnvVarName tests if a string is a valid environment variable name. -func IsEnvVarName(value string) []string { - var errs []string - if !envVarNameRegexp.MatchString(value) { - errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) - } - - errs = append(errs, hasChDirPrefix(value)...) - return errs -} - -const configMapKeyFmt = `[-._a-zA-Z0-9]+` -const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" - -var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") - -// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret -func IsConfigMapKey(value string) []string { - var errs []string - if len(value) > DNS1123SubdomainMaxLength { - errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) - } - if !configMapKeyRegexp.MatchString(value) { - errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) - } - errs = append(errs, hasChDirPrefix(value)...) - return errs -} - -// MaxLenError returns a string explanation of a "string too long" validation -// failure. -func MaxLenError(length int) string { - return fmt.Sprintf("must be no more than %d characters", length) -} - -// RegexError returns a string explanation of a regex validation failure. 
-func RegexError(msg string, fmt string, examples ...string) string { - if len(examples) == 0 { - return msg + " (regex used for validation is '" + fmt + "')" - } - msg += " (e.g. " - for i := range examples { - if i > 0 { - msg += " or " - } - msg += "'" + examples[i] + "', " - } - msg += "regex used for validation is '" + fmt + "')" - return msg -} - -// EmptyError returns a string explanation of a "must not be empty" validation -// failure. -func EmptyError() string { - return "must be non-empty" -} - -func prefixEach(msgs []string, prefix string) []string { - for i := range msgs { - msgs[i] = prefix + msgs[i] - } - return msgs -} - -// InclusiveRangeError returns a string explanation of a numeric "must be -// between" validation failure. -func InclusiveRangeError(lo, hi int) string { - return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) -} - -func hasChDirPrefix(value string) []string { - var errs []string - switch { - case value == ".": - errs = append(errs, `must not be '.'`) - case value == "..": - errs = append(errs, `must not be '..'`) - case strings.HasPrefix(value, ".."): - errs = append(errs, `must not start with '..'`) - } - return errs -} - -// IsValidSocketAddr checks that string represents a valid socket address -// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) -func IsValidSocketAddr(value string) []string { - var errs []string - ip, port, err := net.SplitHostPort(value) - if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") - return errs - } - portInt, _ := strconv.Atoi(port) - errs = append(errs, IsValidPortNum(portInt)...) - errs = append(errs, IsValidIP(ip)...) - return errs -} diff --git a/pkg/kutil/wait/doc.go b/pkg/kutil/wait/doc.go deleted file mode 100644 index 3f0c968..0000000 --- a/pkg/kutil/wait/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package wait provides tools for polling or listening for changes -// to a condition. -package wait // import "k8s.io/apimachinery/pkg/util/wait" diff --git a/pkg/kutil/wait/wait.go b/pkg/kutil/wait/wait.go deleted file mode 100644 index 1c95dbc..0000000 --- a/pkg/kutil/wait/wait.go +++ /dev/null @@ -1,606 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package wait - -import ( - "context" - "errors" - "math" - "math/rand" - "sync" - "time" - - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/kutil/clock" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/kutil/runtime" -) - -// For any test of the style: -// ... -// <- time.After(timeout): -// t.Errorf("Timed out") -// The value for timeout should effectively be "forever." 
Obviously we don't want our tests to truly lock up forever, but 30s -// is long enough that it is effectively forever for the things that can slow down a run on a heavily contended machine -// (GC, seeks, etc), but not so long as to make a developer ctrl-c a test run if they do happen to break that test. -var ForeverTestTimeout = time.Second * 30 - -// NeverStop may be passed to Until to make it never stop. -var NeverStop <-chan struct{} = make(chan struct{}) - -// Group allows to start a group of goroutines and wait for their completion. -type Group struct { - wg sync.WaitGroup -} - -func (g *Group) Wait() { - g.wg.Wait() -} - -// StartWithChannel starts f in a new goroutine in the group. -// stopCh is passed to f as an argument. f should stop when stopCh is available. -func (g *Group) StartWithChannel(stopCh <-chan struct{}, f func(stopCh <-chan struct{})) { - g.Start(func() { - f(stopCh) - }) -} - -// StartWithContext starts f in a new goroutine in the group. -// ctx is passed to f as an argument. f should stop when ctx.Done() is available. -func (g *Group) StartWithContext(ctx context.Context, f func(context.Context)) { - g.Start(func() { - f(ctx) - }) -} - -// Start starts f in a new goroutine in the group. -func (g *Group) Start(f func()) { - g.wg.Add(1) - go func() { - defer g.wg.Done() - f() - }() -} - -// Forever calls f every period for ever. -// -// Forever is syntactic sugar on top of Until. -func Forever(f func(), period time.Duration) { - Until(f, period, NeverStop) -} - -// Until loops until stop channel is closed, running f every period. -// -// Until is syntactic sugar on top of JitterUntil with zero jitter factor and -// with sliding = true (which means the timer for period starts after the f -// completes). -func Until(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, true, stopCh) -} - -// UntilWithContext loops until context is done, running f every period. 
-// -// UntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor and with sliding = true (which means the timer -// for period starts after the f completes). -func UntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, true) -} - -// NonSlidingUntil loops until stop channel is closed, running f every -// period. -// -// NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter -// factor, with sliding = false (meaning the timer for period starts at the same -// time as the function starts). -func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { - JitterUntil(f, period, 0.0, false, stopCh) -} - -// NonSlidingUntilWithContext loops until context is done, running f every -// period. -// -// NonSlidingUntilWithContext is syntactic sugar on top of JitterUntilWithContext -// with zero jitter factor, with sliding = false (meaning the timer for period -// starts at the same time as the function starts). -func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration) { - JitterUntilWithContext(ctx, f, period, 0.0, false) -} - -// JitterUntil loops until stop channel is closed, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Close stopCh to stop. f may not be invoked if stop channel is already -// closed. Pass NeverStop to if you don't want it stop. 
-func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { - BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) -} - -// BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { - var t clock.Timer - for { - select { - case <-stopCh: - return - default: - } - - if !sliding { - t = backoff.Backoff() - } - - func() { - defer runtime.HandleCrash() - f() - }() - - if sliding { - t = backoff.Backoff() - } - - // NOTE: b/c there is no priority selection in golang - // it is possible for this to race, meaning we could - // trigger t.C and stopCh, and t.C select falls through. - // In order to mitigate we re-check stopCh at the beginning - // of every loop to prevent extra executions of f(). - select { - case <-stopCh: - return - case <-t.C(): - } - } -} - -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - -// Jitter returns a time.Duration between duration and duration + maxFactor * -// duration. -// -// This allows clients to avoid converging on periodic behavior. 
If maxFactor -// is 0.0, a suggested default value will be chosen. -func Jitter(duration time.Duration, maxFactor float64) time.Duration { - if maxFactor <= 0.0 { - maxFactor = 1.0 - } - wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) - return wait -} - -// ErrWaitTimeout is returned when the condition exited without success. -var ErrWaitTimeout = errors.New("timed out waiting for the condition") - -// ConditionFunc returns true if the condition is satisfied, or an error -// if the loop should be aborted. -type ConditionFunc func() (done bool, err error) - -// runConditionWithCrashProtection runs a ConditionFunc with crash protection -func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { - defer runtime.HandleCrash() - return condition() -} - -// Backoff holds parameters applied to a Backoff function. -type Backoff struct { - // The initial duration. - Duration time.Duration - // Duration is multiplied by factor each iteration, if factor is not zero - // and the limits imposed by Steps and Cap have not been reached. - // Should not be negative. - // The jitter does not contribute to the updates to the duration parameter. - Factor float64 - // The sleep at each iteration is the duration plus an additional - // amount chosen uniformly at random from the interval between - // zero and `jitter*duration`. - Jitter float64 - // The remaining number of iterations in which the duration - // parameter may change (but progress can be stopped earlier by - // hitting the cap). If not positive, the duration is not - // changed. Used for exponential backoff in combination with - // Factor and Cap. - Steps int - // A limit on revised values of the duration parameter. If a - // multiplication by the factor parameter would make the duration - // exceed the cap then the duration is set to the cap and the - // steps parameter is set to zero. 
- Cap time.Duration -} - -// Step (1) returns an amount of time to sleep determined by the -// original Duration and Jitter and (2) mutates the provided Backoff -// to update its Steps and Duration. -func (b *Backoff) Step() time.Duration { - if b.Steps < 1 { - if b.Jitter > 0 { - return Jitter(b.Duration, b.Jitter) - } - return b.Duration - } - b.Steps-- - - duration := b.Duration - - // calculate the next step - if b.Factor != 0 { - b.Duration = time.Duration(float64(b.Duration) * b.Factor) - if b.Cap > 0 && b.Duration > b.Cap { - b.Duration = b.Cap - b.Steps = 0 - } - } - - if b.Jitter > 0 { - duration = Jitter(duration, b.Jitter) - } - return duration -} - -// contextForChannel derives a child context from a parent channel. -// -// The derived context's Done channel is closed when the returned cancel function -// is called or when the parent channel is closed, whichever happens first. -// -// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked. -func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) { - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - select { - case <-parentCh: - cancel() - case <-ctx.Done(): - } - }() - return ctx, cancel -} - -// BackoffManager manages backoff with a particular scheme based on its underlying implementation. It provides -// an interface to return a timer for backoff, and caller shall backoff until Timer.C() drains. If the second Backoff() -// is called before the timer from the first Backoff() call finishes, the first timer will NOT be drained and result in -// undetermined behavior. -// The BackoffManager is supposed to be called in a single-threaded environment. 
-type BackoffManager interface { - Backoff() clock.Timer -} - -type exponentialBackoffManagerImpl struct { - backoff *Backoff - backoffTimer clock.Timer - lastBackoffStart time.Time - initialBackoff time.Duration - backoffResetDuration time.Duration - clock clock.Clock -} - -// NewExponentialBackoffManager returns a manager for managing exponential backoff. Each backoff is jittered and -// backoff will not exceed the given max. If the backoff is not called within resetDuration, the backoff is reset. -// This backoff manager is used to reduce load during upstream unhealthiness. -func NewExponentialBackoffManager(initBackoff, maxBackoff, resetDuration time.Duration, backoffFactor, jitter float64, c clock.Clock) BackoffManager { - return &exponentialBackoffManagerImpl{ - backoff: &Backoff{ - Duration: initBackoff, - Factor: backoffFactor, - Jitter: jitter, - - // the current impl of wait.Backoff returns Backoff.Duration once steps are used up, which is not - // what we ideally need here, we set it to max int and assume we will never use up the steps - Steps: math.MaxInt32, - Cap: maxBackoff, - }, - backoffTimer: nil, - initialBackoff: initBackoff, - lastBackoffStart: c.Now(), - backoffResetDuration: resetDuration, - clock: c, - } -} - -func (b *exponentialBackoffManagerImpl) getNextBackoff() time.Duration { - if b.clock.Now().Sub(b.lastBackoffStart) > b.backoffResetDuration { - b.backoff.Steps = math.MaxInt32 - b.backoff.Duration = b.initialBackoff - } - b.lastBackoffStart = b.clock.Now() - return b.backoff.Step() -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for exponential backoff. 
-// The returned timer must be drained before calling Backoff() the second time -func (b *exponentialBackoffManagerImpl) Backoff() clock.Timer { - if b.backoffTimer == nil { - b.backoffTimer = b.clock.NewTimer(b.getNextBackoff()) - } else { - b.backoffTimer.Reset(b.getNextBackoff()) - } - return b.backoffTimer -} - -type jitteredBackoffManagerImpl struct { - clock clock.Clock - duration time.Duration - jitter float64 - backoffTimer clock.Timer -} - -// NewJitteredBackoffManager returns a BackoffManager that backoffs with given duration plus given jitter. If the jitter -// is negative, backoff will not be jittered. -func NewJitteredBackoffManager(duration time.Duration, jitter float64, c clock.Clock) BackoffManager { - return &jitteredBackoffManagerImpl{ - clock: c, - duration: duration, - jitter: jitter, - backoffTimer: nil, - } -} - -func (j *jitteredBackoffManagerImpl) getNextBackoff() time.Duration { - jitteredPeriod := j.duration - if j.jitter > 0.0 { - jitteredPeriod = Jitter(j.duration, j.jitter) - } - return jitteredPeriod -} - -// Backoff implements BackoffManager.Backoff, it returns a timer so caller can block on the timer for jittered backoff. -// The returned timer must be drained before calling Backoff() the second time -func (j *jitteredBackoffManagerImpl) Backoff() clock.Timer { - backoff := j.getNextBackoff() - if j.backoffTimer == nil { - j.backoffTimer = j.clock.NewTimer(backoff) - } else { - j.backoffTimer.Reset(backoff) - } - return j.backoffTimer -} - -// ExponentialBackoff repeats a condition check with exponential backoff. -// -// It repeatedly checks the condition and then sleeps, using `backoff.Step()` -// to determine the length of the sleep and adjust Duration and Steps. -// Stops and returns as soon as: -// 1. the condition check returns true or an error, -// 2. `backoff.Steps` checks of the condition have been done, or -// 3. a sleep truncated by the cap on duration has been completed. 
-// In case (1) the returned error is what the condition function returned. -// In all other cases, ErrWaitTimeout is returned. -func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error { - for backoff.Steps > 0 { - if ok, err := runConditionWithCrashProtection(condition); err != nil || ok { - return err - } - if backoff.Steps == 1 { - break - } - time.Sleep(backoff.Step()) - } - return ErrWaitTimeout -} - -// Poll tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// Poll always waits the interval before the run of 'condition'. -// 'condition' will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to Poll something forever, see PollInfinite. -func Poll(interval, timeout time.Duration, condition ConditionFunc) error { - return pollInternal(poller(interval, timeout), condition) -} - -func pollInternal(wait WaitFunc, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return WaitFor(wait, condition, done) -} - -// PollImmediate tries a condition func until it returns true, an error, or the timeout -// is reached. -// -// PollImmediate always checks 'condition' before waiting for the interval. 'condition' -// will always be invoked at least once. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -// -// If you want to immediately Poll something forever, see PollImmediateInfinite. 
-func PollImmediate(interval, timeout time.Duration, condition ConditionFunc) error { - return pollImmediateInternal(poller(interval, timeout), condition) -} - -func pollImmediateInternal(wait WaitFunc, condition ConditionFunc) error { - done, err := runConditionWithCrashProtection(condition) - if err != nil { - return err - } - if done { - return nil - } - return pollInternal(wait, condition) -} - -// PollInfinite tries a condition func until it returns true or an error -// -// PollInfinite always waits the interval before the run of 'condition'. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollInfinite(interval time.Duration, condition ConditionFunc) error { - done := make(chan struct{}) - defer close(done) - return PollUntil(interval, condition, done) -} - -// PollImmediateInfinite tries a condition func until it returns true or an error -// -// PollImmediateInfinite runs the 'condition' before waiting for the interval. -// -// Some intervals may be missed if the condition takes too long or the time -// window is too short. -func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) error { - done, err := runConditionWithCrashProtection(condition) - if err != nil { - return err - } - if done { - return nil - } - return PollInfinite(interval, condition) -} - -// PollUntil tries a condition func until it returns true, an error or stopCh is -// closed. -// -// PollUntil always waits interval before the first run of 'condition'. -// 'condition' will always be invoked at least once. -func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - ctx, cancel := contextForChannel(stopCh) - defer cancel() - return WaitFor(poller(interval, 0), condition, ctx.Done()) -} - -// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed. -// -// PollImmediateUntil runs the 'condition' before waiting for the interval. 
-// 'condition' will always be invoked at least once. -func PollImmediateUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error { - done, err := condition() - if err != nil { - return err - } - if done { - return nil - } - select { - case <-stopCh: - return ErrWaitTimeout - default: - return PollUntil(interval, condition, stopCh) - } -} - -// WaitFunc creates a channel that receives an item every time a test -// should be executed and is closed when the last test should be invoked. -type WaitFunc func(done <-chan struct{}) <-chan struct{} - -// WaitFor continually checks 'fn' as driven by 'wait'. -// -// WaitFor gets a channel from 'wait()'', and then invokes 'fn' once for every value -// placed on the channel and once more when the channel is closed. If the channel is closed -// and 'fn' returns false without error, WaitFor returns ErrWaitTimeout. -// -// If 'fn' returns an error the loop ends and that error is returned. If -// 'fn' returns true the loop ends and nil is returned. -// -// ErrWaitTimeout will be returned if the 'done' channel is closed without fn ever -// returning true. -// -// When the done channel is closed, because the golang `select` statement is -// "uniform pseudo-random", the `fn` might still run one or multiple time, -// though eventually `WaitFor` will return. -func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error { - stopCh := make(chan struct{}) - defer close(stopCh) - c := wait(stopCh) - for { - select { - case _, open := <-c: - ok, err := runConditionWithCrashProtection(fn) - if err != nil { - return err - } - if ok { - return nil - } - if !open { - return ErrWaitTimeout - } - case <-done: - return ErrWaitTimeout - } - } -} - -// poller returns a WaitFunc that will send to the channel every interval until -// timeout has elapsed and then closes the channel. -// -// Over very short intervals you may receive no ticks before the channel is -// closed. 
A timeout of 0 is interpreted as an infinity, and in such a case -// it would be the caller's responsibility to close the done channel. -// Failure to do so would result in a leaked goroutine. -// -// Output ticks are not buffered. If the channel is not ready to receive an -// item, the tick is skipped. -func poller(interval, timeout time.Duration) WaitFunc { - return WaitFunc(func(done <-chan struct{}) <-chan struct{} { - ch := make(chan struct{}) - - go func() { - defer close(ch) - - tick := time.NewTicker(interval) - defer tick.Stop() - - var after <-chan time.Time - if timeout != 0 { - // time.After is more convenient, but it - // potentially leaves timers around much longer - // than necessary if we exit early. - timer := time.NewTimer(timeout) - after = timer.C - defer timer.Stop() - } - - for { - select { - case <-tick.C: - // If the consumer isn't ready for this signal drop it and - // check the other channels. - select { - case ch <- struct{}{}: - default: - } - case <-after: - return - case <-done: - return - } - } - }() - - return ch - }) -} diff --git a/pkg/kutil/yaml/decoder.go b/pkg/kutil/yaml/decoder.go deleted file mode 100644 index 492171f..0000000 --- a/pkg/kutil/yaml/decoder.go +++ /dev/null @@ -1,348 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package yaml - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "unicode" - - "k8s.io/klog/v2" - "sigs.k8s.io/yaml" -) - -// ToJSON converts a single YAML document into a JSON document -// or returns an error. If the document appears to be JSON the -// YAML decoding path is not used (so that error messages are -// JSON specific). -func ToJSON(data []byte) ([]byte, error) { - if hasJSONPrefix(data) { - return data, nil - } - return yaml.YAMLToJSON(data) -} - -// YAMLToJSONDecoder decodes YAML documents from an io.Reader by -// separating individual documents. It first converts the YAML -// body to JSON, then unmarshals the JSON. -type YAMLToJSONDecoder struct { - reader Reader -} - -// NewYAMLToJSONDecoder decodes YAML documents from the provided -// stream in chunks by converting each document (as defined by -// the YAML spec) into its own chunk, converting it to JSON via -// yaml.YAMLToJSON, and then passing it to json.Decoder. -func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { - reader := bufio.NewReader(r) - return &YAMLToJSONDecoder{ - reader: NewYAMLReader(reader), - } -} - -// Decode reads a YAML document as JSON from the stream or returns -// an error. The decoding rules match json.Unmarshal, not -// yaml.Unmarshal. -func (d *YAMLToJSONDecoder) Decode(into interface{}) error { - bytes, err := d.reader.Read() - if err != nil && err != io.EOF { - return err - } - - if len(bytes) != 0 { - err := yaml.Unmarshal(bytes, into) - if err != nil { - return YAMLSyntaxError{err} - } - } - return err -} - -// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if -// the data is not sufficient. -type YAMLDecoder struct { - r io.ReadCloser - scanner *bufio.Scanner - remaining []byte -} - -// NewDocumentDecoder decodes YAML documents from the provided -// stream in chunks by converting each document (as defined by -// the YAML spec) into its own chunk. 
io.ErrShortBuffer will be -// returned if the entire buffer could not be read to assist -// the caller in framing the chunk. -func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { - scanner := bufio.NewScanner(r) - // the size of initial allocation for buffer 4k - buf := make([]byte, 4*1024) - // the maximum size used to buffer a token 5M - scanner.Buffer(buf, 5*1024*1024) - scanner.Split(splitYAMLDocument) - return &YAMLDecoder{ - r: r, - scanner: scanner, - } -} - -// Read reads the previous slice into the buffer, or attempts to read -// the next chunk. -// TODO: switch to readline approach. -func (d *YAMLDecoder) Read(data []byte) (n int, err error) { - left := len(d.remaining) - if left == 0 { - // return the next chunk from the stream - if !d.scanner.Scan() { - err := d.scanner.Err() - if err == nil { - err = io.EOF - } - return 0, err - } - out := d.scanner.Bytes() - d.remaining = out - left = len(out) - } - - // fits within data - if left <= len(data) { - copy(data, d.remaining) - d.remaining = nil - return left, nil - } - - // caller will need to reread - copy(data, d.remaining[:len(data)]) - d.remaining = d.remaining[len(data):] - return len(data), io.ErrShortBuffer -} - -func (d *YAMLDecoder) Close() error { - return d.r.Close() -} - -const yamlSeparator = "\n---" -const separator = "---" - -// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. 
-func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { - if atEOF && len(data) == 0 { - return 0, nil, nil - } - sep := len([]byte(yamlSeparator)) - if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { - // We have a potential document terminator - i += sep - after := data[i:] - if len(after) == 0 { - // we can't read any more characters - if atEOF { - return len(data), data[:len(data)-sep], nil - } - return 0, nil, nil - } - if j := bytes.IndexByte(after, '\n'); j >= 0 { - return i + j + 1, data[0 : i-sep], nil - } - return 0, nil, nil - } - // If we're at EOF, we have a final, non-terminated line. Return it. - if atEOF { - return len(data), data, nil - } - // Request more data. - return 0, nil, nil -} - -// decoder is a convenience interface for Decode. -type decoder interface { - Decode(into interface{}) error -} - -// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or -// YAML documents by sniffing for a leading { character. -type YAMLOrJSONDecoder struct { - r io.Reader - bufferSize int - - decoder decoder - rawData []byte -} - -type JSONSyntaxError struct { - Line int - Err error -} - -func (e JSONSyntaxError) Error() string { - return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error()) -} - -type YAMLSyntaxError struct { - err error -} - -func (e YAMLSyntaxError) Error() string { - return e.err.Error() -} - -// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents -// or JSON documents from the given reader as a stream. bufferSize determines -// how far into the stream the decoder will look to figure out whether this -// is a JSON stream (has whitespace followed by an open brace). -func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { - return &YAMLOrJSONDecoder{ - r: r, - bufferSize: bufferSize, - } -} - -// Decode unmarshals the next object from the underlying stream into the -// provide object, or returns an error. 
-func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { - if d.decoder == nil { - buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) - if isJSON { - d.decoder = json.NewDecoder(buffer) - d.rawData = origData - } else { - d.decoder = NewYAMLToJSONDecoder(buffer) - } - } - err := d.decoder.Decode(into) - if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { - if syntax, ok := err.(*json.SyntaxError); ok { - data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) - if readErr != nil { - klog.V(4).Infof("reading stream failed: %v", readErr) - } - js := string(data) - - // if contents from io.Reader are not complete, - // use the original raw data to prevent panic - if int64(len(js)) <= syntax.Offset { - js = string(d.rawData) - } - - start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 - line := strings.Count(js[:start], "\n") - return JSONSyntaxError{ - Line: line, - Err: fmt.Errorf(syntax.Error()), - } - } - } - return err -} - -type Reader interface { - Read() ([]byte, error) -} - -type YAMLReader struct { - reader Reader -} - -func NewYAMLReader(r *bufio.Reader) *YAMLReader { - return &YAMLReader{ - reader: &LineReader{reader: r}, - } -} - -// Read returns a full YAML document. -func (r *YAMLReader) Read() ([]byte, error) { - var buffer bytes.Buffer - for { - line, err := r.reader.Read() - if err != nil && err != io.EOF { - return nil, err - } - - sep := len([]byte(separator)) - if i := bytes.Index(line, []byte(separator)); i == 0 { - // We have a potential document terminator - i += sep - after := line[i:] - if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { - if buffer.Len() != 0 { - return buffer.Bytes(), nil - } - if err == io.EOF { - return nil, err - } - } - } - if err == io.EOF { - if buffer.Len() != 0 { - // If we're at EOF, we have a final, non-terminated line. Return it. 
- return buffer.Bytes(), nil - } - return nil, err - } - buffer.Write(line) - } -} - -type LineReader struct { - reader *bufio.Reader -} - -// Read returns a single line (with '\n' ended) from the underlying reader. -// An error is returned iff there is an error with the underlying reader. -func (r *LineReader) Read() ([]byte, error) { - var ( - isPrefix bool = true - err error = nil - line []byte - buffer bytes.Buffer - ) - - for isPrefix && err == nil { - line, isPrefix, err = r.reader.ReadLine() - buffer.Write(line) - } - buffer.WriteByte('\n') - return buffer.Bytes(), err -} - -// GuessJSONStream scans the provided reader up to size, looking -// for an open brace indicating this is JSON. It will return the -// bufio.Reader it creates for the consumer. -func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { - buffer := bufio.NewReaderSize(r, size) - b, _ := buffer.Peek(size) - return buffer, b, hasJSONPrefix(b) -} - -var jsonPrefix = []byte("{") - -// hasJSONPrefix returns true if the provided buffer appears to start with -// a JSON open brace. -func hasJSONPrefix(buf []byte) bool { - return hasPrefix(buf, jsonPrefix) -} - -// Return true if the first non-whitespace bytes in buf is -// prefix. 
-func hasPrefix(buf []byte, prefix []byte) bool { - trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) - return bytes.HasPrefix(trim, prefix) -} diff --git a/pkg/logger/example/single_test.go b/pkg/logger/example/single_test.go new file mode 100644 index 0000000..87048f3 --- /dev/null +++ b/pkg/logger/example/single_test.go @@ -0,0 +1,46 @@ +package example + +import ( + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/logger/redis_hook" + "go.uber.org/zap/zapcore" + "log" +) + +var ( + EnvEnableRedisOutput bool // Simulated environment variables + EnvDebug bool +) + +func init() { + EnvEnableRedisOutput = true + EnvDebug = true + initLogger() +} + +func initLogger() { + conf := &logger.Conf{ + Level: zapcore.DebugLevel, // Output log level + Caller: true, //Whether to open record calling folder + number of lines + function name + Debug: true, // Enable debug + // All logs output to redis are above info level + AppInfo: &logger.ConfigAppData{ + AppVersion: "1.0", + Language: "zh-cn", + }, + } + if !EnvDebug || EnvEnableRedisOutput { + // In case of production environment + conf.Level = zapcore.InfoLevel + conf.HookConfig = &redis_hook.HookConfig{ + Key: "log_key", + Host: "redis.msp", + Port: 6380, + } + } + err := logger.GlobalConfig(*conf) + if err != nil { + log.Print("[ERR] Logger init error: ", err) + } + logger.Infof("info test: %v", "data") +} diff --git a/pkg/logger/log.go b/pkg/logger/log.go new file mode 100644 index 0000000..daa6ef4 --- /dev/null +++ b/pkg/logger/log.go @@ -0,0 +1,168 @@ +package logger + +// Named Tag type name +func Named(name string) *Logger { + l := Clone(std) + return &Logger{ + SugaredLogger: l.Named(name), + conf: l.conf, + } +} + +// With adds a variadic number of fields to the logging context. It accepts a +// mix of strongly-typed Field objects and loosely-typed key-value pairs. When +// processing pairs, the first element of the pair is used as the field key +// and the second as the field value. 
+// +// For example, +// Logger.With( +// "hello", "world", +// "failure", errors.New("oh no"), +// Stack(), +// "count", 42, +// "user", User{Name: "alice"}, +// ) +// is the equivalent of +// unsugared.With( +// String("hello", "world"), +// String("failure", "oh no"), +// Stack(), +// Int("count", 42), +// Object("user", User{Name: "alice"}), +// ) +// +// Note that the keys in key-value pairs should be strings. In development, +// passing a non-string key panics. In production, the logger is more +// forgiving: a separate error is logged, but the key-value pair is skipped +// and execution continues. Passing an orphaned key triggers similar behavior: +// panics in development and errors in production. +func With(args ...interface{}) *Logger { + l := Clone(std) + return &Logger{ + SugaredLogger: l.With(args...), + conf: l.conf, + } +} + +// Debug uses fmt.Sprint to construct and log a message. +func Debug(args ...interface{}) { + stdCallerFix.Debug(args...) +} + +// Info uses fmt.Sprint to construct and log a message. +func Info(args ...interface{}) { + stdCallerFix.Info(args...) +} + +// Warn uses fmt.Sprint to construct and log a message. +func Warn(args ...interface{}) { + stdCallerFix.Warn(args...) +} + +// Error uses fmt.Sprint to construct and log a message. +func Error(args ...interface{}) { + stdCallerFix.Error(args...) +} + +// DPanic uses fmt.Sprint to construct and log a message. In development, the +// logger then panics. (See DPanicLevel for details.) +func DPanic(args ...interface{}) { + stdCallerFix.DPanic(args...) +} + +// Panic uses fmt.Sprint to construct and log a message, then panics. +func Panic(args ...interface{}) { + stdCallerFix.Panic(args...) +} + +// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit. +func Fatal(args ...interface{}) { + stdCallerFix.Fatal(args...) +} + +// Debugf uses fmt.Sprintf to log a templated message. 
+func Debugf(template string, args ...interface{}) { + stdCallerFix.Debugf(template, args...) +} + +// Infof uses fmt.Sprintf to log a templated message. +func Infof(template string, args ...interface{}) { + stdCallerFix.Infof(template, args...) +} + +// Warnf uses fmt.Sprintf to log a templated message. +func Warnf(template string, args ...interface{}) { + stdCallerFix.Warnf(template, args...) +} + +// Errorf uses fmt.Sprintf to log a templated message. +func Errorf(template string, args ...interface{}) { + stdCallerFix.Errorf(template, args...) +} + +// DPanicf uses fmt.Sprintf to log a templated message. In development, the +// logger then panics. (See DPanicLevel for details.) +func DPanicf(template string, args ...interface{}) { + stdCallerFix.DPanicf(template, args...) +} + +// Panicf uses fmt.Sprintf to log a templated message, then panics. +func Panicf(template string, args ...interface{}) { + stdCallerFix.Panicf(template, args...) +} + +// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit. +func Fatalf(template string, args ...interface{}) { + stdCallerFix.Fatalf(template, args...) +} + +// Debugw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +// +// When debug-level logging is disabled, this is much faster than +// s.With(keysAndValues).Debug(msg) +func Debugw(msg string, keysAndValues ...interface{}) { + stdCallerFix.Debugw(msg, keysAndValues...) +} + +// Infow logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func Infow(msg string, keysAndValues ...interface{}) { + stdCallerFix.Infow(msg, keysAndValues...) +} + +// Warnw logs a message with some additional context. The variadic key-value +// pairs are treated as they are in With. +func Warnw(msg string, keysAndValues ...interface{}) { + stdCallerFix.Warnw(msg, keysAndValues...) +} + +// Errorw logs a message with some additional context. 
The variadic key-value +// pairs are treated as they are in With. +func Errorw(msg string, keysAndValues ...interface{}) { + stdCallerFix.Errorw(msg, keysAndValues...) +} + +// DPanicw logs a message with some additional context. In development, the +// logger then panics. (See DPanicLevel for details.) The variadic key-value +// pairs are treated as they are in With. +func DPanicw(msg string, keysAndValues ...interface{}) { + stdCallerFix.DPanicw(msg, keysAndValues...) +} + +// Panicw logs a message with some additional context, then panics. The +// variadic key-value pairs are treated as they are in With. +func Panicw(msg string, keysAndValues ...interface{}) { + stdCallerFix.Panicw(msg, keysAndValues...) +} + +// Fatalw logs a message with some additional context, then calls os.Exit. The +// variadic key-value pairs are treated as they are in With. +func Fatalw(msg string, keysAndValues ...interface{}) { + stdCallerFix.Fatalw(msg, keysAndValues...) +} + +// Sync flushes any buffered log entries. +func Sync() error { + return stdCallerFix.Sync() +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go new file mode 100644 index 0000000..226eb73 --- /dev/null +++ b/pkg/logger/logger.go @@ -0,0 +1,139 @@ +package logger + +import ( + "github.com/pkg/errors" + "github.com/ztalab/ZACA/pkg/logger/redis_hook" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +var _ciCore zapcore.Core + +var ( + std *Logger + stdCallerFix *Logger + + n *zap.Logger +) + +// Logger +type Logger struct { + *zap.SugaredLogger + conf *Conf +} + +// Conf to configure +type Conf struct { + Caller bool + Debug bool + Level zapcore.Level + Encoding string // json, console + AppInfo *ConfigAppData // fixed fields + HookConfig *redis_hook.HookConfig // set to nil if disabled + ZapConfig *zap.Config // for custom +} + +type ConfigAppData struct { + AppName string + AppID string + AppVersion string + AppKey string + Channel string + SubOrgKey string + Language string +} + +// Clone ... 
+func Clone(l *Logger) *Logger { + c := *l.conf + return &Logger{ + SugaredLogger: l.SugaredLogger, + conf: &c, + } +} + +// S Get singleton +func S() *Logger { + return std +} + +// N Zap Logger +func N() *zap.Logger { + return n +} + +// GlobalConfig init +func GlobalConfig(conf Conf) error { + c := conf + l, err := newLogger(&c) + if err != nil { + return err + } + std = &Logger{ + SugaredLogger: l.Sugar(), + conf: &c, + } + stdCallerFix = &Logger{ + SugaredLogger: l.WithOptions(zap.AddCallerSkip(1)).Sugar(), + conf: &c, + } + n = std.Desugar() + return nil +} + +func init() { + l, _ := newLogger(&Conf{ + Level: zapcore.InfoLevel, + }) + std = &Logger{ + SugaredLogger: l.Sugar(), + conf: &Conf{}, + } + stdCallerFix = &Logger{ + SugaredLogger: l.WithOptions(zap.AddCallerSkip(1)).Sugar(), + conf: &Conf{}, + } + n = std.Desugar() +} + +// NewZapLogger Create custom Logger +func NewZapLogger(c *Conf) (l *zap.Logger, err error) { + return newLogger(c) +} + +func newLogger(c *Conf) (l *zap.Logger, err error) { + var conf zap.Config + if c.ZapConfig != nil { + conf = *c.ZapConfig + } else { + conf = zap.NewProductionConfig() + conf.EncoderConfig = zap.NewDevelopmentEncoderConfig() + if c.Debug { + conf = zap.NewDevelopmentConfig() + conf.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + } + if c.Encoding != "" { + conf.Encoding = c.Encoding + } else { + conf.Encoding = "console" + } + } + conf.Level = zap.NewAtomicLevelAt(c.Level) + if c.HookConfig != nil { + hook, _ := redis_hook.NewHook(*c.HookConfig) + _ciCore = NewCiCore(hook) + l, err = conf.Build(zap.WrapCore(func(core zapcore.Core) zapcore.Core { + return zapcore.NewTee(core, _ciCore) + })) + if err != nil { + return nil, errors.Wrap(err, "zap core init error") + } + } else { + l, err = conf.Build() + } + if err != nil { + return nil, errors.Wrap(err, "zap core init error") + } + l = l.WithOptions(zap.WithCaller(c.Caller), zap.AddStacktrace(zapcore.ErrorLevel)) + return +} diff --git 
a/pkg/logger/logger_test.go b/pkg/logger/logger_test.go new file mode 100644 index 0000000..dcf5355 --- /dev/null +++ b/pkg/logger/logger_test.go @@ -0,0 +1,33 @@ +package logger + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "testing" +) + +func TestNewLogger(t *testing.T) { + defer Sync() + GlobalConfig(Conf{ + Debug: true, + Caller: true, + AppInfo: &ConfigAppData{ + AppName: "test", + AppID: "test", + AppVersion: "1.0", + AppKey: "test", + Channel: "1", + SubOrgKey: "key", + Language: "zh", + }, + }) + S().Info("test") +} + +func TestColorLogger(t *testing.T) { + config := zap.NewDevelopmentConfig() + config.EncoderConfig.EncodeLevel = zapcore.CapitalColorLevelEncoder + logger, _ := config.Build() + + logger.Info("Now logs should be colored") +} diff --git a/pkg/logger/redis_hook/loggers.go b/pkg/logger/redis_hook/loggers.go new file mode 100644 index 0000000..d6c461b --- /dev/null +++ b/pkg/logger/redis_hook/loggers.go @@ -0,0 +1,29 @@ +package redis_hook + +import ( + "go.uber.org/zap/zapcore" + "strings" +) + +// zap need extra data for fields +func CreateZapOriginLogMessage(entry *zapcore.Entry, data map[string]interface{}) map[string]interface{} { + fields := make(map[string]interface{}, len(data)) + if data != nil { + for k, v := range data { + fields[k] = v + } + } + var level = strings.ToUpper(entry.Level.String()) + if level == "ERROR" { + level = "ERR" + } + if level == "WARN" { + level = "WARNING" + } + if level == "FATAL" { + level = "CRIT" + } + fields["level"] = level + fields["message"] = entry.Message + return fields +} diff --git a/pkg/logger/redis_hook/redis_hook.go b/pkg/logger/redis_hook/redis_hook.go new file mode 100644 index 0000000..d42e123 --- /dev/null +++ b/pkg/logger/redis_hook/redis_hook.go @@ -0,0 +1,74 @@ +package redis_hook + +import ( + "fmt" + "github.com/garyburd/redigo/redis" + "time" +) + +// HookConfig stores configuration needed to setup the hook +type HookConfig struct { + Key string + Host string + 
Password string + Port int + TTL int +} + +// RedisHook to sends logs to Redis server +type RedisHook struct { + RedisPool *redis.Pool + RedisHost string + RedisKey string + LogstashFormat string + AppName string + Hostname string + RedisPort int + TTL int +} + +// NewHook creates a hook to be added to an instance of logger +func NewHook(config HookConfig) (redisHook *RedisHook, err error) { + pool := newRedisConnectionPool(config.Host, config.Password, config.Port, 0) + + // test if connection with REDIS can be established + conn := pool.Get() + defer conn.Close() + + // check connection + _, err = conn.Do("PING") + if err != nil { + err = fmt.Errorf("unable to connect to REDIS: %s", err) + } + redisHook = &RedisHook{ + RedisHost: config.Host, + RedisPool: pool, + RedisKey: config.Key, + LogstashFormat: "origin", + TTL: config.TTL, + } + return +} + +func newRedisConnectionPool(server, password string, port int, db int) *redis.Pool { + hostPort := fmt.Sprintf("%s:%d", server, port) + return &redis.Pool{ + MaxIdle: 3, + IdleTimeout: 240 * time.Second, + Dial: func() (redis.Conn, error) { + c, err := redis.Dial("tcp", hostPort, redis.DialDatabase(db), + redis.DialPassword(password), + redis.DialConnectTimeout(time.Second), + redis.DialReadTimeout(time.Millisecond*100), + redis.DialWriteTimeout(time.Millisecond*100)) + if err != nil { + return nil, err + } + return c, err + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + } +} diff --git a/pkg/logger/zapcore_impl.go b/pkg/logger/zapcore_impl.go new file mode 100644 index 0000000..6b4b16a --- /dev/null +++ b/pkg/logger/zapcore_impl.go @@ -0,0 +1,166 @@ +package logger + +import ( + "fmt" + jsoniter "github.com/json-iterator/go" + "github.com/ztalab/ZACA/pkg/logger/redis_hook" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "os" + "strings" +) + +type ciCore struct { + zapcore.LevelEnabler + + RedisHook *redis_hook.RedisHook + + fields map[string]interface{} +} + 
+var _ zapcore.Core = (*ciCore)(nil) + +// NewCiCore Create custom CiCore +func NewCiCore(hook *redis_hook.RedisHook) zapcore.Core { + return newCiCore(hook) +} + +func newCiCore(hook *redis_hook.RedisHook) *ciCore { + core := &ciCore{ + LevelEnabler: zapcore.InfoLevel, + RedisHook: hook, + fields: make(map[string]interface{}), + } + return core +} + +func (c *ciCore) getAllFields() map[string]interface{} { + return c.fields +} + +func (c *ciCore) Enabled(lvl zapcore.Level) bool { + if lvl < zapcore.InfoLevel { + return false + } + return true +} + +func (c *ciCore) combineFields(fields []zapcore.Field) map[string]interface{} { + // Copy our map. + m := make(map[string]interface{}, len(c.fields)+len(fields)) + for k, v := range c.fields { + m[k] = v + } + + // Add fields to an in-memory encoder. + enc := zapcore.NewMapObjectEncoder() + for _, f := range fields { + f.AddTo(enc) + } + + // Merge the two maps. + for k, v := range enc.Fields { + m[k] = v + } + + return m +} + +func (c *ciCore) With(fields []zapcore.Field) zapcore.Core { + m := c.combineFields(fields) + return &ciCore{ + LevelEnabler: c.LevelEnabler, + RedisHook: c.RedisHook, + fields: m, + } +} + +func (c *ciCore) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { + if c.Enabled(e.Level) { + return ce.AddCore(e, c) + } + return ce +} + +func (c *ciCore) Write(e zapcore.Entry, fields []zapcore.Field) error { + var callerStack strings.Builder + if e.Caller.Defined { + callerStack.WriteString(e.Caller.TrimmedPath()) + callerStack.WriteString(" ") + callerStack.WriteString(e.Caller.Function) + } + if e.Stack != "" { + callerStack.WriteString("\n") + callerStack.WriteString(e.Stack) + } + fields = append(fields, + zap.String("stack", callerStack.String()), + ) + + loggerName := "default" + if e.LoggerName != "" { + loggerName = e.LoggerName + } + + var message strings.Builder + message.WriteString(loggerName) + message.WriteString(": ") + message.WriteString(e.Message) + + 
combinedFields := c.combineFields(fields) + + additionFields := make(map[string]interface{}, len(combinedFields)) + for field, value := range combinedFields { + additionFields[field] = value + } + + // Optimize extra output form [extra1]: data, [extra2]: data2 + if len(additionFields) > 0 { + for k, v := range additionFields { + var output string + if str, ok := v.(string); ok { + output = str + } else { + output, _ = jsoniter.MarshalToString(v) + } + message.WriteString(" [") + message.WriteString(k) + message.WriteString("]: ") + message.WriteString(output) + } + } + + (&e).Message = message.String() + + msg := redis_hook.CreateZapOriginLogMessage(&e, combinedFields) + + // Marshal into json message + js, err := jsoniter.Marshal(msg) + if err != nil { + return fmt.Errorf("error creating message for REDIS: %s", err) + } + + // get connection from pool + conn := c.RedisHook.RedisPool.Get() + defer conn.Close() + + // send message + _, err = conn.Do("RPUSH", c.RedisHook.RedisKey, js) + if err != nil { + fmt.Fprintln(os.Stdout, "stash log: ", string(js)) + return fmt.Errorf("error sending message to REDIS: %s", err) + } + + if c.RedisHook.TTL != 0 { + _, err = conn.Do("EXPIRE", c.RedisHook.RedisKey, c.RedisHook.TTL) + if err != nil { + return fmt.Errorf("error setting TTL to key: %s, %s", c.RedisHook.RedisKey, err) + } + } + + return nil +} + +func (c *ciCore) Sync() error { + return nil +} diff --git a/pkg/memorycacher/README.md b/pkg/memorycacher/README.md new file mode 100644 index 0000000..7411ca1 --- /dev/null +++ b/pkg/memorycacher/README.md @@ -0,0 +1,83 @@ + + +# Base on go-cache + +go-cache is an in-memory key:value store/cache similar to memcached that is +suitable for applications running on a single machine. Its major advantage is +that, being essentially a thread-safe `map[string]interface{}` with expiration +times, it doesn't need to serialize or transmit its contents over the network. 
+ +Any object can be stored, for a given duration or forever, and the cache can be +safely used by multiple goroutines. + +Although go-cache isn't meant to be used as a persistent datastore, the entire +cache can be saved to and loaded from a file (using `c.Items()` to retrieve the +items map to serialize, and `NewFrom()` to create a cache from a deserialized +one) to recover from downtime quickly. (See the docs for `NewFrom()` for caveats.) + +### Usage + +```go +import ( + "fmt" + "memorycache" + "time" +) + +func main() { + // Create a cache with a default expiration time of 5 minutes, and which + // purges expired items every 10 minutes + c := memorycache.New(5*time.Minute, 10*time.Minute) + + // Set the value of the key "foo" to "bar", with the default expiration time + c.Set("foo", "bar", cache.DefaultExpiration) + + // Set the value of the key "baz" to 42, with no expiration time + // (the item won't be removed until it is re-set, or removed using + // c.Delete("baz") + c.Set("baz", 42, cache.NoExpiration) + + // Get the string associated with the key "foo" from the cache + foo, found := c.Get("foo") + if found { + fmt.Println(foo) + } + + // Since Go is statically typed, and cache values can be anything, type + // assertion is needed when values are being passed to functions that don't + // take arbitrary types, (i.e. interface{}). The simplest way to do this for + // values which will only be used once--e.g. for passing to another + // function--is: + foo, found := c.Get("foo") + if found { + MyFunction(foo.(string)) + } + + // This gets tedious if the value is used several times in the same function. + // You might do either of the following instead: + if x, found := c.Get("foo"); found { + foo := x.(string) + // ... + } + // or + var foo string + if x, found := c.Get("foo"); found { + foo = x.(string) + } + // ... + // foo can then be passed around freely as a string + + // Want performance? Store pointers! 
+ c.Set("foo", &MyStruct, cache.DefaultExpiration) + if x, found := c.Get("foo"); found { + foo := x.(*MyStruct) + // ... + } +} +``` \ No newline at end of file diff --git a/pkg/memorycacher/cache.go b/pkg/memorycacher/cache.go new file mode 100644 index 0000000..6dcca72 --- /dev/null +++ b/pkg/memorycacher/cache.go @@ -0,0 +1,1221 @@ +/* + * @Author: patrickmn,gitsrc + * @Date: 2020-07-09 13:17:30 + * @LastEditors: gitsrc + * @LastEditTime: 2020-07-09 13:22:16 + * @FilePath: /ServiceCar/utils/memorycache/cache.go + */ + +package memorycacher + +import ( + "encoding/gob" + "errors" + "fmt" + "io" + "os" + "runtime" + "sync" + "time" +) + +type Item struct { + Object interface{} + Expiration int64 +} + +// const ( +// cleanStatusPending = 0 +// cleanStatusRunning = 1 +// ) + +var ( + maxItemsCountErr = errors.New("reach max items count.") +) + +// Returns true if the item has expired. +func (item Item) Expired() bool { + if item.Expiration == 0 { + return false + } + return time.Now().UnixNano() > item.Expiration +} + +const ( + // For use with functions that take an expiration time. + NoExpiration time.Duration = -1 + // For use with functions that take an expiration time. Equivalent to + // passing in the same expiration duration as was given to New() or + // NewFrom() when the cache was created (e.g. 5 minutes.) + DefaultExpiration time.Duration = 0 +) + +type Cache struct { + *cache + // If this is confusing, see the comment at the bottom of New() +} + +type cache struct { + defaultExpiration time.Duration + maxItemsCount int //Maximum number of items + items map[string]Item + mu sync.RWMutex + onEvicted func(string, interface{}) + lastCleanTime time.Time //Last cleaning time + janitor *janitor +} + +//Determine whether the amount of schema inside the map schema structure has reached the maximum number limit +func (c *cache) IsReachMaxItemsCount() bool { + return c.ItemCount() >= c.maxItemsCount +} + +// Add an item to the cache, replacing any existing item. 
If the duration is 0 +// (DefaultExpiration), the cache's default expiration time is used. If it is -1 +// (NoExpiration), the item never expires. +func (c *cache) Set(k string, x interface{}, d time.Duration) { + if c.IsReachMaxItemsCount() { + c.ShoudClean() + return + } + + // "Inlining" of set + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.mu.Lock() + c.items[k] = Item{ + Object: x, + Expiration: e, + } + // TODO: Calls to mu.Unlock are currently not deferred because defer + // adds ~200 ns (as of go1.) + c.mu.Unlock() +} + +func (c *cache) set(k string, x interface{}, d time.Duration) { + var e int64 + if d == DefaultExpiration { + d = c.defaultExpiration + } + if d > 0 { + e = time.Now().Add(d).UnixNano() + } + c.items[k] = Item{ + Object: x, + Expiration: e, + } +} + +// Add an item to the cache, replacing any existing item, using the default +// expiration. +func (c *cache) SetDefault(k string, x interface{}) { + c.Set(k, x, DefaultExpiration) +} + +// Add an item to the cache only if an item doesn't already exist for the given +// key, or if the existing item has expired. Returns an error otherwise. +func (c *cache) Add(k string, x interface{}, d time.Duration) error { + if c.IsReachMaxItemsCount() { + c.ShoudClean() + return maxItemsCountErr + } + + c.mu.Lock() + _, found := c.get(k) + if found { + c.mu.Unlock() + return fmt.Errorf("Item %s already exists", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Set a new value for the cache key only if it already exists, and the existing +// item hasn't expired. Returns an error otherwise. +func (c *cache) Replace(k string, x interface{}, d time.Duration) error { + c.mu.Lock() + _, found := c.get(k) + if !found { + c.mu.Unlock() + return fmt.Errorf("Item %s doesn't exist", k) + } + c.set(k, x, d) + c.mu.Unlock() + return nil +} + +// Get an item from the cache. 
Returns the item or nil, and a bool indicating +// whether the key was found. +func (c *cache) Get(k string) (interface{}, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, false + } + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, false + } + } + c.mu.RUnlock() + return item.Object, true +} + +// GetWithExpiration returns an item and its expiration time from the cache. +// It returns the item or nil, the expiration time if one is set (if the item +// never expires a zero value for time.Time is returned), and a bool indicating +// whether the key was found. +func (c *cache) GetWithExpiration(k string) (interface{}, time.Time, bool) { + c.mu.RLock() + // "Inlining" of get and Expired + item, found := c.items[k] + if !found { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + c.mu.RUnlock() + return nil, time.Time{}, false + } + + // Return the item and the expiration time + c.mu.RUnlock() + return item.Object, time.Unix(0, item.Expiration), true + } + + // If expiration <= 0 (i.e. no expiration time set) then return the item + // and a zeroed time.Time + c.mu.RUnlock() + return item.Object, time.Time{}, true +} + +func (c *cache) get(k string) (interface{}, bool) { + item, found := c.items[k] + if !found { + return nil, false + } + // "Inlining" of Expired + if item.Expiration > 0 { + if time.Now().UnixNano() > item.Expiration { + return nil, false + } + } + return item.Object, true +} + +//send map clean to cache janitor +func (c *cache) ShoudClean() { + if c.janitor.shoudClean == nil { + return + } + select { + case c.janitor.shoudClean <- true: + default: + } +} + +// Increment an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. 
Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to increment it by n. To retrieve the incremented value, use one +// of the specialized methods, e.g. IncrementInt64. +func (c *cache) Increment(k string, n int64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) + int(n) + case int8: + v.Object = v.Object.(int8) + int8(n) + case int16: + v.Object = v.Object.(int16) + int16(n) + case int32: + v.Object = v.Object.(int32) + int32(n) + case int64: + v.Object = v.Object.(int64) + n + case uint: + v.Object = v.Object.(uint) + uint(n) + case uintptr: + v.Object = v.Object.(uintptr) + uintptr(n) + case uint8: + v.Object = v.Object.(uint8) + uint8(n) + case uint16: + v.Object = v.Object.(uint16) + uint16(n) + case uint32: + v.Object = v.Object.(uint32) + uint32(n) + case uint64: + v.Object = v.Object.(uint64) + uint64(n) + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to increment it by n. Pass a negative number to decrement the +// value. To retrieve the incremented value, use one of the specialized methods, +// e.g. IncrementFloat64. 
+func (c *cache) IncrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) + float32(n) + case float64: + v.Object = v.Object.(float64) + n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Increment an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the incremented +// value is returned. 
+func (c *cache) IncrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the incremented +// value is returned. +func (c *cache) IncrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the incremented +// value is returned. 
+func (c *cache) IncrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint8 by n. Returns an error if the item's value +// is not an uint8, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. If there is no error, the +// incremented value is returned. 
+func (c *cache) IncrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// incremented value is returned. 
+func (c *cache) IncrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Increment an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// incremented value is returned. +func (c *cache) IncrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv + n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int, int8, int16, int32, int64, uintptr, uint, +// uint8, uint32, or uint64, float32 or float64 by n. Returns an error if the +// item's value is not an integer, if it was not found, or if it is not +// possible to decrement it by n. To retrieve the decremented value, use one +// of the specialized methods, e.g. DecrementInt64. +func (c *cache) Decrement(k string, n int64) error { + // TODO: Implement Increment and Decrement more cleanly. + // (Cannot do Increment(k, n*-1) for uints.) 
+ c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item not found") + } + switch v.Object.(type) { + case int: + v.Object = v.Object.(int) - int(n) + case int8: + v.Object = v.Object.(int8) - int8(n) + case int16: + v.Object = v.Object.(int16) - int16(n) + case int32: + v.Object = v.Object.(int32) - int32(n) + case int64: + v.Object = v.Object.(int64) - n + case uint: + v.Object = v.Object.(uint) - uint(n) + case uintptr: + v.Object = v.Object.(uintptr) - uintptr(n) + case uint8: + v.Object = v.Object.(uint8) - uint8(n) + case uint16: + v.Object = v.Object.(uint16) - uint16(n) + case uint32: + v.Object = v.Object.(uint32) - uint32(n) + case uint64: + v.Object = v.Object.(uint64) - uint64(n) + case float32: + v.Object = v.Object.(float32) - float32(n) + case float64: + v.Object = v.Object.(float64) - float64(n) + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s is not an integer", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Decrement an item of type float32 or float64 by n. Returns an error if the +// item's value is not floating point, if it was not found, or if it is not +// possible to decrement it by n. Pass a negative number to decrement the +// value. To retrieve the decremented value, use one of the specialized methods, +// e.g. DecrementFloat64. +func (c *cache) DecrementFloat(k string, n float64) error { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return fmt.Errorf("Item %s not found", k) + } + switch v.Object.(type) { + case float32: + v.Object = v.Object.(float32) - float32(n) + case float64: + v.Object = v.Object.(float64) - n + default: + c.mu.Unlock() + return fmt.Errorf("The value for %s does not have type float32 or float64", k) + } + c.items[k] = v + c.mu.Unlock() + return nil +} + +// Decrement an item of type int by n. Returns an error if the item's value is +// not an int, or if it was not found. 
If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt(k string, n int) (int, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int8 by n. Returns an error if the item's value is +// not an int8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt8(k string, n int8) (int8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int16 by n. Returns an error if the item's value is +// not an int16, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt16(k string, n int16) (int16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int32 by n. Returns an error if the item's value is +// not an int32, or if it was not found. If there is no error, the decremented +// value is returned. 
+func (c *cache) DecrementInt32(k string, n int32) (int32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type int64 by n. Returns an error if the item's value is +// not an int64, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementInt64(k string, n int64) (int64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(int64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an int64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint by n. Returns an error if the item's value is +// not an uint, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint(k string, n uint) (uint, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uintptr by n. Returns an error if the item's value +// is not an uintptr, or if it was not found. If there is no error, the +// decremented value is returned. 
+func (c *cache) DecrementUintptr(k string, n uintptr) (uintptr, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uintptr) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uintptr", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint8 by n. Returns an error if the item's value is +// not an uint8, or if it was not found. If there is no error, the decremented +// value is returned. +func (c *cache) DecrementUint8(k string, n uint8) (uint8, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint8) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint8", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint16 by n. Returns an error if the item's value +// is not an uint16, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint16(k string, n uint16) (uint16, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint16) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint16", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint32 by n. Returns an error if the item's value +// is not an uint32, or if it was not found. If there is no error, the +// decremented value is returned. 
+func (c *cache) DecrementUint32(k string, n uint32) (uint32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type uint64 by n. Returns an error if the item's value +// is not an uint64, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementUint64(k string, n uint64) (uint64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(uint64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an uint64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float32 by n. Returns an error if the item's value +// is not an float32, or if it was not found. If there is no error, the +// decremented value is returned. +func (c *cache) DecrementFloat32(k string, n float32) (float32, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float32) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float32", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Decrement an item of type float64 by n. Returns an error if the item's value +// is not an float64, or if it was not found. If there is no error, the +// decremented value is returned. 
+func (c *cache) DecrementFloat64(k string, n float64) (float64, error) { + c.mu.Lock() + v, found := c.items[k] + if !found || v.Expired() { + c.mu.Unlock() + return 0, fmt.Errorf("Item %s not found", k) + } + rv, ok := v.Object.(float64) + if !ok { + c.mu.Unlock() + return 0, fmt.Errorf("The value for %s is not an float64", k) + } + nv := rv - n + v.Object = nv + c.items[k] = v + c.mu.Unlock() + return nv, nil +} + +// Delete an item from the cache. Does nothing if the key is not in the cache. +func (c *cache) Delete(k string) { + c.mu.Lock() + v, evicted := c.delete(k) + c.mu.Unlock() + if evicted { + c.onEvicted(k, v) + } +} + +func (c *cache) delete(k string) (interface{}, bool) { + if c.onEvicted != nil { + if v, found := c.items[k]; found { + delete(c.items, k) + return v.Object, true + } + } + delete(c.items, k) + return nil, false +} + +type keyAndValue struct { + key string + value interface{} +} + +// Delete all expired items from the cache. +func (c *cache) DeleteExpired() { + var evictedItems []keyAndValue + nowTime := time.Now() + now := nowTime.UnixNano() + c.mu.Lock() + for k, v := range c.items { + // "Inlining" of expired + if v.Expiration > 0 && now > v.Expiration { + ov, evicted := c.delete(k) + if evicted { + evictedItems = append(evictedItems, keyAndValue{k, ov}) + } + } + } + c.lastCleanTime = nowTime + c.mu.Unlock() + for _, v := range evictedItems { + c.onEvicted(v.key, v.value) + } +} + +// Sets an (optional) function that is called with the key and value when an +// item is evicted from the cache. (Including when it is deleted manually, but +// not when it is overwritten.) Set to nil to disable. +func (c *cache) OnEvicted(f func(string, interface{})) { + c.mu.Lock() + c.onEvicted = f + c.mu.Unlock() +} + +// Write the cache's items (using Gob) to an io.Writer. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) 
+func (c *cache) Save(w io.Writer) (err error) { + enc := gob.NewEncoder(w) + defer func() { + if x := recover(); x != nil { + err = fmt.Errorf("Error registering item types with Gob library") + } + }() + c.mu.RLock() + defer c.mu.RUnlock() + for _, v := range c.items { + gob.Register(v.Object) + } + err = enc.Encode(&c.items) + return +} + +// Save the cache's items to the given filename, creating the file if it +// doesn't exist, and overwriting it if it does. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) SaveFile(fname string) error { + fp, err := os.Create(fname) + if err != nil { + return err + } + err = c.Save(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Add (Gob-serialized) cache items from an io.Reader, excluding any items with +// keys that already exist (and haven't expired) in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) Load(r io.Reader) error { + dec := gob.NewDecoder(r) + items := map[string]Item{} + err := dec.Decode(&items) + if err == nil { + c.mu.Lock() + defer c.mu.Unlock() + for k, v := range items { + ov, found := c.items[k] + if !found || ov.Expired() { + c.items[k] = v + } + } + } + return err +} + +// Load and add cache items from the given filename, excluding any items with +// keys that already exist in the current cache. +// +// NOTE: This method is deprecated in favor of c.Items() and NewFrom() (see the +// documentation for NewFrom().) +func (c *cache) LoadFile(fname string) error { + fp, err := os.Open(fname) + if err != nil { + return err + } + err = c.Load(fp) + if err != nil { + fp.Close() + return err + } + return fp.Close() +} + +// Copies all unexpired items in the cache into a new map and returns it. 
+func (c *cache) Items() map[string]Item { + c.mu.RLock() + defer c.mu.RUnlock() + m := make(map[string]Item, len(c.items)) + now := time.Now().UnixNano() + for k, v := range c.items { + // "Inlining" of Expired + if v.Expiration > 0 { + if now > v.Expiration { + continue + } + } + m[k] = v + } + return m +} + +// Returns the number of items in the cache. This may include items that have +// expired, but have not yet been cleaned up. +func (c *cache) ItemCount() int { + c.mu.RLock() + n := len(c.items) + c.mu.RUnlock() + return n +} + +// Delete all items from the cache. +func (c *cache) Flush() { + c.mu.Lock() + c.items = map[string]Item{} + c.mu.Unlock() +} + +type janitor struct { + Interval time.Duration + stop chan bool + shoudClean chan bool +} + +func (j *janitor) Run(c *cache) { + ticker := time.NewTicker(j.Interval) + for { + select { + case <-ticker.C: + c.DeleteExpired() + case <-j.shoudClean: + c.mu.RLock() + lastCleanTime := c.lastCleanTime + c.mu.RUnlock() + + if lastCleanTime.Add(time.Second * 1).Before(time.Now()) { + c.DeleteExpired() + } + case <-j.stop: + ticker.Stop() + return + } + } +} + +func stopJanitor(c *Cache) { + c.janitor.stop <- true +} + +func runJanitor(c *cache, ci time.Duration) { + j := &janitor{ + Interval: ci, + stop: make(chan bool), + shoudClean: make(chan bool), + } + c.janitor = j + go j.Run(c) +} + +func newCache(de time.Duration, maxItemsCount int, m map[string]Item) *cache { + if de == 0 { + de = -1 + } + c := &cache{ + defaultExpiration: de, + maxItemsCount: maxItemsCount, + items: m, + lastCleanTime: time.Now(), + } + return c +} + +func newCacheWithJanitor(de time.Duration, ci time.Duration, maxItemsCount int, m map[string]Item) *Cache { + c := newCache(de, maxItemsCount, m) + // This trick ensures that the janitor goroutine (which--granted it + // was enabled--is running DeleteExpired on c forever) does not keep + // the returned C object from being garbage collected. 
When it is + // garbage collected, the finalizer stops the janitor goroutine, after + // which c can be collected. + C := &Cache{c} + if ci > 0 { + runJanitor(c, ci) + runtime.SetFinalizer(C, stopJanitor) + } + return C +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +func New(defaultExpiration, cleanupInterval time.Duration, maxItemsCount int) *Cache { + items := make(map[string]Item) + return newCacheWithJanitor(defaultExpiration, cleanupInterval, maxItemsCount, items) +} + +// Return a new cache with a given default expiration duration and cleanup +// interval. If the expiration duration is less than one (or NoExpiration), +// the items in the cache never expire (by default), and must be deleted +// manually. If the cleanup interval is less than one, expired items are not +// deleted from the cache before calling c.DeleteExpired(). +// +// NewFrom() also accepts an items map which will serve as the underlying map +// for the cache. This is useful for starting from a deserialized cache +// (serialized using e.g. gob.Encode() on c.Items()), or passing in e.g. +// make(map[string]Item, 500) to improve startup performance when the cache +// is expected to reach a certain minimum size. +// +// Only the cache's methods synchronize access to this map, so it is not +// recommended to keep any references to the map around after creating a cache. +// If need be, the map can be accessed at a later point using c.Items() (subject +// to the same caveat.) +// +// Note regarding serialization: When using e.g. 
gob, make sure to +// gob.Register() the individual types stored in the cache before encoding a +// map retrieved with c.Items(), and to register those same types before +// decoding a blob containing an items map. +func NewFrom(defaultExpiration, cleanupInterval time.Duration, maxItemsCount int, items map[string]Item) *Cache { + return newCacheWithJanitor(defaultExpiration, cleanupInterval, maxItemsCount, items) +} diff --git a/pkg/memorycacher/cache_test.go b/pkg/memorycacher/cache_test.go new file mode 100644 index 0000000..c58d56c --- /dev/null +++ b/pkg/memorycacher/cache_test.go @@ -0,0 +1,1783 @@ +/* + * @Author: patrickmn,gitsrc + * @Date: 2020-07-09 13:17:30 + * @LastEditors: gitsrc + * @LastEditTime: 2020-07-10 10:06:28 + * @FilePath: /ServiceCar/utils/memorycacher/cache_test.go + */ + +package memorycacher + +import ( + "bytes" + "io/ioutil" + "runtime" + "strconv" + "sync" + "testing" + "time" +) + +type TestStruct struct { + Num int + Children []*TestStruct +} + +const ( + maxItemsCount = 10000 +) + +func TestCache(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + + a, found := tc.Get("a") + if found || a != nil { + t.Error("Getting A found value that shouldn't exist:", a) + } + + b, found := tc.Get("b") + if found || b != nil { + t.Error("Getting B found value that shouldn't exist:", b) + } + + c, found := tc.Get("c") + if found || c != nil { + t.Error("Getting C found value that shouldn't exist:", c) + } + + tc.Set("a", 1, DefaultExpiration) + tc.Set("b", "b", DefaultExpiration) + tc.Set("c", 3.5, DefaultExpiration) + + x, found := tc.Get("a") + if !found { + t.Error("a was not found while getting a2") + } + if x == nil { + t.Error("x for a is nil") + } else if a2 := x.(int); a2+2 != 3 { + t.Error("a2 (which should be 1) plus 2 does not equal 3; value:", a2) + } + + x, found = tc.Get("b") + if !found { + t.Error("b was not found while getting b2") + } + if x == nil { + t.Error("x for b is nil") + } else if b2 := x.(string); b2+"B" != 
"bB" { + t.Error("b2 (which should be b) plus B does not equal bB; value:", b2) + } + + x, found = tc.Get("c") + if !found { + t.Error("c was not found while getting c2") + } + if x == nil { + t.Error("x for c is nil") + } else if c2 := x.(float64); c2+1.2 != 4.7 { + t.Error("c2 (which should be 3.5) plus 1.2 does not equal 4.7; value:", c2) + } +} + +func TestCacheTimes(t *testing.T) { + var found bool + + tc := New(50*time.Millisecond, 1*time.Millisecond, maxItemsCount) + tc.Set("a", 1, DefaultExpiration) + tc.Set("b", 2, NoExpiration) + tc.Set("c", 3, 20*time.Millisecond) + tc.Set("d", 4, 70*time.Millisecond) + + <-time.After(25 * time.Millisecond) + _, found = tc.Get("c") + if found { + t.Error("Found c when it should have been automatically deleted") + } + + <-time.After(30 * time.Millisecond) + _, found = tc.Get("a") + if found { + t.Error("Found a when it should have been automatically deleted") + } + + _, found = tc.Get("b") + if !found { + t.Error("Did not find b even though it was set to never expire") + } + + _, found = tc.Get("d") + if !found { + t.Error("Did not find d even though it was set to expire later than the default") + } + + <-time.After(20 * time.Millisecond) + _, found = tc.Get("d") + if found { + t.Error("Found d when it should have been automatically deleted (later than the default)") + } +} + +func TestNewFrom(t *testing.T) { + m := map[string]Item{ + "a": Item{ + Object: 1, + Expiration: 0, + }, + "b": Item{ + Object: 2, + Expiration: 0, + }, + } + tc := NewFrom(DefaultExpiration, 0, maxItemsCount, m) + a, found := tc.Get("a") + if !found { + t.Fatal("Did not find a") + } + if a.(int) != 1 { + t.Fatal("a is not 1") + } + b, found := tc.Get("b") + if !found { + t.Fatal("Did not find b") + } + if b.(int) != 2 { + t.Fatal("b is not 2") + } +} + +func TestStorePointerToStruct(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("foo", &TestStruct{Num: 1}, DefaultExpiration) + x, found := tc.Get("foo") + if !found { + 
t.Fatal("*TestStruct was not found for foo") + } + foo := x.(*TestStruct) + foo.Num++ + + y, found := tc.Get("foo") + if !found { + t.Fatal("*TestStruct was not found for foo (second time)") + } + bar := y.(*TestStruct) + if bar.Num != 2 { + t.Fatal("TestStruct.Num is not 2") + } +} + +func TestIncrementWithInt(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint", 1, DefaultExpiration) + err := tc.Increment("tint", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("tint") + if !found { + t.Error("tint was not found") + } + if x.(int) != 3 { + t.Error("tint is not 3:", x) + } +} + +func TestIncrementWithInt8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint8", int8(1), DefaultExpiration) + err := tc.Increment("tint8", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("tint8") + if !found { + t.Error("tint8 was not found") + } + if x.(int8) != 3 { + t.Error("tint8 is not 3:", x) + } +} + +func TestIncrementWithInt16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint16", int16(1), DefaultExpiration) + err := tc.Increment("tint16", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("tint16") + if !found { + t.Error("tint16 was not found") + } + if x.(int16) != 3 { + t.Error("tint16 is not 3:", x) + } +} + +func TestIncrementWithInt32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint32", int32(1), DefaultExpiration) + err := tc.Increment("tint32", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("tint32") + if !found { + t.Error("tint32 was not found") + } + if x.(int32) != 3 { + t.Error("tint32 is not 3:", x) + } +} + +func TestIncrementWithInt64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint64", int64(1), DefaultExpiration) + err := tc.Increment("tint64", 2) + if err != nil { + 
t.Error("Error incrementing:", err) + } + x, found := tc.Get("tint64") + if !found { + t.Error("tint64 was not found") + } + if x.(int64) != 3 { + t.Error("tint64 is not 3:", x) + } +} + +func TestIncrementWithUint(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint", uint(1), DefaultExpiration) + err := tc.Increment("tuint", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("tuint") + if !found { + t.Error("tuint was not found") + } + if x.(uint) != 3 { + t.Error("tuint is not 3:", x) + } +} + +func TestIncrementWithUintptr(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuintptr", uintptr(1), DefaultExpiration) + err := tc.Increment("tuintptr", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + + x, found := tc.Get("tuintptr") + if !found { + t.Error("tuintptr was not found") + } + if x.(uintptr) != 3 { + t.Error("tuintptr is not 3:", x) + } +} + +func TestIncrementWithUint8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint8", uint8(1), DefaultExpiration) + err := tc.Increment("tuint8", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("tuint8") + if !found { + t.Error("tuint8 was not found") + } + if x.(uint8) != 3 { + t.Error("tuint8 is not 3:", x) + } +} + +func TestIncrementWithUint16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint16", uint16(1), DefaultExpiration) + err := tc.Increment("tuint16", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + + x, found := tc.Get("tuint16") + if !found { + t.Error("tuint16 was not found") + } + if x.(uint16) != 3 { + t.Error("tuint16 is not 3:", x) + } +} + +func TestIncrementWithUint32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint32", uint32(1), DefaultExpiration) + err := tc.Increment("tuint32", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := 
tc.Get("tuint32") + if !found { + t.Error("tuint32 was not found") + } + if x.(uint32) != 3 { + t.Error("tuint32 is not 3:", x) + } +} + +func TestIncrementWithUint64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint64", uint64(1), DefaultExpiration) + err := tc.Increment("tuint64", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + + x, found := tc.Get("tuint64") + if !found { + t.Error("tuint64 was not found") + } + if x.(uint64) != 3 { + t.Error("tuint64 is not 3:", x) + } +} + +func TestIncrementWithFloat32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float32", float32(1.5), DefaultExpiration) + err := tc.Increment("float32", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("float32") + if !found { + t.Error("float32 was not found") + } + if x.(float32) != 3.5 { + t.Error("float32 is not 3.5:", x) + } +} + +func TestIncrementWithFloat64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float64", float64(1.5), DefaultExpiration) + err := tc.Increment("float64", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + x, found := tc.Get("float64") + if !found { + t.Error("float64 was not found") + } + if x.(float64) != 3.5 { + t.Error("float64 is not 3.5:", x) + } +} + +func TestIncrementFloatWithFloat32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float32", float32(1.5), DefaultExpiration) + err := tc.IncrementFloat("float32", 2) + if err != nil { + t.Error("Error incrementfloating:", err) + } + x, found := tc.Get("float32") + if !found { + t.Error("float32 was not found") + } + if x.(float32) != 3.5 { + t.Error("float32 is not 3.5:", x) + } +} + +func TestIncrementFloatWithFloat64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float64", float64(1.5), DefaultExpiration) + err := tc.IncrementFloat("float64", 2) + if err != nil { + t.Error("Error incrementfloating:", 
err) + } + x, found := tc.Get("float64") + if !found { + t.Error("float64 was not found") + } + if x.(float64) != 3.5 { + t.Error("float64 is not 3.5:", x) + } +} + +func TestDecrementWithInt(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int", int(5), DefaultExpiration) + err := tc.Decrement("int", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("int") + if !found { + t.Error("int was not found") + } + if x.(int) != 3 { + t.Error("int is not 3:", x) + } +} + +func TestDecrementWithInt8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int8", int8(5), DefaultExpiration) + err := tc.Decrement("int8", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("int8") + if !found { + t.Error("int8 was not found") + } + if x.(int8) != 3 { + t.Error("int8 is not 3:", x) + } +} + +func TestDecrementWithInt16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int16", int16(5), DefaultExpiration) + err := tc.Decrement("int16", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("int16") + if !found { + t.Error("int16 was not found") + } + if x.(int16) != 3 { + t.Error("int16 is not 3:", x) + } +} + +func TestDecrementWithInt32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int32", int32(5), DefaultExpiration) + err := tc.Decrement("int32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("int32") + if !found { + t.Error("int32 was not found") + } + if x.(int32) != 3 { + t.Error("int32 is not 3:", x) + } +} + +func TestDecrementWithInt64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int64", int64(5), DefaultExpiration) + err := tc.Decrement("int64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("int64") + if !found { + t.Error("int64 was not found") + } + if x.(int64) != 3 { + 
t.Error("int64 is not 3:", x) + } +} + +func TestDecrementWithUint(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint", uint(5), DefaultExpiration) + err := tc.Decrement("uint", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("uint") + if !found { + t.Error("uint was not found") + } + if x.(uint) != 3 { + t.Error("uint is not 3:", x) + } +} + +func TestDecrementWithUintptr(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uintptr", uintptr(5), DefaultExpiration) + err := tc.Decrement("uintptr", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("uintptr") + if !found { + t.Error("uintptr was not found") + } + if x.(uintptr) != 3 { + t.Error("uintptr is not 3:", x) + } +} + +func TestDecrementWithUint8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint8", uint8(5), DefaultExpiration) + err := tc.Decrement("uint8", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("uint8") + if !found { + t.Error("uint8 was not found") + } + if x.(uint8) != 3 { + t.Error("uint8 is not 3:", x) + } +} + +func TestDecrementWithUint16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint16", uint16(5), DefaultExpiration) + err := tc.Decrement("uint16", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("uint16") + if !found { + t.Error("uint16 was not found") + } + if x.(uint16) != 3 { + t.Error("uint16 is not 3:", x) + } +} + +func TestDecrementWithUint32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint32", uint32(5), DefaultExpiration) + err := tc.Decrement("uint32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("uint32") + if !found { + t.Error("uint32 was not found") + } + if x.(uint32) != 3 { + t.Error("uint32 is not 3:", x) + } +} + +func TestDecrementWithUint64(t 
*testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint64", uint64(5), DefaultExpiration) + err := tc.Decrement("uint64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("uint64") + if !found { + t.Error("uint64 was not found") + } + if x.(uint64) != 3 { + t.Error("uint64 is not 3:", x) + } +} + +func TestDecrementWithFloat32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float32", float32(5.5), DefaultExpiration) + err := tc.Decrement("float32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("float32") + if !found { + t.Error("float32 was not found") + } + if x.(float32) != 3.5 { + t.Error("float32 is not 3:", x) + } +} + +func TestDecrementWithFloat64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float64", float64(5.5), DefaultExpiration) + err := tc.Decrement("float64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("float64") + if !found { + t.Error("float64 was not found") + } + if x.(float64) != 3.5 { + t.Error("float64 is not 3:", x) + } +} + +func TestDecrementFloatWithFloat32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float32", float32(5.5), DefaultExpiration) + err := tc.DecrementFloat("float32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("float32") + if !found { + t.Error("float32 was not found") + } + if x.(float32) != 3.5 { + t.Error("float32 is not 3:", x) + } +} + +func TestDecrementFloatWithFloat64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float64", float64(5.5), DefaultExpiration) + err := tc.DecrementFloat("float64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + x, found := tc.Get("float64") + if !found { + t.Error("float64 was not found") + } + if x.(float64) != 3.5 { + t.Error("float64 is not 3:", x) + } +} + +func TestIncrementInt(t 
*testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint", 1, DefaultExpiration) + n, err := tc.IncrementInt("tint", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tint") + if !found { + t.Error("tint was not found") + } + if x.(int) != 3 { + t.Error("tint is not 3:", x) + } +} + +func TestIncrementInt8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint8", int8(1), DefaultExpiration) + n, err := tc.IncrementInt8("tint8", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tint8") + if !found { + t.Error("tint8 was not found") + } + if x.(int8) != 3 { + t.Error("tint8 is not 3:", x) + } +} + +func TestIncrementInt16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint16", int16(1), DefaultExpiration) + n, err := tc.IncrementInt16("tint16", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tint16") + if !found { + t.Error("tint16 was not found") + } + if x.(int16) != 3 { + t.Error("tint16 is not 3:", x) + } +} + +func TestIncrementInt32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint32", int32(1), DefaultExpiration) + n, err := tc.IncrementInt32("tint32", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tint32") + if !found { + t.Error("tint32 was not found") + } + if x.(int32) != 3 { + t.Error("tint32 is not 3:", x) + } +} + +func TestIncrementInt64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tint64", int64(1), DefaultExpiration) + n, err := tc.IncrementInt64("tint64", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + 
t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tint64") + if !found { + t.Error("tint64 was not found") + } + if x.(int64) != 3 { + t.Error("tint64 is not 3:", x) + } +} + +func TestIncrementUint(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint", uint(1), DefaultExpiration) + n, err := tc.IncrementUint("tuint", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tuint") + if !found { + t.Error("tuint was not found") + } + if x.(uint) != 3 { + t.Error("tuint is not 3:", x) + } +} + +func TestIncrementUintptr(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuintptr", uintptr(1), DefaultExpiration) + n, err := tc.IncrementUintptr("tuintptr", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tuintptr") + if !found { + t.Error("tuintptr was not found") + } + if x.(uintptr) != 3 { + t.Error("tuintptr is not 3:", x) + } +} + +func TestIncrementUint8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint8", uint8(1), DefaultExpiration) + n, err := tc.IncrementUint8("tuint8", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tuint8") + if !found { + t.Error("tuint8 was not found") + } + if x.(uint8) != 3 { + t.Error("tuint8 is not 3:", x) + } +} + +func TestIncrementUint16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint16", uint16(1), DefaultExpiration) + n, err := tc.IncrementUint16("tuint16", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tuint16") + if !found { + t.Error("tuint16 was not found") + } + if x.(uint16) != 3 { + t.Error("tuint16 is not 3:", x) + } +} 
+ +func TestIncrementUint32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint32", uint32(1), DefaultExpiration) + n, err := tc.IncrementUint32("tuint32", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tuint32") + if !found { + t.Error("tuint32 was not found") + } + if x.(uint32) != 3 { + t.Error("tuint32 is not 3:", x) + } +} + +func TestIncrementUint64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("tuint64", uint64(1), DefaultExpiration) + n, err := tc.IncrementUint64("tuint64", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("tuint64") + if !found { + t.Error("tuint64 was not found") + } + if x.(uint64) != 3 { + t.Error("tuint64 is not 3:", x) + } +} + +func TestIncrementFloat32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float32", float32(1.5), DefaultExpiration) + n, err := tc.IncrementFloat32("float32", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3.5 { + t.Error("Returned number is not 3.5:", n) + } + x, found := tc.Get("float32") + if !found { + t.Error("float32 was not found") + } + if x.(float32) != 3.5 { + t.Error("float32 is not 3.5:", x) + } +} + +func TestIncrementFloat64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float64", float64(1.5), DefaultExpiration) + n, err := tc.IncrementFloat64("float64", 2) + if err != nil { + t.Error("Error incrementing:", err) + } + if n != 3.5 { + t.Error("Returned number is not 3.5:", n) + } + x, found := tc.Get("float64") + if !found { + t.Error("float64 was not found") + } + if x.(float64) != 3.5 { + t.Error("float64 is not 3.5:", x) + } +} + +func TestDecrementInt8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int8", int8(5), DefaultExpiration) + n, 
err := tc.DecrementInt8("int8", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("int8") + if !found { + t.Error("int8 was not found") + } + if x.(int8) != 3 { + t.Error("int8 is not 3:", x) + } +} + +func TestDecrementInt16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int16", int16(5), DefaultExpiration) + n, err := tc.DecrementInt16("int16", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("int16") + if !found { + t.Error("int16 was not found") + } + if x.(int16) != 3 { + t.Error("int16 is not 3:", x) + } +} + +func TestDecrementInt32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int32", int32(5), DefaultExpiration) + n, err := tc.DecrementInt32("int32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("int32") + if !found { + t.Error("int32 was not found") + } + if x.(int32) != 3 { + t.Error("int32 is not 3:", x) + } +} + +func TestDecrementInt64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int64", int64(5), DefaultExpiration) + n, err := tc.DecrementInt64("int64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("int64") + if !found { + t.Error("int64 was not found") + } + if x.(int64) != 3 { + t.Error("int64 is not 3:", x) + } +} + +func TestDecrementUint(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint", uint(5), DefaultExpiration) + n, err := tc.DecrementUint("uint", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("uint") + if !found { + t.Error("uint was not found") + } + 
if x.(uint) != 3 { + t.Error("uint is not 3:", x) + } +} + +func TestDecrementUintptr(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uintptr", uintptr(5), DefaultExpiration) + n, err := tc.DecrementUintptr("uintptr", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("uintptr") + if !found { + t.Error("uintptr was not found") + } + if x.(uintptr) != 3 { + t.Error("uintptr is not 3:", x) + } +} + +func TestDecrementUint8(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint8", uint8(5), DefaultExpiration) + n, err := tc.DecrementUint8("uint8", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("uint8") + if !found { + t.Error("uint8 was not found") + } + if x.(uint8) != 3 { + t.Error("uint8 is not 3:", x) + } +} + +func TestDecrementUint16(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint16", uint16(5), DefaultExpiration) + n, err := tc.DecrementUint16("uint16", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("uint16") + if !found { + t.Error("uint16 was not found") + } + if x.(uint16) != 3 { + t.Error("uint16 is not 3:", x) + } +} + +func TestDecrementUint32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint32", uint32(5), DefaultExpiration) + n, err := tc.DecrementUint32("uint32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("uint32") + if !found { + t.Error("uint32 was not found") + } + if x.(uint32) != 3 { + t.Error("uint32 is not 3:", x) + } +} + +func TestDecrementUint64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint64", uint64(5), 
DefaultExpiration) + n, err := tc.DecrementUint64("uint64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("uint64") + if !found { + t.Error("uint64 was not found") + } + if x.(uint64) != 3 { + t.Error("uint64 is not 3:", x) + } +} + +func TestDecrementFloat32(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float32", float32(5), DefaultExpiration) + n, err := tc.DecrementFloat32("float32", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("float32") + if !found { + t.Error("float32 was not found") + } + if x.(float32) != 3 { + t.Error("float32 is not 3:", x) + } +} + +func TestDecrementFloat64(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("float64", float64(5), DefaultExpiration) + n, err := tc.DecrementFloat64("float64", 2) + if err != nil { + t.Error("Error decrementing:", err) + } + if n != 3 { + t.Error("Returned number is not 3:", n) + } + x, found := tc.Get("float64") + if !found { + t.Error("float64 was not found") + } + if x.(float64) != 3 { + t.Error("float64 is not 3:", x) + } +} + +func TestAdd(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + err := tc.Add("foo", "bar", DefaultExpiration) + if err != nil { + t.Error("Couldn't add foo even though it shouldn't exist") + } + err = tc.Add("foo", "baz", DefaultExpiration) + if err == nil { + t.Error("Successfully added another foo when it should have returned an error") + } +} + +func TestReplace(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + err := tc.Replace("foo", "bar", DefaultExpiration) + if err == nil { + t.Error("Replaced foo when it shouldn't exist") + } + tc.Set("foo", "bar", DefaultExpiration) + err = tc.Replace("foo", "bar", DefaultExpiration) + if err != nil { + t.Error("Couldn't replace existing key foo") + } +} + 
+func TestDelete(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("foo", "bar", DefaultExpiration) + tc.Delete("foo") + x, found := tc.Get("foo") + if found { + t.Error("foo was found, but it should have been deleted") + } + if x != nil { + t.Error("x is not nil:", x) + } +} + +func TestItemCount(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("foo", "1", DefaultExpiration) + tc.Set("bar", "2", DefaultExpiration) + tc.Set("baz", "3", DefaultExpiration) + if n := tc.ItemCount(); n != 3 { + t.Errorf("Item count is not 3: %d", n) + } +} + +func TestFlush(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("foo", "bar", DefaultExpiration) + tc.Set("baz", "yes", DefaultExpiration) + tc.Flush() + x, found := tc.Get("foo") + if found { + t.Error("foo was found, but it should have been deleted") + } + if x != nil { + t.Error("x is not nil:", x) + } + x, found = tc.Get("baz") + if found { + t.Error("baz was found, but it should have been deleted") + } + if x != nil { + t.Error("x is not nil:", x) + } +} + +func TestIncrementOverflowInt(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("int8", int8(127), DefaultExpiration) + err := tc.Increment("int8", 1) + if err != nil { + t.Error("Error incrementing int8:", err) + } + x, _ := tc.Get("int8") + int8 := x.(int8) + if int8 != -128 { + t.Error("int8 did not overflow as expected; value:", int8) + } + +} + +func TestIncrementOverflowUint(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint8", uint8(255), DefaultExpiration) + err := tc.Increment("uint8", 1) + if err != nil { + t.Error("Error incrementing int8:", err) + } + x, _ := tc.Get("uint8") + uint8 := x.(uint8) + if uint8 != 0 { + t.Error("uint8 did not overflow as expected; value:", uint8) + } +} + +func TestDecrementUnderflowUint(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("uint8", uint8(0), DefaultExpiration) + err := 
tc.Decrement("uint8", 1) + if err != nil { + t.Error("Error decrementing int8:", err) + } + x, _ := tc.Get("uint8") + uint8 := x.(uint8) + if uint8 != 255 { + t.Error("uint8 did not underflow as expected; value:", uint8) + } +} + +func TestOnEvicted(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("foo", 3, DefaultExpiration) + if tc.onEvicted != nil { + t.Fatal("tc.onEvicted is not nil") + } + works := false + tc.OnEvicted(func(k string, v interface{}) { + if k == "foo" && v.(int) == 3 { + works = true + } + tc.Set("bar", 4, DefaultExpiration) + }) + tc.Delete("foo") + x, _ := tc.Get("bar") + if !works { + t.Error("works bool not true") + } + if x.(int) != 4 { + t.Error("bar was not 4") + } +} + +func TestCacheSerialization(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + testFillAndSerialize(t, tc) + + // Check if gob.Register behaves properly even after multiple gob.Register + // on c.Items (many of which will be the same type) + testFillAndSerialize(t, tc) +} + +func testFillAndSerialize(t *testing.T, tc *Cache) { + tc.Set("a", "a", DefaultExpiration) + tc.Set("b", "b", DefaultExpiration) + tc.Set("c", "c", DefaultExpiration) + tc.Set("expired", "foo", 1*time.Millisecond) + tc.Set("*struct", &TestStruct{Num: 1}, DefaultExpiration) + tc.Set("[]struct", []TestStruct{ + {Num: 2}, + {Num: 3}, + }, DefaultExpiration) + tc.Set("[]*struct", []*TestStruct{ + &TestStruct{Num: 4}, + &TestStruct{Num: 5}, + }, DefaultExpiration) + tc.Set("structception", &TestStruct{ + Num: 42, + Children: []*TestStruct{ + &TestStruct{Num: 6174}, + &TestStruct{Num: 4716}, + }, + }, DefaultExpiration) + + fp := &bytes.Buffer{} + err := tc.Save(fp) + if err != nil { + t.Fatal("Couldn't save cache to fp:", err) + } + + oc := New(DefaultExpiration, 0, maxItemsCount) + err = oc.Load(fp) + if err != nil { + t.Fatal("Couldn't load cache from fp:", err) + } + + a, found := oc.Get("a") + if !found { + t.Error("a was not found") + } + if a.(string) != "a" { 
+ t.Error("a is not a") + } + + b, found := oc.Get("b") + if !found { + t.Error("b was not found") + } + if b.(string) != "b" { + t.Error("b is not b") + } + + c, found := oc.Get("c") + if !found { + t.Error("c was not found") + } + if c.(string) != "c" { + t.Error("c is not c") + } + + <-time.After(5 * time.Millisecond) + _, found = oc.Get("expired") + if found { + t.Error("expired was found") + } + + s1, found := oc.Get("*struct") + if !found { + t.Error("*struct was not found") + } + if s1.(*TestStruct).Num != 1 { + t.Error("*struct.Num is not 1") + } + + s2, found := oc.Get("[]struct") + if !found { + t.Error("[]struct was not found") + } + s2r := s2.([]TestStruct) + if len(s2r) != 2 { + t.Error("Length of s2r is not 2") + } + if s2r[0].Num != 2 { + t.Error("s2r[0].Num is not 2") + } + if s2r[1].Num != 3 { + t.Error("s2r[1].Num is not 3") + } + + s3, found := oc.get("[]*struct") + if !found { + t.Error("[]*struct was not found") + } + s3r := s3.([]*TestStruct) + if len(s3r) != 2 { + t.Error("Length of s3r is not 2") + } + if s3r[0].Num != 4 { + t.Error("s3r[0].Num is not 4") + } + if s3r[1].Num != 5 { + t.Error("s3r[1].Num is not 5") + } + + s4, found := oc.get("structception") + if !found { + t.Error("structception was not found") + } + s4r := s4.(*TestStruct) + if len(s4r.Children) != 2 { + t.Error("Length of s4r.Children is not 2") + } + if s4r.Children[0].Num != 6174 { + t.Error("s4r.Children[0].Num is not 6174") + } + if s4r.Children[1].Num != 4716 { + t.Error("s4r.Children[1].Num is not 4716") + } +} + +func TestFileSerialization(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Add("a", "a", DefaultExpiration) + tc.Add("b", "b", DefaultExpiration) + f, err := ioutil.TempFile("", "go-cache-cache.dat") + if err != nil { + t.Fatal("Couldn't create cache file:", err) + } + fname := f.Name() + f.Close() + tc.SaveFile(fname) + + oc := New(DefaultExpiration, 0, maxItemsCount) + oc.Add("a", "aa", 0) // this should not be overwritten + err = 
oc.LoadFile(fname) + if err != nil { + t.Error(err) + } + a, found := oc.Get("a") + if !found { + t.Error("a was not found") + } + astr := a.(string) + if astr != "aa" { + if astr == "a" { + t.Error("a was overwritten") + } else { + t.Error("a is not aa") + } + } + b, found := oc.Get("b") + if !found { + t.Error("b was not found") + } + if b.(string) != "b" { + t.Error("b is not b") + } +} + +func TestSerializeUnserializable(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + ch := make(chan bool, 1) + ch <- true + tc.Set("chan", ch, DefaultExpiration) + fp := &bytes.Buffer{} + err := tc.Save(fp) // this should fail gracefully + if err.Error() != "gob NewTypeObject can't handle type: chan bool" { + t.Error("Error from Save was not gob NewTypeObject can't handle type chan bool:", err) + } +} + +func BenchmarkCacheGetExpiring(b *testing.B) { + benchmarkCacheGet(b, 5*time.Minute) +} + +func BenchmarkCacheGetNotExpiring(b *testing.B) { + benchmarkCacheGet(b, NoExpiration) +} + +func benchmarkCacheGet(b *testing.B, exp time.Duration) { + b.StopTimer() + tc := New(exp, 0, maxItemsCount) + tc.Set("foo", "bar", DefaultExpiration) + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.Get("foo") + } +} + +func BenchmarkRWMutexMapGet(b *testing.B) { + b.StopTimer() + m := map[string]string{ + "foo": "bar", + } + mu := sync.RWMutex{} + b.StartTimer() + for i := 0; i < b.N; i++ { + mu.RLock() + _, _ = m["foo"] + mu.RUnlock() + } +} + +func BenchmarkRWMutexInterfaceMapGetStruct(b *testing.B) { + b.StopTimer() + s := struct{ name string }{name: "foo"} + m := map[interface{}]string{ + s: "bar", + } + mu := sync.RWMutex{} + b.StartTimer() + for i := 0; i < b.N; i++ { + mu.RLock() + _, _ = m[s] + mu.RUnlock() + } +} + +func BenchmarkRWMutexInterfaceMapGetString(b *testing.B) { + b.StopTimer() + m := map[interface{}]string{ + "foo": "bar", + } + mu := sync.RWMutex{} + b.StartTimer() + for i := 0; i < b.N; i++ { + mu.RLock() + _, _ = m["foo"] + mu.RUnlock() + } +} + +func 
BenchmarkCacheGetConcurrentExpiring(b *testing.B) { + benchmarkCacheGetConcurrent(b, 5*time.Minute) +} + +func BenchmarkCacheGetConcurrentNotExpiring(b *testing.B) { + benchmarkCacheGetConcurrent(b, NoExpiration) +} + +func benchmarkCacheGetConcurrent(b *testing.B, exp time.Duration) { + b.StopTimer() + tc := New(exp, 0, maxItemsCount) + tc.Set("foo", "bar", DefaultExpiration) + wg := new(sync.WaitGroup) + workers := runtime.NumCPU() + each := b.N / workers + wg.Add(workers) + b.StartTimer() + for i := 0; i < workers; i++ { + go func() { + for j := 0; j < each; j++ { + tc.Get("foo") + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkRWMutexMapGetConcurrent(b *testing.B) { + b.StopTimer() + m := map[string]string{ + "foo": "bar", + } + mu := sync.RWMutex{} + wg := new(sync.WaitGroup) + workers := runtime.NumCPU() + each := b.N / workers + wg.Add(workers) + b.StartTimer() + for i := 0; i < workers; i++ { + go func() { + for j := 0; j < each; j++ { + mu.RLock() + _, _ = m["foo"] + mu.RUnlock() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCacheGetManyConcurrentExpiring(b *testing.B) { + benchmarkCacheGetManyConcurrent(b, 5*time.Minute) +} + +func BenchmarkCacheGetManyConcurrentNotExpiring(b *testing.B) { + benchmarkCacheGetManyConcurrent(b, NoExpiration) +} + +func benchmarkCacheGetManyConcurrent(b *testing.B, exp time.Duration) { + // This is the same as BenchmarkCacheGetConcurrent, but its result + // can be compared against BenchmarkShardedCacheGetManyConcurrent + // in sharded_test.go. 
+ b.StopTimer() + n := 10000 + tc := New(exp, 0, maxItemsCount) + keys := make([]string, n) + for i := 0; i < n; i++ { + k := "foo" + strconv.Itoa(i) + keys[i] = k + tc.Set(k, "bar", DefaultExpiration) + } + each := b.N / n + wg := new(sync.WaitGroup) + wg.Add(n) + for _, v := range keys { + go func(k string) { + for j := 0; j < each; j++ { + tc.Get(k) + } + wg.Done() + }(v) + } + b.StartTimer() + wg.Wait() +} + +func BenchmarkCacheSetExpiring(b *testing.B) { + benchmarkCacheSet(b, 5*time.Minute) +} + +func BenchmarkCacheSetNotExpiring(b *testing.B) { + benchmarkCacheSet(b, NoExpiration) +} + +func benchmarkCacheSet(b *testing.B, exp time.Duration) { + b.StopTimer() + tc := New(exp, 0, maxItemsCount) + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.Set("foo", "bar", DefaultExpiration) + } +} + +func BenchmarkRWMutexMapSet(b *testing.B) { + b.StopTimer() + m := map[string]string{} + mu := sync.RWMutex{} + b.StartTimer() + for i := 0; i < b.N; i++ { + mu.Lock() + m["foo"] = "bar" + mu.Unlock() + } +} + +func BenchmarkCacheSetDelete(b *testing.B) { + b.StopTimer() + tc := New(DefaultExpiration, 0, maxItemsCount) + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.Set("foo", "bar", DefaultExpiration) + tc.Delete("foo") + } +} + +func BenchmarkRWMutexMapSetDelete(b *testing.B) { + b.StopTimer() + m := map[string]string{} + mu := sync.RWMutex{} + b.StartTimer() + for i := 0; i < b.N; i++ { + mu.Lock() + m["foo"] = "bar" + mu.Unlock() + mu.Lock() + delete(m, "foo") + mu.Unlock() + } +} + +func BenchmarkCacheSetDeleteSingleLock(b *testing.B) { + b.StopTimer() + tc := New(DefaultExpiration, 0, maxItemsCount) + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.mu.Lock() + tc.set("foo", "bar", DefaultExpiration) + tc.delete("foo") + tc.mu.Unlock() + } +} + +func BenchmarkRWMutexMapSetDeleteSingleLock(b *testing.B) { + b.StopTimer() + m := map[string]string{} + mu := sync.RWMutex{} + b.StartTimer() + for i := 0; i < b.N; i++ { + mu.Lock() + m["foo"] = "bar" + delete(m, "foo") 
+ mu.Unlock() + } +} + +func BenchmarkIncrementInt(b *testing.B) { + b.StopTimer() + tc := New(DefaultExpiration, 0, maxItemsCount) + tc.Set("foo", 0, DefaultExpiration) + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.IncrementInt("foo", 1) + } +} + +func BenchmarkDeleteExpiredLoop(b *testing.B) { + b.StopTimer() + tc := New(5*time.Minute, 0, maxItemsCount) + tc.mu.Lock() + for i := 0; i < 100000; i++ { + tc.set(strconv.Itoa(i), "bar", DefaultExpiration) + } + tc.mu.Unlock() + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.DeleteExpired() + } +} + +func TestGetWithExpiration(t *testing.T) { + tc := New(DefaultExpiration, 0, maxItemsCount) + + a, expiration, found := tc.GetWithExpiration("a") + if found || a != nil || !expiration.IsZero() { + t.Error("Getting A found value that shouldn't exist:", a) + } + + b, expiration, found := tc.GetWithExpiration("b") + if found || b != nil || !expiration.IsZero() { + t.Error("Getting B found value that shouldn't exist:", b) + } + + c, expiration, found := tc.GetWithExpiration("c") + if found || c != nil || !expiration.IsZero() { + t.Error("Getting C found value that shouldn't exist:", c) + } + + tc.Set("a", 1, DefaultExpiration) + tc.Set("b", "b", DefaultExpiration) + tc.Set("c", 3.5, DefaultExpiration) + tc.Set("d", 1, NoExpiration) + tc.Set("e", 1, 50*time.Millisecond) + + x, expiration, found := tc.GetWithExpiration("a") + if !found { + t.Error("a was not found while getting a2") + } + if x == nil { + t.Error("x for a is nil") + } else if a2 := x.(int); a2+2 != 3 { + t.Error("a2 (which should be 1) plus 2 does not equal 3; value:", a2) + } + if !expiration.IsZero() { + t.Error("expiration for a is not a zeroed time") + } + + x, expiration, found = tc.GetWithExpiration("b") + if !found { + t.Error("b was not found while getting b2") + } + if x == nil { + t.Error("x for b is nil") + } else if b2 := x.(string); b2+"B" != "bB" { + t.Error("b2 (which should be b) plus B does not equal bB; value:", b2) + } + if 
!expiration.IsZero() { + t.Error("expiration for b is not a zeroed time") + } + + x, expiration, found = tc.GetWithExpiration("c") + if !found { + t.Error("c was not found while getting c2") + } + if x == nil { + t.Error("x for c is nil") + } else if c2 := x.(float64); c2+1.2 != 4.7 { + t.Error("c2 (which should be 3.5) plus 1.2 does not equal 4.7; value:", c2) + } + if !expiration.IsZero() { + t.Error("expiration for c is not a zeroed time") + } + + x, expiration, found = tc.GetWithExpiration("d") + if !found { + t.Error("d was not found while getting d2") + } + if x == nil { + t.Error("x for d is nil") + } else if d2 := x.(int); d2+2 != 3 { + t.Error("d (which should be 1) plus 2 does not equal 3; value:", d2) + } + if !expiration.IsZero() { + t.Error("expiration for d is not a zeroed time") + } + + x, expiration, found = tc.GetWithExpiration("e") + if !found { + t.Error("e was not found while getting e2") + } + if x == nil { + t.Error("x for e is nil") + } else if e2 := x.(int); e2+2 != 3 { + t.Error("e (which should be 1) plus 2 does not equal 3; value:", e2) + } + if expiration.UnixNano() != tc.items["e"].Expiration { + t.Error("expiration for e is not the correct time") + } + if expiration.UnixNano() < time.Now().UnixNano() { + t.Error("expiration for e is in the past") + } +} diff --git a/pkg/memorycacher/sharded.go b/pkg/memorycacher/sharded.go new file mode 100644 index 0000000..87b5a5d --- /dev/null +++ b/pkg/memorycacher/sharded.go @@ -0,0 +1,208 @@ +/* + * @Author: patrickmn,gitsrc + * @Date: 2020-07-09 13:17:30 + * @LastEditors: gitsrc + * @LastEditTime: 2020-07-09 13:22:41 + * @FilePath: /ServiceCar/utils/memorycache/sharded.go + */ + +package memorycacher + +import ( + "crypto/rand" + "math" + "math/big" + insecurerand "math/rand" + "os" + "runtime" + "time" +) + +// This is an experimental and unexported (for now) attempt at making a cache +// with better algorithmic complexity than the standard one, namely by +// preventing write locks of the 
entire cache when an item is added. As of the +// time of writing, the overhead of selecting buckets results in cache +// operations being about twice as slow as for the standard cache with small +// total cache sizes, and faster for larger ones. +// +// See cache_test.go for a few benchmarks. + +type unexportedShardedCache struct { + *shardedCache +} + +type shardedCache struct { + seed uint32 + m uint32 + cs []*cache + lastCleanTime time.Time //Last cleanup time + janitor *shardedJanitor +} + +// djb2 with better shuffling. 5x faster than FNV with the hash.Hash overhead. +func djb33(seed uint32, k string) uint32 { + var ( + l = uint32(len(k)) + d = 5381 + seed + l + i = uint32(0) + ) + // Why is all this 5x faster than a for loop? + if l >= 4 { + for i < l-4 { + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + d = (d * 33) ^ uint32(k[i+3]) + i += 4 + } + } + switch l - i { + case 1: + case 2: + d = (d * 33) ^ uint32(k[i]) + case 3: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + case 4: + d = (d * 33) ^ uint32(k[i]) + d = (d * 33) ^ uint32(k[i+1]) + d = (d * 33) ^ uint32(k[i+2]) + } + return d ^ (d >> 16) +} + +func (sc *shardedCache) bucket(k string) *cache { + return sc.cs[djb33(sc.seed, k)%sc.m] +} + +func (sc *shardedCache) Set(k string, x interface{}, d time.Duration) { + sc.bucket(k).Set(k, x, d) +} + +func (sc *shardedCache) Add(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Add(k, x, d) +} + +func (sc *shardedCache) Replace(k string, x interface{}, d time.Duration) error { + return sc.bucket(k).Replace(k, x, d) +} + +func (sc *shardedCache) Get(k string) (interface{}, bool) { + return sc.bucket(k).Get(k) +} + +func (sc *shardedCache) Increment(k string, n int64) error { + return sc.bucket(k).Increment(k, n) +} + +func (sc *shardedCache) IncrementFloat(k string, n float64) error { + return sc.bucket(k).IncrementFloat(k, n) +} + +func (sc *shardedCache) Decrement(k string, 
n int64) error { + return sc.bucket(k).Decrement(k, n) +} + +func (sc *shardedCache) Delete(k string) { + sc.bucket(k).Delete(k) +} + +func (sc *shardedCache) DeleteExpired() { + for _, v := range sc.cs { + v.DeleteExpired() + } +} + +// Returns the items in the cache. This may include items that have expired, +// but have not yet been cleaned up. If this is significant, the Expiration +// fields of the items should be checked. Note that explicit synchronization +// is needed to use a cache and its corresponding Items() return values at +// the same time, as the maps are shared. +func (sc *shardedCache) Items() []map[string]Item { + res := make([]map[string]Item, len(sc.cs)) + for i, v := range sc.cs { + res[i] = v.Items() + } + return res +} + +func (sc *shardedCache) Flush() { + for _, v := range sc.cs { + v.Flush() + } +} + +type shardedJanitor struct { + Interval time.Duration + shoudClean chan bool //Signal should be cleaned up + stop chan bool +} + +func (j *shardedJanitor) Run(sc *shardedCache) { + j.stop = make(chan bool) + tick := time.Tick(j.Interval) + for { + select { + case <-tick: + sc.DeleteExpired() + case <-j.shoudClean: //If received should Clean signal + + sc.DeleteExpired() + case <-j.stop: + return + } + } +} + +func stopShardedJanitor(sc *unexportedShardedCache) { + sc.janitor.stop <- true +} + +func runShardedJanitor(sc *shardedCache, ci time.Duration) { + j := &shardedJanitor{ + Interval: ci, + shoudClean: make(chan bool), + } + sc.janitor = j + go j.Run(sc) +} + +func newShardedCache(n int, de time.Duration, maxItemsCount int) *shardedCache { + max := big.NewInt(0).SetUint64(uint64(math.MaxUint32)) + rnd, err := rand.Int(rand.Reader, max) + var seed uint32 + if err != nil { + os.Stderr.Write([]byte("WARNING: go-cache's newShardedCache failed to read from the system CSPRNG (/dev/urandom or equivalent.) Your system's security may be compromised. 
Continuing with an insecure seed.\n")) + seed = insecurerand.Uint32() + } else { + seed = uint32(rnd.Uint64()) + } + sc := &shardedCache{ + seed: seed, + m: uint32(n), + cs: make([]*cache, n), + } + for i := 0; i < n; i++ { + c := &cache{ + defaultExpiration: de, + items: map[string]Item{}, + maxItemsCount: maxItemsCount, + lastCleanTime: time.Now(), + } + sc.cs[i] = c + } + return sc +} + +func unexportedNewSharded(defaultExpiration, cleanupInterval time.Duration, shards int, maxItemsCount int) *unexportedShardedCache { + if defaultExpiration == 0 { + defaultExpiration = -1 + } + sc := newShardedCache(shards, defaultExpiration, maxItemsCount) + SC := &unexportedShardedCache{sc} + if cleanupInterval > 0 { + runShardedJanitor(sc, cleanupInterval) + runtime.SetFinalizer(SC, stopShardedJanitor) + } + return SC +} diff --git a/pkg/memorycacher/sharded_test.go b/pkg/memorycacher/sharded_test.go new file mode 100644 index 0000000..907c858 --- /dev/null +++ b/pkg/memorycacher/sharded_test.go @@ -0,0 +1,92 @@ +/* + * @Author: patrickmn,gitsrc + * @Date: 2020-07-09 13:17:30 + * @LastEditors: gitsrc + * @LastEditTime: 2020-07-10 10:06:42 + * @FilePath: /ServiceCar/utils/memorycacher/sharded_test.go + */ +package memorycacher + +import ( + "strconv" + "sync" + "testing" + "time" +) + +// func TestDjb33(t *testing.T) { +// } + +var shardedKeys = []string{ + "f", + "fo", + "foo", + "barf", + "barfo", + "foobar", + "bazbarf", + "bazbarfo", + "bazbarfoo", + "foobarbazq", + "foobarbazqu", + "foobarbazquu", + "foobarbazquux", +} + +func TestShardedCache(t *testing.T) { + tc := unexportedNewSharded(DefaultExpiration, time.Minute*2, 13, 100) + for _, v := range shardedKeys { + tc.Set(v, "value", DefaultExpiration) + } +} + +func BenchmarkShardedCacheGetExpiring(b *testing.B) { + benchmarkShardedCacheGet(b, 5*time.Minute) +} + +func BenchmarkShardedCacheGetNotExpiring(b *testing.B) { + benchmarkShardedCacheGet(b, NoExpiration) +} + +func benchmarkShardedCacheGet(b *testing.B, exp 
time.Duration) { + b.StopTimer() + tc := unexportedNewSharded(exp, 0, 10, 0) + tc.Set("foobarba", "zquux", DefaultExpiration) + b.StartTimer() + for i := 0; i < b.N; i++ { + tc.Get("foobarba") + } +} + +func BenchmarkShardedCacheGetManyConcurrentExpiring(b *testing.B) { + benchmarkShardedCacheGetManyConcurrent(b, 5*time.Minute) +} + +func BenchmarkShardedCacheGetManyConcurrentNotExpiring(b *testing.B) { + benchmarkShardedCacheGetManyConcurrent(b, NoExpiration) +} + +func benchmarkShardedCacheGetManyConcurrent(b *testing.B, exp time.Duration) { + b.StopTimer() + n := 10000 + tsc := unexportedNewSharded(exp, 0, 20, 100000) + keys := make([]string, n) + for i := 0; i < n; i++ { + k := "foo" + strconv.Itoa(i) + keys[i] = k + tsc.Set(k, "bar", DefaultExpiration) + } + each := b.N / n + wg := new(sync.WaitGroup) + wg.Add(n) + for _, v := range keys { + go func(k string) { + for j := 0; j < each; j++ { + tsc.Get(k) + } + wg.Done() + }(v) + } + b.StartTimer() + wg.Wait() +} diff --git a/pkg/mesh/service.go b/pkg/mesh/service.go deleted file mode 100644 index 218f414..0000000 --- a/pkg/mesh/service.go +++ /dev/null @@ -1,28 +0,0 @@ -package mesh - -import ( - "github.com/pkg/errors" - - "github.com/go-resty/resty/v2" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" -) - -var httpClient = resty.New() - -const ( - MSPAPIPrefix = "/api/v1" - MSPAPIServiceDynamic = MSPAPIPrefix + "/service_unit/dynamic?page=1&limit_num=999999" -) - -func GetAllDynamicServiceMetadataRaw() ([]byte, error) { - resp, err := httpClient.R().Get(core.Is.Config.Mesh.MSPPortalAPI + MSPAPIServiceDynamic) - if err != nil { - return nil, err - } - if resp.StatusCode() > 200 { - v2log.With("header", resp.Header()).Warn("请求 MSP 错误") - return nil, errors.New("response error") - } - return resp.Body(), nil -} diff --git a/pkg/resource/file_source.go b/pkg/resource/file_source.go deleted file mode 100644 index ed0d3f4..0000000 --- a/pkg/resource/file_source.go 
+++ /dev/null @@ -1,121 +0,0 @@ -package resource - -import ( - "bufio" - "bytes" - "io/ioutil" - "os" - "strconv" - "strings" -) - -func ReadNumberFromRemote(remoteUrl, name string) (n uint64, err error) { - data, err := RemoteGetSourceString(remoteUrl, name) - if err != nil { - return 0, err - } - n, err = strconv.ParseUint(strings.TrimSpace(data), 10, 64) - if err != nil { - return n, err - } - return n, nil -} - -func ReadNumberFromFile(name string) (n uint64, err error) { - file, err := os.Open(name) - if err != nil { - return 0, err - } - defer file.Close() - - data, err := ioutil.ReadAll(file) - if err != nil { - return n, err - } - - n, err = strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - return n, err - } - return n, nil -} - -func ReadIntFromRemote(remoteUrl, name string) (n int64, err error) { - data, err := RemoteGetSourceString(remoteUrl, name) - if err != nil { - return 0, err - } - n, err = strconv.ParseInt(strings.TrimSpace(data), 10, 64) - if err != nil { - return n, err - } - return n, nil -} - -func ReadIntFromFile(name string) (n int64, err error) { - file, err := os.Open(name) - if err != nil { - return 0, err - } - defer file.Close() - - data, err := ioutil.ReadAll(file) - if err != nil { - return n, err - } - - n, err = strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64) - if err != nil { - return n, err - } - return n, nil -} - -func ReadMapFromRemote(remoteUrl, name string) (m map[string]uint64, err error) { - ret, err := RemoteGetSourceByte(remoteUrl, name) - if err != nil { - return nil, err - } - m = make(map[string]uint64) - reader := bytes.NewReader(ret) - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, " ", 2) - if len(parts) != 2 { - continue - } - v, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) - if err != nil { - continue - } - m[parts[0]] = v - } - - return m, nil -} - -func ReadMapFromFile(name string) (m 
map[string]uint64, err error) { - file, err := os.Open(name) - if err != nil { - return nil, err - } - defer file.Close() - - m = make(map[string]uint64) - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, " ", 2) - if len(parts) != 2 { - continue - } - v, err := strconv.ParseUint(strings.TrimSpace(parts[1]), 10, 64) - if err != nil { - continue - } - m[parts[0]] = v - } - - return m, nil -} diff --git a/pkg/resource/file_source/cpu_from_file.go b/pkg/resource/file_source/cpu_from_file.go deleted file mode 100644 index 587b36a..0000000 --- a/pkg/resource/file_source/cpu_from_file.go +++ /dev/null @@ -1,262 +0,0 @@ -package file_source - -import ( - "bufio" - "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "io/ioutil" - "os" - "os/exec" - "strconv" - "strings" - "time" -) - -// Doc: https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt -// Reference: https://segmentfault.com/a/1190000008323952 -// Reference: https://my.oschina.net/jxcdwangtao/blog/828648 - -var ( - coreCount uint64 - limitedCoreCount float64 - cpuTick int -) - -// Errors -var ( - ErrCantGetCoreCount = fmt.Errorf("can't get core count") - ErrCantGetLimitedCoreCount = fmt.Errorf("can't get limited core count") - ErrNoCPUTick = fmt.Errorf("no cpu tick") -) - -func (cf *FileSource) GetCPUStat(interval time.Duration, callback resource.CPUStatCallback) { - if cpuTick == 0 { - callback(nil, ErrNoCPUTick) - return - } - if coreCount == 0 { - callback(nil, ErrCantGetCoreCount) - return - } - if limitedCoreCount < 0.01 { - callback(nil, ErrCantGetLimitedCoreCount) - return - } - - prevSystem, err := getSystemCPUUsage() - if err != nil { - callback(nil, err) - return - } - - prevTotal, err := getTotalCPUUsage() - if err != nil { - callback(nil, err) - return - } - - go func() { - time.Sleep(interval) - - system, err := getSystemCPUUsage() - if err != nil { - callback(nil, 
err) - return - } - total, err := getTotalCPUUsage() - if err != nil { - callback(nil, err) - return - } - - throttled, err := getCPUThrottled() - if err != nil { - callback(nil, err) - return - } - - stat := &resource.CPUStat{} - stat.LimitedCores = limitedCoreCount - stat.Throttled = throttled - cpuDelta := float64(total - prevTotal) - systemDelta := float64(system-prevSystem) * tickToNano() - if systemDelta > 1.0 { - stat.Usage = (cpuDelta / systemDelta) * float64(coreCount) * 100.0 - } - callback(stat, nil) - }() -} - -// units are difference between /proc/stat and cpuacct.usage -// cpuacct.usage's unit is nano second -// /proc/stat's unit is (1 / CLK_TCK) -func tickToNano() float64 { - if cpuTick == 0 { - return 0.0 - } - - return 1000.0 * 1000.0 * 1000.0 / float64(cpuTick) -} - -func (fs *FileSource) InitData() bool { - var err error - coreCount, err = fs.GetCoreCount() - if err != nil { - logger.Warnf("file cpu init error1", err) - } - - limitedCoreCount, err = fs.GetLimitedCoreCount() - if err != nil { - logger.Warnf("file cpu init error2", err) - } - - out, err := exec.Command("getconf", "CLK_TCK").Output() - if err != nil { - logger.Warnf("file cpu init error3", err) - } - cpuTick, err = strconv.Atoi(strings.TrimSpace(string(out))) - if err != nil { - logger.Warnf("file cpu init error4", err) - } - return limitedCoreCount > 0 -} - -func getSystemCPUUsage() (uint64, error) { - // $ cat /proc/stat - // cpu 42812 0 17335 3256641 333 9 1748 0 0 0 - - var scanner *bufio.Scanner - - file, err := os.Open("/proc/stat") - if err != nil { - return 0, err - } - defer file.Close() - scanner = bufio.NewScanner(file) - - prefix := "cpu " - for scanner.Scan() { - line := scanner.Text() - if !strings.HasPrefix(line, prefix) { - continue - } - line = strings.TrimSpace(strings.TrimLeft(line, prefix)) - parts := strings.Split(line, " ") - var total uint64 - for _, part := range parts { - if part == "" { - continue - } - tmp, err := strconv.ParseUint(part, 10, 64) - if err 
!= nil { - return 0, fmt.Errorf("parsing uint64 in /proc/stat, err: %v", err) - } - total += tmp - } - return total, nil - } - return 0, fmt.Errorf("cpu line not found in /proc/stat") -} - -func getTotalCPUUsage() (uint64, error) { - return resource.ReadNumberFromFile("/sys/fs/cgroup/cpuacct/cpuacct.usage") -} - -func (fs *FileSource) GetCoreCount() (uint64, error) { - var data []byte - var err error - - file, err := os.Open("/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu") - if err != nil { - return 0, err - } - defer file.Close() - - data, err = ioutil.ReadAll(file) - if err != nil { - return 0, err - } - - line := strings.TrimSpace(string(data)) - parts := strings.Split(line, " ") - l := len(parts) - return uint64(l), nil -} - -func getCPUThrottled() (uint64, error) { - var m map[string]uint64 - var err error - m, err = resource.ReadMapFromFile("/sys/fs/cgroup/cpu/cpu.stat") - if err != nil { - return 0, err - } - return m["nr_throttled"], nil -} - -func (fs *FileSource) GetLimitedCoreCount() (float64, error) { - if limitedCoreCount > 0 { - return limitedCoreCount, nil - } - var quota, period int64 - var err error - quota, err = resource.ReadIntFromFile("/sys/fs/cgroup/cpu/cpu.cfs_quota_us") - if err != nil { - return 0, err - } - - if quota == -1 { - return getLimitedCoreCountFromCPUSet() - } - period, err = resource.ReadIntFromFile("/sys/fs/cgroup/cpu/cpu.cfs_period_us") - if err != nil { - return 0, err - } - - if period <= 0 { - return 0, fmt.Errorf("cfs_period_us is zero") - } - - return float64(quota) / float64(period), nil -} - -func getLimitedCoreCountFromCPUSet() (float64, error) { - var data []byte - file, err := os.Open("/sys/fs/cgroup/cpuset/cpuset.cpus") - if err != nil { - return 0.0, err - } - defer file.Close() - - data, err = ioutil.ReadAll(file) - if err != nil { - return 0.0, err - } - - var cores int - - line := strings.TrimSpace(string(data)) - parts := strings.Split(line, ",") - for _, part := range parts { - r := strings.Split(part, "-") - if 
len(r) == 1 { - cores++ - continue - } - if len(r) > 2 { - return 0.0, fmt.Errorf("Invalid list format of cpuset.cpus: %s", line) - } - - f, e1 := strconv.Atoi(r[0]) - t, e2 := strconv.Atoi(r[1]) - if e1 != nil || e2 != nil { - return 0.0, fmt.Errorf("Invalid list format of cpuset.cpus: %s", line) - } - cores += t - f + 1 - } - return float64(cores), nil -} - -func (*FileSource) GetCpuCount() float64 { - return limitedCoreCount -} diff --git a/pkg/resource/file_source/disk.go b/pkg/resource/file_source/disk.go deleted file mode 100644 index fb97651..0000000 --- a/pkg/resource/file_source/disk.go +++ /dev/null @@ -1,90 +0,0 @@ -package file_source - -import ( - "bufio" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "os" - "strconv" - "strings" - "time" -) - -func (*FileSource) CurrentDiskStat(interval time.Duration, callback resource.DiskStatCallback) { - var readOld, writeOld uint64 - var err error - if diskAcctFile == "" { - for _, file := range diskAcctFiles { - readOld, writeOld, _ = getDiskReadWrite(file) - if readOld+writeOld > 0 { - diskAcctFile = file - break - } - } - } else { - readOld, writeOld, err = getDiskReadWrite(diskAcctFile) - } - if err != nil { - callback(nil, err) - return - } - go func() { - time.Sleep(interval) - var readNew, writeNew uint64 - if diskAcctFile == "" { - for _, file := range diskAcctFiles { - readNew, writeNew, _ = getDiskReadWrite(file) - if readNew+writeNew > 0 { - diskAcctFile = file - break - } - } - } else { - readNew, writeNew, err = getDiskReadWrite(diskAcctFile) - } - if err != nil { - callback(nil, err) - return - } - stat := &resource.DiskStat{ - Read: readNew - readOld, - Write: writeNew - writeOld, - } - callback(stat, nil) - }() -} - -var diskAcctFile string - -var diskAcctFiles = []string{ - "/sys/fs/cgroup/blkio/blkio.io_service_bytes_recursive", - "/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes", -} - -func getDiskReadWrite(name string) (read, write uint64, err error) { - file, err := 
os.Open(name) - if err != nil { - return 0, 0, err - } - defer file.Close() - - scanner := bufio.NewScanner(file) - var r, w uint64 - for scanner.Scan() { - line := scanner.Text() - parts := strings.Split(line, " ") - if len(parts) != 3 { - continue - } - if parts[1] == "Read" { - tmp, _ := strconv.Atoi(parts[2]) - r += uint64(tmp) - continue - } - if parts[1] == "Write" { - tmp, _ := strconv.Atoi(parts[2]) - w += uint64(tmp) - continue - } - } - return r, w, nil -} diff --git a/pkg/resource/file_source/memory_from_file.go b/pkg/resource/file_source/memory_from_file.go deleted file mode 100644 index d7b7668..0000000 --- a/pkg/resource/file_source/memory_from_file.go +++ /dev/null @@ -1,143 +0,0 @@ -package file_source - -import ( - "bufio" - "errors" - "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "io/ioutil" - "os" - "strconv" - "strings" - - "github.com/shopspring/decimal" -) - -type FileSource struct { - IsInit bool -} - -func NewFileSource() resource.Resource { - res := &FileSource{} - res.InitData() - return res -} - -func (res *FileSource) InitSuccess() bool { - return limitedCoreCount > 0 -} - -func (mf *FileSource) CurrentMemStat() (stat *resource.MemStat, err error) { - var m map[string]uint64 - m, err = resource.ReadMapFromFile("/sys/fs/cgroup/memory/memory.stat") - if err != nil { - return nil, err - } - - stat = &resource.MemStat{} - stat.Total, err = totalMemory(m) - if err != nil { - return nil, err - } - - stat.SwapTotal, stat.SwapUsed = swapState(m) - - stat.Cached = m["total_cache"] - stat.MappedFile = m["total_mapped_file"] - // RSS计算规则修改 - memoryUsageInBytes, err := resource.ReadIntFromFile("/sys/fs/cgroup/memory/memory.usage_in_bytes") - if err != nil { - stat.RSS = m["total_rss"] + stat.MappedFile - } else { - if v, ok := m["total_inactive_file"]; ok { - if uint64(memoryUsageInBytes) < v { - memoryUsageInBytes = 0 - } else { - memoryUsageInBytes -= int64(v) - } - } - stat.RSS = uint64(memoryUsageInBytes) - } - return -} - 
-func getHostMemTotal() (n uint64, err error) { - var scanner *bufio.Scanner - - file, err := os.Open("/proc/meminfo") - if err != nil { - return 0, err - } - defer file.Close() - scanner = bufio.NewScanner(file) - - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, " ", 2) - if len(parts) != 2 { - continue - } - if parts[0] != "MemTotal:" { - continue - } - parts[1] = strings.TrimSpace(parts[1]) - value := strings.TrimSuffix(parts[1], "kB") - value = strings.TrimSpace(value) - n, err = strconv.ParseUint(value, 10, 64) - n *= 1024 - if err != nil { - return 0, err - } - break - } - return -} - -func totalMemory(m map[string]uint64) (uint64, error) { - hostTotal, err := getHostMemTotal() - if err != nil { - return 0, err - } - limit, ok := m["hierarchical_memory_limit"] - if !ok { - return 0, fmt.Errorf("missing hierarchical_memory_limit") - } - if hostTotal > limit { - return limit, nil - } - return hostTotal, nil -} - -func swapState(m map[string]uint64) (total uint64, used uint64) { - memSwap, ok := m["hierarchical_memsw_limit"] - if !ok { - return 0, 0 - } - - mem := m["hierarchical_memory_limit"] - if memSwap == mem { - return 0, 0 - } - - total = memSwap - mem - used = m["total_swap"] - return total, used -} - -func (*FileSource) GetRss() (int64, error) { - buf, err := ioutil.ReadFile("/proc/self/statm") - if err != nil { - return 0, err - } - fields := strings.Split(string(buf), " ") - if len(fields) < 2 { - return 0, errors.New("cannot parse statm") - } - - rss, err := strconv.ParseInt(fields[1], 10, 64) - if err != nil { - return 0, err - } - r := decimal.NewFromInt(rss * int64(os.Getpagesize())) - return r.Div(decimal.NewFromInt32(1024 * 1024)).IntPart(), err -} diff --git a/pkg/resource/file_source/net.go b/pkg/resource/file_source/net.go deleted file mode 100644 index 46265bf..0000000 --- a/pkg/resource/file_source/net.go +++ /dev/null @@ -1,80 +0,0 @@ -package file_source - -import ( - "bytes" - "fmt" - 
"gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "os" - "os/exec" - "strings" - "time" -) - -var ethInterface string - -var ErrDefaultEthInterfaceNotfound = fmt.Errorf("default EthInterface notfound") - -func init() { - // todo network - ethInterface = "eth0" - if val := os.Getenv("MSP_ETH_INTERFACE_NAME"); len(val) > 0 { - ethInterface = val - } -} - -func (*FileSource) CurrentNetworkStat(interval time.Duration, callback resource.NetStatCallback) { - var rxbytesOld, txbytesOld uint64 - var err error - - folder := "/sys/class/net/" + ethInterface + "/statistics/" - rxbytesOld, err = resource.ReadNumberFromFile(folder + "rx_bytes") - if err != nil { - callback(nil, err) - return - } - txbytesOld, err = resource.ReadNumberFromFile(folder + "tx_bytes") - if err != nil { - callback(nil, err) - return - } - go func() { - time.Sleep(interval) - rxbytesNew, err := resource.ReadNumberFromFile(folder + "rx_bytes") - if err != nil { - callback(nil, err) - return - } - txbytesNew, err := resource.ReadNumberFromFile(folder + "tx_bytes") - if err != nil { - callback(nil, err) - return - } - stat := &resource.NetworkStat{ - RxBytes: rxbytesNew - rxbytesOld, - TxBytes: txbytesNew - txbytesOld, - } - callback(stat, nil) - }() -} - -func init() { - // $ ip -o -4 route show to default - // default via 172.17.0.1 dev eth0 - cmd := exec.Command("ip", "-o", "-4", "route", "show", "to", "default") - var out bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &out - cmd.Stderr = &stderr - err := cmd.Run() - if err != nil { - // fmt.Println("ip cmd err: " + err.Error()) - // fmt.Println("ip cmd err result: " + stderr.String()) - return - } - parts := strings.Split(strings.TrimSpace(out.String()), " ") - if len(parts) < 5 { - fmt.Println(fmt.Errorf("invalid result from \"ip -o -4 route show to default\": %s", out.String())) - return - } - ethInterface = strings.TrimSpace(parts[4]) -} diff --git a/pkg/resource/file_source/netstat.go b/pkg/resource/file_source/netstat.go deleted 
file mode 100644 index 3d4f84f..0000000 --- a/pkg/resource/file_source/netstat.go +++ /dev/null @@ -1,22 +0,0 @@ -package file_source - -import ( - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "gitlab.oneitfarm.com/bifrost/go-netstat/netstat" -) - -func (*FileSource) GetNetstat() (tcp map[string]int, err error) { - socks, err := netstat.TCPSocks(netstat.NoopFilter) - if err != nil { - logger.Warnf("获取服务TCP连接失败 ", err) - return - } - tcp = make(map[string]int) - if len(socks) > 0 { - for _, value := range socks { - state := value.State.String() - tcp[state] += 1 - } - } - return -} diff --git a/pkg/resource/http_source.go b/pkg/resource/http_source.go deleted file mode 100644 index 79d1e77..0000000 --- a/pkg/resource/http_source.go +++ /dev/null @@ -1,52 +0,0 @@ -package resource - -import ( - "fmt" - "io/ioutil" - "net/http" - "strings" - "time" -) - -var _httpClient *http.Client - -func client() *http.Client { - if _httpClient == nil { - _httpClient = &http.Client{ - Timeout: time.Second, - } - } - return _httpClient -} - -func RemoteGetSourceString(remoteUrl, directory string) (data string, err error) { - req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", remoteUrl, directory), nil) - if err != nil { - return data, err - } - resp, err := client().Do(req) - if err != nil { - return data, err - } - defer resp.Body.Close() - ret, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - data = strings.TrimSpace(string(ret)) - return data, nil -} - -func RemoteGetSourceByte(remoteUrl, directory string) (ret []byte, err error) { - req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", remoteUrl, directory), nil) - if err != nil { - return ret, err - } - resp, err := client().Do(req) - if err != nil { - return ret, err - } - defer resp.Body.Close() - ret, err = ioutil.ReadAll(resp.Body) - return -} diff --git a/pkg/resource/http_source/cpu_from_url.go b/pkg/resource/http_source/cpu_from_url.go deleted file mode 100644 index 695a000..0000000 --- 
a/pkg/resource/http_source/cpu_from_url.go +++ /dev/null @@ -1,269 +0,0 @@ -package http_source - -import ( - "bufio" - "bytes" - "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "os/exec" - "strconv" - "strings" - "time" - - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" -) - -// Doc: https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt -// Reference: https://segmentfault.com/a/1190000008323952 -// Reference: https://my.oschina.net/jxcdwangtao/blog/828648 - -var ( - coreCount uint64 - limitedCoreCount float64 - cpuTick int -) - -// Errors -var ( - ErrCantGetCoreCount = fmt.Errorf("can't get core count") - ErrCantGetLimitedCoreCount = fmt.Errorf("can't get limited core count") - ErrNoCPUTick = fmt.Errorf("no cpu tick") -) - -type HttpSource struct { - remoteUrl string -} - -func NewHttpSource(url string) resource.Resource { - res := &HttpSource{url} - res.InitData() - return res -} - -func (res *HttpSource) InitSuccess() bool { - return limitedCoreCount > 0 -} - -func (hs *HttpSource) GetCPUStat(interval time.Duration, callback resource.CPUStatCallback) { - if cpuTick == 0 { - callback(nil, ErrNoCPUTick) - return - } - if coreCount == 0 { - callback(nil, ErrCantGetCoreCount) - return - } - if limitedCoreCount < 0.01 { - callback(nil, ErrCantGetLimitedCoreCount) - return - } - - prevSystem, err := hs.getSystemCPUUsage() - if err != nil { - callback(nil, err) - return - } - - prevTotal, err := hs.getTotalCPUUsage() - if err != nil { - callback(nil, err) - return - } - - go func() { - time.Sleep(interval) - - system, err := hs.getSystemCPUUsage() - if err != nil { - callback(nil, err) - return - } - total, err := hs.getTotalCPUUsage() - if err != nil { - callback(nil, err) - return - } - - throttled, err := hs.getCPUThrottled() - if err != nil { - callback(nil, err) - return - } - - stat := &resource.CPUStat{} - stat.LimitedCores = limitedCoreCount - stat.Throttled = throttled - cpuDelta := float64(total - prevTotal) - systemDelta := 
float64(system-prevSystem) * tickToNano() - if systemDelta > 1.0 { - stat.Usage = (cpuDelta / systemDelta) * float64(coreCount) * 100.0 - } - callback(stat, nil) - }() -} - -// units are difference between /proc/stat and cpuacct.usage -// cpuacct.usage's unit is nano second -// /proc/stat's unit is (1 / CLK_TCK) -func tickToNano() float64 { - if cpuTick == 0 { - return 0.0 - } - - return 1000.0 * 1000.0 * 1000.0 / float64(cpuTick) -} - -func (hs *HttpSource) InitData() bool { - var err error - coreCount, err = hs.GetCoreCount() - if err != nil { - logger.Warnf("file cpu init error1", err) - return false - } - - limitedCoreCount, err = hs.GetLimitedCoreCount() - if err != nil { - logger.Warnf("file cpu init error2", err) - return false - } - - out, err := exec.Command("getconf", "CLK_TCK").Output() - if err != nil { - logger.Warnf("file cpu init error3", err) - return false - } - cpuTick, err = strconv.Atoi(strings.TrimSpace(string(out))) - if err != nil { - logger.Warnf("file cpu init error4", err) - return false - } - return limitedCoreCount > 0 -} - -func (hs *HttpSource) getSystemCPUUsage() (uint64, error) { - // $ cat /proc/stat - // cpu 42812 0 17335 3256641 333 9 1748 0 0 0 - - var scanner *bufio.Scanner - - ret, err := resource.RemoteGetSourceByte(hs.remoteUrl, "/proc/stat") - if err != nil { - return 0, err - } - reader := bytes.NewReader(ret) - scanner = bufio.NewScanner(reader) - prefix := "cpu " - for scanner.Scan() { - line := scanner.Text() - if !strings.HasPrefix(line, prefix) { - continue - } - line = strings.TrimSpace(strings.TrimLeft(line, prefix)) - parts := strings.Split(line, " ") - var total uint64 - for _, part := range parts { - if part == "" { - continue - } - tmp, err := strconv.ParseUint(part, 10, 64) - if err != nil { - return 0, fmt.Errorf("parsing uint64 in /proc/stat, err: %v", err) - } - total += tmp - } - return total, nil - } - return 0, fmt.Errorf("cpu line not found in /proc/stat") -} - -func (hs *HttpSource) getTotalCPUUsage() 
(uint64, error) { - return resource.ReadNumberFromRemote(hs.remoteUrl, "/sys/fs/cgroup/cpuacct/cpuacct.usage") -} - -func (hs *HttpSource) GetCoreCount() (uint64, error) { - var data []byte - var err error - - data, err = resource.RemoteGetSourceByte(hs.remoteUrl, "/sys/fs/cgroup/cpuacct/cpuacct.usage_percpu") - if err != nil { - return 0, err - } - - line := strings.TrimSpace(string(data)) - parts := strings.Split(line, " ") - l := len(parts) - return uint64(l), nil -} - -func (hs *HttpSource) getCPUThrottled() (uint64, error) { - var m map[string]uint64 - var err error - - m, err = resource.ReadMapFromRemote(hs.remoteUrl, "/sys/fs/cgroup/cpu/cpu.stat") - if err != nil { - return 0, err - } - return m["nr_throttled"], nil -} - -func (hs *HttpSource) GetLimitedCoreCount() (float64, error) { - if limitedCoreCount > 0 { - return limitedCoreCount, nil - } - var quota, period int64 - var err error - quota, err = resource.ReadIntFromRemote(hs.remoteUrl, "/sys/fs/cgroup/cpu/cpu.cfs_quota_us") - if err != nil { - return 0, err - } - - if quota == -1 { - return hs.getLimitedCoreCountFromCPUSet() - } - period, err = resource.ReadIntFromRemote(hs.remoteUrl, "/sys/fs/cgroup/cpu/cpu.cfs_period_us") - if err != nil { - return 0, err - } - - if period <= 0 { - return 0, fmt.Errorf("cfs_period_us is zero") - } - - return float64(quota) / float64(period), nil -} - -func (hs *HttpSource) getLimitedCoreCountFromCPUSet() (float64, error) { - var data []byte - var err error - data, err = resource.RemoteGetSourceByte(hs.remoteUrl, "/sys/fs/cgroup/cpuset/cpuset.cpus") - if err != nil { - return 0.0, err - } - - var cores int - - line := strings.TrimSpace(string(data)) - parts := strings.Split(line, ",") - for _, part := range parts { - r := strings.Split(part, "-") - if len(r) == 1 { - cores++ - continue - } - if len(r) > 2 { - return 0.0, fmt.Errorf("Invalid list format of cpuset.cpus: %s", line) - } - - f, e1 := strconv.Atoi(r[0]) - t, e2 := strconv.Atoi(r[1]) - if e1 != nil || e2 != 
nil { - return 0.0, fmt.Errorf("Invalid list format of cpuset.cpus: %s", line) - } - cores += t - f + 1 - } - return float64(cores), nil -} - -func (*HttpSource) GetCpuCount() float64 { - return limitedCoreCount -} diff --git a/pkg/resource/http_source/disk_from_url.go b/pkg/resource/http_source/disk_from_url.go deleted file mode 100644 index e82b0e2..0000000 --- a/pkg/resource/http_source/disk_from_url.go +++ /dev/null @@ -1,90 +0,0 @@ -package http_source - -import ( - "bufio" - "bytes" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "strconv" - "strings" - "time" -) - -var diskAcctFile string - -var diskAcctFiles = []string{ - "/sys/fs/cgroup/blkio/blkio.io_service_bytes_recursive", - "/sys/fs/cgroup/blkio/blkio.throttle.io_service_bytes", -} - -func (hs *HttpSource) CurrentDiskStat(interval time.Duration, callback resource.DiskStatCallback) { - var readOld, writeOld uint64 - var err error - if diskAcctFile == "" { - for _, file := range diskAcctFiles { - readOld, writeOld, _ = getDiskReadWrite(hs.remoteUrl, file) - if readOld+writeOld > 0 { - diskAcctFile = file - break - } - } - } else { - readOld, writeOld, err = getDiskReadWrite(hs.remoteUrl, diskAcctFile) - } - if err != nil { - callback(nil, err) - return - } - go func() { - time.Sleep(interval) - var readNew, writeNew uint64 - if diskAcctFile == "" { - for _, file := range diskAcctFiles { - readNew, writeNew, _ = getDiskReadWrite(hs.remoteUrl, file) - if readNew+writeNew > 0 { - diskAcctFile = file - break - } - } - } else { - readNew, writeNew, err = getDiskReadWrite(hs.remoteUrl, diskAcctFile) - } - if err != nil { - callback(nil, err) - return - } - stat := &resource.DiskStat{ - Read: readNew - readOld, - Write: writeNew - writeOld, - } - callback(stat, nil) - }() -} - -func getDiskReadWrite(url, name string) (read, write uint64, err error) { - ret, err := resource.RemoteGetSourceByte(url, name) - if err != nil { - return 0, 0, err - } - reader := bytes.NewReader(ret) - scanner := 
bufio.NewScanner(reader) - - var r, w uint64 - for scanner.Scan() { - line := scanner.Text() - parts := strings.Split(line, " ") - if len(parts) != 3 { - continue - } - if parts[1] == "Read" { - tmp, _ := strconv.Atoi(parts[2]) - r += uint64(tmp) - continue - } - if parts[1] == "Write" { - tmp, _ := strconv.Atoi(parts[2]) - w += uint64(tmp) - continue - } - } - return r, w, nil -} diff --git a/pkg/resource/http_source/memory_from_url.go b/pkg/resource/http_source/memory_from_url.go deleted file mode 100644 index 6c8a4f2..0000000 --- a/pkg/resource/http_source/memory_from_url.go +++ /dev/null @@ -1,115 +0,0 @@ -package http_source - -import ( - "bufio" - "bytes" - "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "strconv" - "strings" -) - -// Doc: https://www.kernel.org/doc/Documentation/cgroup-v1/memory.txt -// Reference: http://linuxperf.com/?p=142 - -func (hs *HttpSource) CurrentMemStat() (stat *resource.MemStat, err error) { - var m map[string]uint64 - m, err = resource.ReadMapFromRemote(hs.remoteUrl, "/sys/fs/cgroup/memory/memory.stat") - if err != nil { - return nil, err - } - - stat = &resource.MemStat{} - stat.Total, err = hs.totalMemory(m) - if err != nil { - return nil, err - } - - stat.SwapTotal, stat.SwapUsed = hs.swapState(m) - - stat.Cached = m["total_cache"] - stat.MappedFile = m["total_mapped_file"] - // RSS计算规则修改 - memoryUsageInBytes, err := resource.ReadIntFromRemote(hs.remoteUrl, "/sys/fs/cgroup/memory/memory.usage_in_bytes") - if err != nil { - stat.RSS = m["total_rss"] + stat.MappedFile - } else { - if v, ok := m["total_inactive_file"]; ok { - if uint64(memoryUsageInBytes) < v { - memoryUsageInBytes = 0 - } else { - memoryUsageInBytes -= int64(v) - } - } - stat.RSS = uint64(memoryUsageInBytes) - } - return -} - -func (hs *HttpSource) getHostMemTotal() (n uint64, err error) { - var scanner *bufio.Scanner - - ret, err := resource.RemoteGetSourceByte(hs.remoteUrl, "/proc/meminfo") - if err != nil { - return 0, err - } - reader := 
bytes.NewReader(ret) - scanner = bufio.NewScanner(reader) - - for scanner.Scan() { - line := scanner.Text() - parts := strings.SplitN(line, " ", 2) - if len(parts) != 2 { - continue - } - if parts[0] != "MemTotal:" { - continue - } - parts[1] = strings.TrimSpace(parts[1]) - value := strings.TrimSuffix(parts[1], "kB") - value = strings.TrimSpace(value) - n, err = strconv.ParseUint(value, 10, 64) - n *= 1024 - if err != nil { - return 0, err - } - break - } - return -} - -func (hs *HttpSource) totalMemory(m map[string]uint64) (uint64, error) { - hostTotal, err := hs.getHostMemTotal() - if err != nil { - return 0, err - } - limit, ok := m["hierarchical_memory_limit"] - if !ok { - return 0, fmt.Errorf("missing hierarchical_memory_limit") - } - if hostTotal > limit { - return limit, nil - } - return hostTotal, nil -} - -func (hs *HttpSource) swapState(m map[string]uint64) (total uint64, used uint64) { - memSwap, ok := m["hierarchical_memsw_limit"] - if !ok { - return 0, 0 - } - - mem := m["hierarchical_memory_limit"] - if memSwap == mem { - return 0, 0 - } - - total = memSwap - mem - used = m["total_swap"] - return total, used -} - -// 获取当前进程所占用的内存 -func (hs *HttpSource) GetRss() (int64, error) { - return 0, nil -} diff --git a/pkg/resource/http_source/net.go b/pkg/resource/http_source/net.go deleted file mode 100644 index dcc5e54..0000000 --- a/pkg/resource/http_source/net.go +++ /dev/null @@ -1,72 +0,0 @@ -package http_source - -import ( - "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "time" -) - -var ethInterface string - -var ErrDefaultEthInterfaceNotfound = fmt.Errorf("default EthInterface notfound") - -func (hs *HttpSource) CurrentNetworkStat(interval time.Duration, callback resource.NetStatCallback) { - var rxbytesOld, txbytesOld uint64 - var err error - if ethInterface == "" { - callback(nil, ErrDefaultEthInterfaceNotfound) - return - } - folder := "/sys/class/net/" + ethInterface + "/statistics/" - rxbytesOld, err = 
resource.ReadNumberFromFile(folder + "rx_bytes") - if err != nil { - callback(nil, err) - return - } - txbytesOld, err = resource.ReadNumberFromFile(folder + "tx_bytes") - if err != nil { - callback(nil, err) - return - } - go func() { - time.Sleep(interval) - rxbytesNew, err := resource.ReadNumberFromFile(folder + "rx_bytes") - if err != nil { - callback(nil, err) - return - } - txbytesNew, err := resource.ReadNumberFromFile(folder + "tx_bytes") - if err != nil { - callback(nil, err) - return - } - stat := &resource.NetworkStat{ - RxBytes: rxbytesNew - rxbytesOld, - TxBytes: txbytesNew - txbytesOld, - } - callback(stat, nil) - }() -} - -/*func init() { - // $ ip -o -4 route show to default - // default via 172.17.0.1 dev eth0 - cmd := exec.Command("ip", "-o", "-4", "route", "show", "to", "default") - var out bytes.Buffer - var stderr bytes.Buffer - cmd.Stdout = &out - cmd.Stderr = &stderr - err := cmd.Run() - if err != nil { - fmt.Println("ip cmd err: " + err.Error()) - fmt.Println("ip cmd err result: " + stderr.String()) - return - } - parts := strings.Split(strings.TrimSpace(out.String()), " ") - if len(parts) != 5 { - fmt.Println(fmt.Errorf("invalid result from \"ip -o -4 route show to default\": %s", out.String())) - return - } - ethInterface = strings.TrimSpace(parts[4]) -} -*/ diff --git a/pkg/resource/http_source/netstat.go b/pkg/resource/http_source/netstat.go deleted file mode 100644 index 536c179..0000000 --- a/pkg/resource/http_source/netstat.go +++ /dev/null @@ -1,5 +0,0 @@ -package http_source - -func (*HttpSource) GetNetstat() (map[string]int, error) { - return nil, nil -} diff --git a/pkg/resource/resource.go b/pkg/resource/resource.go deleted file mode 100644 index 5ea4ebb..0000000 --- a/pkg/resource/resource.go +++ /dev/null @@ -1,48 +0,0 @@ -package resource - -import "time" - -type MemStat struct { - Total uint64 // min(hierarchical_memory_limit, host memory total) - RSS uint64 // rss in memory.stat + mapped_file - Cached uint64 // mapped_file + 
unmapped_file + tmpfs - MappedFile uint64 // mapped_file - - SwapTotal uint64 - SwapUsed uint64 -} - -type CPUStat struct { - LimitedCores float64 - Usage float64 - Throttled uint64 // cpu.stat: nr_throttled -} - -type DiskStat struct { - Read uint64 - Write uint64 -} - -type NetworkStat struct { - RxBytes uint64 - TxBytes uint64 -} - -type CPUStatCallback func(stat *CPUStat, err error) - -type DiskStatCallback func(stat *DiskStat, err error) - -type NetStatCallback func(stat *NetworkStat, err error) - -type Resource interface { - CurrentMemStat() (stat *MemStat, err error) - GetCPUStat(interval time.Duration, callback CPUStatCallback) - CurrentDiskStat(interval time.Duration, callback DiskStatCallback) - CurrentNetworkStat(interval time.Duration, callback NetStatCallback) - GetRss() (int64, error) - GetNetstat() (map[string]int, error) - GetLimitedCoreCount() (float64, error) - GetCpuCount() float64 - InitSuccess() bool - InitData() bool -} diff --git a/pkg/signature/signer.go b/pkg/signature/signer.go index c8f3478..8d15c3e 100644 --- a/pkg/signature/signer.go +++ b/pkg/signature/signer.go @@ -18,14 +18,14 @@ func NewSigner(priv crypto.PrivateKey) *Signer { return &Signer{priv: priv} } -// Sign 签名 +// Sign func (s *Signer) Sign(text []byte) (sign string, err error) { switch priv := s.priv.(type) { case *ecdsa.PrivateKey: sign, err = EcdsaSign(priv, text) return case *rsa.PrivateKey: - // TODO 支持 RSA + // Todo supports RSA return "", errors.New("algo not supported") default: return "", errors.New("algo not supported") @@ -42,13 +42,13 @@ func NewVerifier(pub crypto.PublicKey) *Verifier { return &Verifier{pub: pub} } -// Verify 验证签名 +// Verify Verify signature func (v *Verifier) Verify(text []byte, sign string) (bool, error) { switch pub := v.pub.(type) { case *ecdsa.PublicKey: return EcdsaVerify(text, sign, pub) case *rsa.PublicKey: - // TODO 支持 RSA + // Todo supports RSA default: } return false, errors.New("algo not supported") diff --git 
a/pkg/signature/signer_test.go b/pkg/signature/signer_test.go index 6e4ad6d..4ee59c3 100644 --- a/pkg/signature/signer_test.go +++ b/pkg/signature/signer_test.go @@ -3,14 +3,14 @@ package signature import ( "crypto/ecdsa" "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/keygen" + "github.com/ztalab/ZACA/pkg/keygen" "testing" ) func TestEcdsaSign(t *testing.T) { priv, _, _ := keygen.GenKey(keygen.EcdsaSigAlg) s := NewSigner(priv) - sign, err := s.Sign([]byte("测试")) + sign, err := s.Sign([]byte("Test")) if err != nil { panic(err) } @@ -18,7 +18,7 @@ func TestEcdsaSign(t *testing.T) { } func TestEcdsaVerify(t *testing.T) { - text := []byte("测试") + text := []byte("Test") priv, _, _ := keygen.GenKey(keygen.EcdsaSigAlg) s := NewSigner(priv) sign, err := s.Sign(text) diff --git a/pkg/signature/util.go b/pkg/signature/util.go index a7fce5e..e6ce2d6 100644 --- a/pkg/signature/util.go +++ b/pkg/signature/util.go @@ -11,7 +11,7 @@ import ( "github.com/pkg/errors" ) -// EcdsaSign 对 text 签名 返回加密结果, 结果为数字证书 r,s 的序列化后拼接, 然后用 hex 转换为 string +// EcdsaSign The encryption result of the text signature is returned. 
The result is the serialization and splicing of the digital certificate R and s, and then converted into string with hex func EcdsaSign(priv *ecdsa.PrivateKey, text []byte) (string, error) { hash := sha256.Sum256(text) r, s, err := ecdsa.Sign(rand.Reader, priv, hash[:]) @@ -21,7 +21,7 @@ func EcdsaSign(priv *ecdsa.PrivateKey, text []byte) (string, error) { return EcdsaSignEncode(r, s) } -// EcdsaSignEncode r, s 转换成字符串 +// EcdsaSignEncode r, s Convert to string func EcdsaSignEncode(r, s *big.Int) (string, error) { rt, err := r.MarshalText() if err != nil { @@ -35,7 +35,7 @@ func EcdsaSignEncode(r, s *big.Int) (string, error) { return hex.EncodeToString([]byte(b)), nil } -// EcdsaSignDecode r, s 字符串解析 +// EcdsaSignDecode r, s String parsing func EcdsaSignDecode(sign string) (rint, sint big.Int, err error) { b, err := hex.DecodeString(sign) if err != nil { @@ -60,7 +60,7 @@ func EcdsaSignDecode(sign string) (rint, sint big.Int, err error) { return } -// EcdsaVerify 校验文本内容是否与签名一致 使用公钥校验签名和文本内容 +// EcdsaVerify Verify whether the text content is consistent with the signature. Use the public key to verify the signature and text content func EcdsaVerify(text []byte, sign string, pubKey *ecdsa.PublicKey) (bool, error) { hash := sha256.Sum256(text) rint, sint, err := EcdsaSignDecode(sign) diff --git a/pkg/vaultinit/init.go b/pkg/vaultinit/init.go deleted file mode 100644 index ae954f7..0000000 --- a/pkg/vaultinit/init.go +++ /dev/null @@ -1,204 +0,0 @@ -package vaultinit - -import ( - "crypto/tls" - "database/sql" - "errors" - "log" - "net/http" - "os" - "time" - - "github.com/hashicorp/go-discover" - vaultAPI "github.com/hashicorp/vault/api" - jsoniter "github.com/json-iterator/go" - "gitlab.oneitfarm.com/bifrost/capitalizone/core" - "gitlab.oneitfarm.com/bifrost/capitalizone/database/mysql/cfssl-model/model" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/discover/k8s" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" - "gorm.io/gorm" -) - -// keyname ... 
-const ( - StoreKeyName = "vault" -) - -var httpClient = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - IdleConnTimeout: 60 * time.Second, - }, - Timeout: 60 * time.Second, -} - -// Init ... -func Init() { - if os.Getenv("IS_VAULT_INIT") == "false" { - return - } - - d := discover.Discover{ - Providers: map[string]discover.Provider{ - "k8s": &k8s.Provider{}, - }, - } - l := log.New(os.Stderr, "", log.LstdFlags) - -retry: - addrs, err := d.Addrs(core.Is.Config.Vault.Discover, l) - if err != nil { - logger.Errorf("Vault addr discover err: %s", err) - time.Sleep(5 * time.Second) - goto retry - } - - if len(addrs) == 0 { - logger.Error("Vault node = 0") - time.Sleep(5 * time.Second) - goto retry - } - - var inited bool - // 获取整体 Inited 状态 - { - for _, addr := range addrs { - conf := &vaultAPI.Config{ - Address: "http://" + addr + ":8200", - HttpClient: httpClient, - } - cli, _ := vaultAPI.NewClient(conf) - status, err := cli.Sys().Health() - if err != nil { - logger.With("addr", addr).Errorf("Get init status err: %s", err) - time.Sleep(5 * time.Second) - goto retry - } - - if status.Initialized { - inited = true - } - } - } - - // 初始化 - if !inited { - if err := vaultInit(addrs[0]); err != nil { - time.Sleep(5 * time.Second) - goto retry - } - } - - // 解密 - for _, addr := range addrs { - if err := vaultUnseal(addr); err != nil { - logger.With("addr", addr).Errorf("Vault Unseal err: %s", err) - } - } - - go func() { - time.Sleep(1 * time.Minute) - Init() - }() -} - -func vaultInit(addr string) error { - conf := &vaultAPI.Config{ - Address: "http://" + addr + ":8200", - HttpClient: httpClient, - } - cli, _ := vaultAPI.NewClient(conf) - status, err := cli.Sys().Health() - if err != nil { - logger.With("addr", addr).Errorf("Get init status err: %s", err) - return err - } - - if status.Initialized { - return nil - } - - logger.With("addr", addr).Info("Vault init...") - resp, err := 
cli.Sys().Init(&vaultAPI.InitRequest{ - SecretShares: 5, - SecretThreshold: 3, - }) - if err != nil { - logger.With("addr", addr).Errorf("Vault init err: %s", err) - return err - } - logger.With("addr", addr).Infof("Vault inited success") // 敏感信息不能流入日志 - data, _ := jsoniter.MarshalToString(resp) - // 临时储存 DB - keyPair := &model.SelfKeypair{ - Name: StoreKeyName, - PrivateKey: sql.NullString{String: data, Valid: true}, - Certificate: sql.NullString{String: "", Valid: true}, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - } - if err := core.Is.Db.Create(keyPair).Error; err != nil { - logger.With("key", resp).Errorf("Store Vaule key err: %s", err) - } - - return nil -} - -func vaultUnseal(addr string) error { - conf := &vaultAPI.Config{ - Address: "http://" + addr + ":8200", - HttpClient: httpClient, - } - cli, _ := vaultAPI.NewClient(conf) - status, err := cli.Sys().Health() - if err != nil { - logger.With("addr", addr).Errorf("Get init status err: %s", err) - return err - } - - if !status.Sealed { - return nil - } - - keyPair := &model.SelfKeypair{} - err = core.Is.Db.Where("name = ?", StoreKeyName).Order("id desc").First(keyPair).Error - if err != nil { - if errors.Is(err, gorm.ErrRecordNotFound) { - logger.Errorf("Vault key not found") - return err - } - logger.Errorf("DB query err: %s", err) - return err - } - key := keyPair.PrivateKey.String - keys := new(vaultAPI.InitResponse) - if err := jsoniter.UnmarshalFromString(key, keys); err != nil { - logger.Errorf("Unmarshal keys err: %s", err) - return err - } - logger.With("addr", addr, "keys", keys).Info("Vault unseal...") - for _, unsealKey := range keys.Keys { - resp, err := cli.Sys().Unseal(unsealKey) - if err != nil { - logger.With("addr", addr).Errorf("Vault Unseal err: %s", err) - continue - } - if !resp.Sealed { - logger.With("addr", addr).Info("Vault unsealed.") - break - } - } - - status, err = cli.Sys().Health() - if err != nil { - return err - } - - if status.Sealed { - logger.With("addr", 
addr).Error("Vault unseal failed") - } - - return nil -} diff --git a/pkg/vaultsecret/secret.go b/pkg/vaultsecret/secret.go index f9c238c..a2f9bfc 100644 --- a/pkg/vaultsecret/secret.go +++ b/pkg/vaultsecret/secret.go @@ -1,11 +1,11 @@ package vaultsecret import ( + "github.com/ztalab/ZACA/pkg/logger" "strings" vaultAPI "github.com/hashicorp/vault/api" "github.com/spf13/cast" - v2 "gitlab.oneitfarm.com/bifrost/cilog/v2" ) const ( @@ -73,7 +73,7 @@ func (v *VaultSecret) GetCertPEMKey(sn string) (*string, *string, error) { if err != nil { return nil, nil, err } - v2.S().With("data", data.Data).Debugf("Vault 获取 CERT KEY") + logger.S().With("data", data.Data).Debugf("Vault Obtain CERT KEY") var pem string var key string if data != nil { diff --git a/start.sh b/start.sh deleted file mode 100644 index f67bfd1..0000000 --- a/start.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -./capitalizone http & -./capitalizone tls & -./capitalizone ocsp & \ No newline at end of file diff --git a/telegraf.conf b/telegraf.conf deleted file mode 100644 index a5cbc20..0000000 --- a/telegraf.conf +++ /dev/null @@ -1,12 +0,0 @@ -[agent] - interval = "3s" - flush_interval = "5s" - debug = true - -[[inputs.prometheus]] - urls = ["http://127.0.0.1:8080/metrics"] - -[[outputs.influxdb]] - urls = ["http://victoria-vmauth.gw001.oneitfarm.com:80"] - username = "msp_test_influx" - password = "msp_test_influx" \ No newline at end of file diff --git a/test/benchmark/client/client.go b/test/benchmark/client/client.go index 6b52c16..e974fb4 100644 --- a/test/benchmark/client/client.go +++ b/test/benchmark/client/client.go @@ -5,9 +5,9 @@ import ( "flag" "fmt" "github.com/valyala/fasthttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - logger "gitlab.oneitfarm.com/bifrost/cilog/v2" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/logger" + "github.com/ztalab/ZACA/pkg/spiffe" "go.uber.org/zap/zapcore" 
"io/ioutil" "net/http" @@ -121,7 +121,7 @@ func main() { func mTlsConfig() *tls.Config { cai := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, *ca), + caclient.WithCAServer(caclient.RoleDefault, *ca), caclient.WithOcspAddr(*ocspAddr)) ex, err := cai.NewExchanger(&spiffe.IDGIdentity{ SiteID: "test_site", diff --git a/test/benchmark/server/server.go b/test/benchmark/server/server.go index 108bb6d..e2a230f 100644 --- a/test/benchmark/server/server.go +++ b/test/benchmark/server/server.go @@ -5,8 +5,8 @@ import ( "flag" "fmt" "github.com/valyala/fasthttp" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" + "github.com/ztalab/ZACA/pkg/caclient" + "github.com/ztalab/ZACA/pkg/spiffe" "net" "time" ) @@ -78,7 +78,7 @@ func main() { func mTlsConfig() *tls.Config { cai := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, *ca), + caclient.WithCAServer(caclient.RoleDefault, *ca), caclient.WithOcspAddr(*ocspAddr)) ex, err := cai.NewExchanger(&spiffe.IDGIdentity{ SiteID: "test_site", diff --git a/test/fake/fake_server.go b/test/fake/fake_server.go deleted file mode 100644 index a0b526c..0000000 --- a/test/fake/fake_server.go +++ /dev/null @@ -1,26 +0,0 @@ -package main - -import ( - "gitlab.oneitfarm.com/bifrost/capitalizone/test/fake/modules" - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" - "time" -) - -func main() { - //cli.Start(func(i *core.I) error { - // if err := keymanager.InitKeeper(); err != nil { - // i.Logger.Fatal(err) - // return err - // } - // timerRun() - // return nil - //}) -} - -func timerRun() { - for { - v2log.Info("运行 Fake Modules") - go modules.Run() - <-time.After(5 * time.Minute) - } -} diff --git a/test/fake/modules/fake_clients.go b/test/fake/modules/fake_clients.go deleted file mode 100644 index 6bf3d2e..0000000 --- a/test/fake/modules/fake_clients.go +++ /dev/null @@ -1,126 +0,0 @@ -package modules - -import ( - "context" - "flag" - "math/rand" - "sync" - 
"time" - - v2log "gitlab.oneitfarm.com/bifrost/cilog/v2" - "golang.org/x/sync/semaphore" - - "gitlab.oneitfarm.com/bifrost/capitalizone/ca/keymanager" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/caclient" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/spiffe" - "gitlab.oneitfarm.com/bifrost/capitalizone/test/fake/tools" -) - -var ( - ca = flag.String("ca", "https://127.0.0.1:8081", "ca addr") - ocspAddr = flag.String("ocsp", "http://127.0.0.1:8082", "ocsp addr") -) - -func init() { - flag.Parse() - rand.Seed(time.Now().UnixNano()) -} - -func Run() { - fakeSdkClients() -} - -// 模拟 sdk 使用者 -func fakeSdkClients() { - v2log.Info("启动 Fake SDK Clients") - stopCh := make(chan struct{}) - cai := caclient.NewCAI( - caclient.WithCAServer(caclient.RoleSidecar, *ca), - caclient.WithOcspAddr(*ocspAddr), - caclient.WithAuthKey("0739a645a7d6601d9d45f6b237c4edeadad904f2fce53625dfdd541ec4fc8134"), - ) - ids := make(map[spiffe.IDGIdentity]*caclient.Exchanger) - var mu sync.Mutex - uniqueIds := tools.RealTimeUniqueIds() - for _, uniqueId := range uniqueIds { - id := spiffe.IDGIdentity{ - SiteID: "local_test", - ClusterID: "local_cluster", - UniqueID: uniqueId, - } - ex, err := cai.NewExchanger(&id) - if err != nil { - panic(err) - } - // 签发证书 - v2log.With("id", uniqueId).Info("申请签发证书") - go func(ex *caclient.Exchanger) { - if _, err := ex.Transport.GetCertificate(); err != nil { - v2log.With("id", uniqueId).Errorf("签发证书失败: %s", err) - return - } - mu.Lock() - ids[id] = ex - mu.Unlock() - }(ex) - } - <-time.After(20 * time.Second) - fakeOcspFetcher(stopCh, ids) - <-time.After(5 * time.Minute) - stopCh <- struct{}{} - revokeAll(ids) -} - -func fakeOcspFetcher(stopCh chan struct{}, ids map[spiffe.IDGIdentity]*caclient.Exchanger) { - v2log.Info("启动 Fake OCSP Fetcher") - // 模拟 OCSP 通信 - var clients []*caclient.Exchanger - for _, ex := range ids { - clients = append(clients, ex) - } - sem := semaphore.NewWeighted(1000) - go func() { - L: - for { - select { - case <-stopCh: - 
v2log.Info("接收到终止信号, 停止 OCSP Fetcher") - break L - default: - <-time.After(5 * time.Second) - for _, ex := range clients { - // 每个客户端 5 秒内随机挑选一个对象进行连接 - num := rand.Intn(100) - targets := clients[:num] - sem.Acquire(context.Background(), int64(len(targets))) - for _, target := range targets { - cert, err := target.Transport.GetCertificate() - if err != nil { - panic(err) - } - _, caCert, err := keymanager.GetKeeper().GetCachedSelfKeyPair() - if err != nil { - panic(err) - } - go func(ex *caclient.Exchanger) { - defer sem.Release(1) - ok, err := ex.OcspFetcher.Validate(cert.Leaf, caCert) - v2log.Infof("完成验证 OCSP 请求, 结果: %v, 错误: %v", ok, err) - }(ex) - } - } - } - } - }() -} - -func revokeAll(ids map[spiffe.IDGIdentity]*caclient.Exchanger) { - v2log.Info("开始吊销所有证书") - for id, ex := range ids { - if err := ex.RevokeItSelf(); err != nil { - v2log.With("id", id.UniqueID).Errorf("吊销自身证书错误: %s", err) - continue - } - v2log.With("id", id.UniqueID).Info("吊销自身证书成功") - } -} diff --git a/test/fake/tools/unique_id_generator.go b/test/fake/tools/unique_id_generator.go deleted file mode 100644 index 212d771..0000000 --- a/test/fake/tools/unique_id_generator.go +++ /dev/null @@ -1,40 +0,0 @@ -package tools - -import ( - "crypto/tls" - "io/ioutil" - "net/http" - "time" - - "github.com/tidwall/gjson" -) - -var api = "https://admin:ci_admin_2020@msp-portal.gw002.oneitfarm.com/api/v1/service_unit/dynamic?page=1&limit_num=1000" - -var httpClient = http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - MaxIdleConns: 10, - MaxIdleConnsPerHost: 10, - }, - Timeout: 20 * time.Second, -} - -func RealTimeUniqueIds() []string { - resp, err := httpClient.Get(api) - if err != nil { - panic(err) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - panic(err) - } - defer resp.Body.Close() - uniqueIds := make([]string, 0) - for _, item := range gjson.GetBytes(body, "data.list.#.unique_id").Array() { - uniqueIds = append(uniqueIds, 
item.String()) - } - return uniqueIds -} diff --git a/test/fakeca/ca-key.pem b/test/fakeca/ca-key.pem deleted file mode 100644 index 4095219..0000000 --- a/test/fakeca/ca-key.pem +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJKQIBAAKCAgEAux/MBFxMPPSxzGhhOjO5kqRiSlLGvZi9CVafsL2RwCfXIWEh -KCM2kGF+3ouAbPEVq3AzcTPiUQQJy1efRNBn32q6a5lpPPgsKViKnfeGyT+yQBb3 -fYuqWfGyS8xOClxfeNupMjndgE0eFZv9ttlsajAZ13UFT/ClF5MTILXNiW6LV1cH -ver4t3eL+C88k8Mu0f9w6XIIafXGe1GRBW/I8/nsUYhNT91uI0QdJl6WbU8gNtkZ -cLtGCLmOy7w7YuQr9h3zzHcK8UJoVv6fpHR13i8SyKkNF11PaR41DjNfrg3wBpmU -PvgDOygkCcAtKB2Zw4gfs0QB8KMbvYzRrj8AEarb7nFbeVnIMXJJGT3TPXhoqNFp -mczq4fdDt62tLbXiC4N8IXnASnKHdzzNN9MQlKcH822i+T14eYUYaX5LppTQ+zo4 -lCtCQdz7xza5Brzlli3tYJzEAR3wGPQPRIPocPvzaP8QJmmsji/Mb1WqIxeQsLnD -w71Jz5Z/ciZLOPygLMipU0A8/kxqU5/2jzJqIioU/mtJgY0jbLGLWi01lq8aGvIx -jMJmbFAW1cYRjBneM+CfRx9i88xd85AhZbu9PP8L7rXbWxASlbDU+vI517X/97q/ -rNGyQuB9bflZl9UQZWiNRYN+Tdse5Qb1sQtG5DP9MdSYVmklN+DC0Ky9hvUCAwEA -AQKCAgEAr9oI68wlZV97u6IYRQ8ZBWNwb3JtDP5+IEmE1x6/2zpVpSvXchXtkC/f -f5rP/RFvkOZnoanltMc7wRm+Nng4NbDiMn5AdSiwr8ZitmNJCtXOWQZ8XUKku9Q0 -/6CncnsuybB8yBOzQoC2lg47l9uqU/ySPHXfoSmArK/t2N6hotb9If5K60Lnq9sv -8fPKVP6ngHgzD1+34oQzm+6if952ljm3yUNzjGuGtqOrVUSsz1r95i2OB/Amt8pS -FMr3cE8HtUqes/bnlKUY0zCErJM5tay9VU/xWpS5LQtTRt4mu7ajbEBmHesLdkuZ -2oP8q+dtemDxkZGG1TMjvbWZ08um7lZzo+8CNLO749PTCcJ6pMqiRhYmL0Ka+k6z -CfvLc2Rl37N/xL8Tvflq/Roj6JwHDHLjbcZL8GCGXdlhiMahGG62II2Zo26gvScF -33puO7IzgNrOLIXmxGD0JtAweqPmhmTcNhIGP89LtyGHKRZKEhSNLa32qXxo5yM+ -QZIxXKQvdVD7AO4f7GEmsgm/GoeRa/sRem/sngJgz+qT6jL7UIF350ehkiCAetMW -nvEkHsYXjE8JltOua3J7lbMPD0trOemXf7MGmcne25Ul9uG2ctAeSnwH+hNVs2be -PhqipOeSxM7dVFs/V4i8xF85NFUf7It0aSyzT6JsKbo4g6Wlh2ECggEBAO+6C5dc -zh8a2a+O+bDIcHuY6CXOuR67xOq6m2LHZNJH78e2UFJPTj0Asmyg6NRpFORRWGdu -618IT1BIhBlPTzdRttjYtit1FXiEXM3ouAonfDnuJoY7Ar/Bl8dw+rKVIo6zibZP -GCrnfOrpZy15DsMlswNEOmRKCGVtsK1MNrmX5RZSM0+nd5GtRy87bbFgxdeKxax0 -IcSc6KNjsd9dpezVy9N1stSRE6zlHwz54IgXwva4r1H/GZI6bwHC1f39qTwar82K 
-27sJ3JYsyzuDohmoTqe9Kfsgg/ycCbR6wYFJ4Dg9QioqJZWPV+AX+OqdAmkg/PTO -bsQjOvDb7/c38a0CggEBAMfToORivFil1nzjfcdS/K7nhFkjHtwSXvx6LV5XHg/z -n8lkU9EnGgYLwKGQsxGo3VaNFjoCEHPOX1H5HcUC/ubB/M8qbyNG92ng1CoIqGEA -orhbsFWTrdXPZyHBhTFkSfgjqng5pJ/C9VLG5cSHUZhipukJZWnl8D/05KgB5OoA -g3Ms7n8kpM5i52dA3R36HKuq7pC5aK69HZPCwdnTr64Z/e1e7Gm/PGqCTCDbcy4o -NevEee2D6WL28AnjM2rSgUHTGdhdR5gzqsJ9Rg6ErWazhLxtGzaSHd/FY4zp2IRR -Q/pBbTxPB2wA9qAPup7D2o3iHEVYNEbIb3DwHC9b42kCggEBAOQHYS06XuqhfL2M -z5/EGZLH0NLdv6MnUtlAZbvhFNsdCE0880xIGZDXT+Eacj5SMkg3xFKm5IbB/cfN -IrJ8iUqClN3Rzf6qYaEArtJCcoVeJ8Fm8gguq5/BQ2G36w/h4jxBNVZQCBoPlfhR -yg4sH8mq24vY434wGap5W1yMqWWCwauoxxwWnRrZ62NXMTFPbwFnBD7VPKWr53I7 -y58/kiwk9bwQMeoWkdOdIhsSuOxbRd2vsTza3fUFVkAjl7ABEHCWpfFrCzXv3H6Q -30SeIpwrR54wxnUU6ddoiaa+OcCEiB6WzpATBSstbZZA10o7zWY4tcUbc6toI5Gm -c60B7wUCggEAGR17MllKjOXDEI1TKLNAeQc2NjHY1hYlBByjCSFv8NdQ9PH8KuR8 -O9hMTMIwD38m2pL/ASlzQerlRhtGZ5WZzg0BplQSBeAMhq0wXVwbNZ3r1boIbqkS -BzZ7It38zJlrRpnM+jFchIvvY4kGJe5QDhQQIGgC7A1vZcp01rzuNY789oPmMeq8 -IAiERxD6+aKO6fCg7WWwR44TYpfu317gzMQNi0EL+7qCcGyR/us2Dc4WANz/6h9T -PRIGV2fL9ZghkNFkhTIsJnAP/UPbg0WfubHEdB5OdkxlHtPjUDmv8el43sJMZXEV -YLi6RqHTddLt7DlIDlsoruDQSOSU1kr5SQKCAQBjvtnQAhHYxljOzkp8Jc+8A00K -yZ3KfpJO8+gHgZtbj7dGo1vRolOqiuLSXq+l19ykEB5spjQMx+GzpN8xElRPanDZ -GxlPPUJjE7vBgrmT5N+EEvDDDiK9dGi+wc4zxQDhuBTPcMFY9FzdZFH+AJkz65WS -viv77VNoqYa2UwkydjUzkqnceRMkE+qNd3o5kNBRl2coslsbw1qGk7tceqD4XHFY -eSlKOCEOrPxbOE+xihcBQ+MVNlpHuXgspWbtzz3q5pV4JCUVaNgBbuHqLQnYxBmF -kXrWHxaMha6t+R64nGdcArRngoUhT8qH4C5nr3I8F88HD6F08X5ZCbgYq68T ------END RSA PRIVATE KEY----- diff --git a/test/fakeca/ca.pem b/test/fakeca/ca.pem deleted file mode 100644 index 5845773..0000000 --- a/test/fakeca/ca.pem +++ /dev/null @@ -1,31 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFUDCCAzigAwIBAgIULm4Cswy7l0suRofUJEzbnJnky8kwDQYJKoZIhvcNAQEN -BQAwGDEWMBQGA1UECxMNQ0kxMjMgUm9vdCBDQTAeFw0yMTAxMDcwODU4MDBaFw0y -NjAxMDYwODU4MDBaMEcxGzAZBgNVBAcTEnNwaWZmZTovL3Rlc3QvdGVzdDETMBEG -A1UECxMKVEVTVCBTUCBDQTETMBEGA1UEAxMKVEVTVCBTUCBDQTCCAiIwDQYJKoZI 
-hvcNAQEBBQADggIPADCCAgoCggIBALsfzARcTDz0scxoYTozuZKkYkpSxr2YvQlW -n7C9kcAn1yFhISgjNpBhft6LgGzxFatwM3Ez4lEECctXn0TQZ99qumuZaTz4LClY -ip33hsk/skAW932LqlnxskvMTgpcX3jbqTI53YBNHhWb/bbZbGowGdd1BU/wpReT -EyC1zYlui1dXB73q+Ld3i/gvPJPDLtH/cOlyCGn1xntRkQVvyPP57FGITU/dbiNE -HSZelm1PIDbZGXC7Rgi5jsu8O2LkK/Yd88x3CvFCaFb+n6R0dd4vEsipDRddT2ke -NQ4zX64N8AaZlD74AzsoJAnALSgdmcOIH7NEAfCjG72M0a4/ABGq2+5xW3lZyDFy -SRk90z14aKjRaZnM6uH3Q7etrS214guDfCF5wEpyh3c8zTfTEJSnB/Ntovk9eHmF -GGl+S6aU0Ps6OJQrQkHc+8c2uQa85ZYt7WCcxAEd8Bj0D0SD6HD782j/ECZprI4v -zG9VqiMXkLC5w8O9Sc+Wf3ImSzj8oCzIqVNAPP5MalOf9o8yaiIqFP5rSYGNI2yx -i1otNZavGhryMYzCZmxQFtXGEYwZ3jPgn0cfYvPMXfOQIWW7vTz/C+6121sQEpWw -1PryOde1//e6v6zRskLgfW35WZfVEGVojUWDfk3bHuUG9bELRuQz/THUmFZpJTfg -wtCsvYb1AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/ -MB0GA1UdDgQWBBQ4VMIR4ik9KcgnJwpp/KLTGeKU1DAfBgNVHSMEGDAWgBRZj2kc -1tucRLowbLilqj1kN5wK8DANBgkqhkiG9w0BAQ0FAAOCAgEADawyfssECMG1s3zA -E/ID2iQcXVaejT04E/ZUPt+LeWrvjAfwtnNcpUPt7i0VYB0e9jEaAaDIcEDbsdfl -or6UfMLVMy7bV3d0vDCNSnWjZl9zAj4MfuPnHrvnuN5HFg8XPY3KqCuJayC55iZu -adPYocsmVdxLlG3IlqWFidr9aADV5hItvBG0mTyPeEvW8YiDfNzvYZlLKJoW/Hp8 -E7Ao11rf+NPbz/ScrP0RnJ7L1kTrQPK5pCJe3ncc6haQurpyLSRhKz/mYnhQebYh -nog4a2uJ2xdAP4VEvq1liZSIi7f3HKgoyiYNDqoy33zad4W/pTYuE7X8ulvilXwY -gToHaVmo0mmLKZzrKNVm+SahQrWeK2ATg41/YdNc/Kpxe2rR/xxChOyzZv5JGxiR -vHDzRapnaKk/IxyblueSaT4ts3MDQfIc9tm2biTTM5NpbrhhiKs32aaPy5GHQcmJ -3FIEvon7I/PSABa8MetwteVv6FZucGcvXiyI3mbp0tS/sGeDG/Q52j9TmOssIMW4 -TSSnWf9tI3kgkq9+J/H2A7CPTwvxrrZH3l8W+8J0PMOsXHt7UZYX7uUEHB92dKjj -5fwCweD+DA+3yTQ2D3fhi9LD2DGunwcSYwyFpGXf5G+83L3YS8O8ez7wv8LzBNdk -dTzY8HdkOrViVPXQrIHxI9RXj9A= ------END CERTIFICATE----- diff --git a/test/fakeca/root-certs.pem b/test/fakeca/root-certs.pem deleted file mode 100644 index c934912..0000000 --- a/test/fakeca/root-certs.pem +++ /dev/null @@ -1,29 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFADCCAuigAwIBAgIUObj39X41X44eWt3pDCRdgttcBlgwDQYJKoZIhvcNAQEN -BQAwGDEWMBQGA1UECxMNQ0kxMjMgUm9vdCBDQTAeFw0yMDEyMjUwNTE2MDBaFw00 
-MDEyMjAwNTE2MDBaMBgxFjAUBgNVBAsTDUNJMTIzIFJvb3QgQ0EwggIiMA0GCSqG -SIb3DQEBAQUAA4ICDwAwggIKAoICAQCYXt/PZjTsWfTL8zpgFQXzToNBjLHfMGi5 -VvTD3C3GEqtDFdSfYQaygMHzV4azCqA9iOuPEDWHYl35+/p01s10wLy5PYvPY0J6 -ueYpPDI7ZuNzRfzkwG4WxDGfHnVW4idLDhD+ZmwVakp2Pr5NMUuuvnsrHe491wCs -lFXJ9Woeoo5mJapgQanocfXrmYwcWTcYoyxwuZRUAc0CxJS3JYD690vHGiODaOM4 -yqyfj2RAG4GO13EDM8DOgefynPPUAm4bRR+tQyA3TE99XuTWAiRu/Skw2ZK6nnst -ZqiD+2s36dHKkO0y5ArBwk1krKxqHSiRzTY+TOi+TO1Vxbe/x8ipFNFJBEwv5Bhf -6UImGO9Zo9ZnndAldIgFeGczYVUhXVSeK0FiPVmH9NUFVeb2uqGy/Gi7JpgDy8yk -JVxuNB4NwYfHNY4sP051DwKvjSyQyUVHfbsewYezKGJIIqXIJCUV8fc+OpVTjct/ -8mwap0jNn+c1E2Vdx1xn66i2w++IroMZRL7yneQxQEunqqAemRRKdcesvFMApTea -/SABXOnCtEAZg/uDFN8Zf+MPSuXjulpitEqyigZkaR01VncbXsipcBpxmoFWm3C9 -Eu7ukpSY7+W016U16VOqo4Wg22o+9YELwVWJo78Z+FtaZpwhT6rF8IK/k3OcOjIW -Ycb2La89BQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB -/zAdBgNVHQ4EFgQUWY9pHNbbnES6MGy4pao9ZDecCvAwDQYJKoZIhvcNAQENBQAD -ggIBAEnI5B8hqh8ptUk0R06OGHAvxiKaC9IqCuAVwIQUtKNZ0gO+Q39chvncy5GJ -IhptUmGNcE6h9mKVvFNyChRsTe+WY0iSFfC/gFh+QOR88kW7ViHmlE9njY974YK/ -nBem+5ko1/mHWxmMztcnxM9VN2q6RoS6o7BNzXgk/JKOGk8XtMuVj0AOjIcbSVB2 -BgJV52R7zgahZL8ULfohKYeIM6VMDyDYDczdvMOE+pyD0NrpxHRNdNDJ4QL1Jawt -pNUKXhf7F60mVGv1NHT3S7MBd65Q8R7lAm5Xh3j4WXpcYPBP8RQhfOAuJS8ioMFj -eqbPsRHyvPwI1tuuNpM7Vn8dH0gdYLX5XMmk4MRgD5cPlfv+Hv5zCgDQodLTynEY -uhX3X9UD6C8U6Ffy9yQP4aue1OzytREblGZhB1r8/LV6xdssFaW6avhIiHM0KENf -IIjnGQNx3zOx5WjMOswBtF7u5PZHinv+kgiAwx14X1DOY/Vtkn31Hrjgi7E5gxSn -ewC9aR4TVRxezLN71vZ6oVqGK9YP+sd+EZMTpKwGXO9QOr1pzBzmzq0RhEdSfcsd -foelxzWbGN9MQLvYjAdfTdYPOvKN8UuNI7HYTSXOjI3HLBAeGdgfyuTlnQAgt34P -rpZzweHz+ofwDRZNChpKJO2jE0it9DpcQOTiiVqcvXv1pNqa ------END CERTIFICATE----- diff --git a/test/tcp/main.go b/test/tcp/main.go deleted file mode 100644 index 9e6d5de..0000000 --- a/test/tcp/main.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" -) - -func main() { - //conn, err := net.Dial("tcp", "192.168.2.80:7000") - //if true { - // conn, err = tls.Dial("tcp", 
"192.168.2.80:7443", &tls.Config{ - // InsecureSkipVerify: true, - // ServerName: "tcp.test.com", - // }) - //} - conn, err := tls.Dial("tcp", "dp-thompbxnvc67x-26656.gw105.oneitfarm.com:7443", &tls.Config{ - InsecureSkipVerify: true, - }) - if err != nil { - panic(fmt.Errorf("dial err: %v", err)) - } - - fmt.Println("send msg") - defer conn.Close() - - n, err := conn.Write([]byte("11111111111\n")) - if err != nil { - fmt.Println(n, err) - return - } - - buf := make([]byte, 100) - n, err = conn.Read(buf) - if err != nil { - fmt.Println(n, err) - return - } - - fmt.Println(string(buf[:n])) -} diff --git a/util/bodyBuffer/buf.go b/util/bodyBuffer/buf.go deleted file mode 100644 index 521a3c2..0000000 --- a/util/bodyBuffer/buf.go +++ /dev/null @@ -1,47 +0,0 @@ -package bodyBuffer - -import ( - "bytes" - "net/http" -) - -type BodyWriter struct { - http.ResponseWriter - Body *bytes.Buffer - // len int // 记录总长度 - cloneLimit int // 响应镜像复制大小限制 -} - -func NewBodWriter(cloneLimit int) *BodyWriter { - return &BodyWriter{ - ResponseWriter: nil, - Body: bytes.NewBuffer(nil), - cloneLimit: cloneLimit, - } -} - -func (w *BodyWriter) Write(b []byte) (int, error) { - if len(b) == 0 { - return 0, nil - } - // 验证响应体大小, - // w.len += len(b) - // clone body - l := w.Body.Len() - if l <= w.cloneLimit { - if len(b) > w.cloneLimit-l { - w.Body.Write(b[0 : w.cloneLimit-l]) - } else { - w.Body.Write(b) - } - } - /*if w.len <= w.cloneLimit { - w.Body.Write(b) - }*/ - return w.ResponseWriter.Write(b) -} - -func (w *BodyWriter) Reset() { - w.Body.Reset() - // w.len = 0 -} diff --git a/util/bodyBuffer/buf_test.go b/util/bodyBuffer/buf_test.go deleted file mode 100644 index 4a27c25..0000000 --- a/util/bodyBuffer/buf_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package bodyBuffer - -import ( - "bytes" - "log" - "strconv" - "sync" - "testing" - - "github.com/labstack/echo/v4" -) - -func TestNewBodLogWriter(t *testing.T) { - e := echo.New() - e.Use(EchoBodyLogMiddleware) - e.Use(debug) - e.Any("*", 
func(ctx echo.Context) error { - l, _ := strconv.Atoi(ctx.FormValue("len")) - _, err := ctx.Response().Write(bytes.Repeat([]byte("*"), l)) - if err != nil { - return err - } - // ctx.Response().Write(bytes.Repeat([]byte("#"), 10)) - return nil - }) - e.Start(":45000") -} - -var ( - requestBodyPool = sync.Pool{} - responseBodyPool = sync.Pool{} -) - -const BODY_LIMIT = 1024 - -func init() { - requestBodyPool.New = func() interface{} { - return &bytes.Buffer{} - } - responseBodyPool.New = func() interface{} { - return NewBodWriter(10) - } -} - -func EchoBodyLogMiddleware(next echo.HandlerFunc) echo.HandlerFunc { - return func(ctx echo.Context) error { - // request body - /*reqLen := ctx.Request().ContentLength - if reqLen > 0 && reqLen < BODY_LIMIT { - reqBuf := requestBodyPool.Get().(*bytes.Buffer) - _, _ = reqBuf.ReadFrom(ctx.Request().Body) - ctx.Request().Body = ioutil.NopCloser(reqBuf) - ctx.Set(confer.REQUEST_BODY_KEY, reqBuf.Bytes()) - - defer func() { - reqBuf.Reset() - requestBodyPool.Put(reqBuf) - }() - }*/ - // response body - buf := responseBodyPool.Get().(*BodyWriter) - buf.ResponseWriter = ctx.Response().Writer - ctx.Response().Writer = buf - defer func() { - buf.Reset() - responseBodyPool.Put(buf) - }() - return next(ctx) - } -} - -func debug(next echo.HandlerFunc) echo.HandlerFunc { - return func(ctx echo.Context) error { - res := ctx.Response().Writer.(*BodyWriter) - err := next(ctx) - log.Println(res.Body.Len(), res.Body.String()) - return err - } -} diff --git a/util/cache.go b/util/cache.go index 79fffde..93154d8 100644 --- a/util/cache.go +++ b/util/cache.go @@ -8,7 +8,7 @@ import ( "time" "github.com/gin-gonic/gin" - "gitlab.oneitfarm.com/bifrost/go-toolbox/memorycacher" + "github.com/ztalab/ZACA/pkg/memorycacher" ) var MapCache *memorycacher.Cache diff --git a/util/flow.go b/util/flow.go deleted file mode 100644 index 44143f4..0000000 --- a/util/flow.go +++ /dev/null @@ -1,21 +0,0 @@ -package util - -import ( - "fmt" - 
"gitlab.oneitfarm.com/bifrost/capitalizone/pkg/kutil/wait" - "go.uber.org/zap" - "time" -) - -func RetryWithTimeout(f func() error, interval, timeout time.Duration, logger *zap.SugaredLogger) error { - var times int - err := wait.PollImmediate(interval, timeout, func() (done bool, err error) { - if err := f(); err != nil { - times++ - logger.Error(fmt.Sprintf("failed %v times: ", times), err) - return false, nil - } - return true, nil - }) - return err -} diff --git a/util/json.go b/util/json.go deleted file mode 100644 index ab41c30..0000000 --- a/util/json.go +++ /dev/null @@ -1,14 +0,0 @@ -package util - -import ( - jsoniter "github.com/json-iterator/go" - "github.com/json-iterator/go/extra" -) - -var json = jsoniter.ConfigCompatibleWithStandardLibrary - -func init() { - // RegisterFuzzyDecoders decode input from PHP with tolerance. - // It will handle string/number auto conversation, and treat empty [] as empty struct. - extra.RegisterFuzzyDecoders() -} \ No newline at end of file diff --git a/util/redis.go b/util/redis.go deleted file mode 100644 index c7d8682..0000000 --- a/util/redis.go +++ /dev/null @@ -1 +0,0 @@ -package util diff --git a/util/slice.go b/util/slice.go deleted file mode 100644 index 1913446..0000000 --- a/util/slice.go +++ /dev/null @@ -1,9 +0,0 @@ -package util - -func StringSliceToInterfaceSlice(data []string) []interface{} { - result := make([]interface{}, 0, len(data)) - for _, v := range data { - result = append(result, v) - } - return result -} diff --git a/util/strings.go b/util/strings.go deleted file mode 100644 index c7d8682..0000000 --- a/util/strings.go +++ /dev/null @@ -1 +0,0 @@ -package util diff --git a/util/types/etcd.go b/util/types/etcd.go deleted file mode 100644 index 7c06f10..0000000 --- a/util/types/etcd.go +++ /dev/null @@ -1,5 +0,0 @@ -package types - -type EtcdResp struct { - -} diff --git a/util/types/event.go b/util/types/event.go deleted file mode 100644 index 6d69eb6..0000000 --- a/util/types/event.go +++ 
/dev/null @@ -1,8 +0,0 @@ -package types - -type Events []*Event - -type Event struct { - Type string - Obj interface{} -} \ No newline at end of file diff --git a/util/types/sql.go b/util/types/sql.go deleted file mode 100644 index 2d2bc66..0000000 --- a/util/types/sql.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import ( - "database/sql" - jsoniter "github.com/json-iterator/go" -) - -type NullString sql.NullString -type NullInt64 sql.NullInt64 -type NullInt32 sql.NullInt32 -type NullBool sql.NullBool -type NullTime sql.NullTime - -//MarshalJSON method is called by json.Marshal, -//whenever it is of type NullString -func (x *NullString) MarshalJSON() ([]byte, error) { - if !x.Valid { - return []byte("null"), nil - } - return jsoniter.Marshal(x.String) -} - -func (x *NullInt64) MarshalJSON() ([]byte, error) { - if !x.Valid { - return []byte("null"), nil - } - return jsoniter.Marshal(x.Int64) -} - -func (x *NullInt32) MarshalJSON() ([]byte, error) { - if !x.Valid { - return []byte("null"), nil - } - return jsoniter.Marshal(x.Int32) -} - -func (x *NullBool) MarshalJSON() ([]byte, error) { - if !x.Valid { - return []byte("null"), nil - } - return jsoniter.Marshal(x.Bool) -} - -func (x *NullTime) MarshalJSON() ([]byte, error) { - if !x.Valid { - return []byte("null"), nil - } - return jsoniter.Marshal(x.Time) -} \ No newline at end of file diff --git a/util/util.go b/util/util.go deleted file mode 100644 index f15e8fc..0000000 --- a/util/util.go +++ /dev/null @@ -1,304 +0,0 @@ -package util - -import ( - "crypto/md5" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "fmt" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource" - "gitlab.oneitfarm.com/bifrost/capitalizone/pkg/resource/file_source" - "gitlab.oneitfarm.com/bifrost/capitalizone/util/bodyBuffer" - "io/ioutil" - "log" - "net" - "net/http" - "os" - "reflect" - "runtime/debug" - "strconv" - "strings" - "time" - - "github.com/shopspring/decimal" - logger 
"gitlab.oneitfarm.com/bifrost/cilog/v2" -) - -func FloatToString(Num float64) string { - // to convert a float number to a string - return strconv.FormatFloat(Num, 'f', 2, 64) -} - -func MD5Bytes(s []byte) string { - ret := md5.Sum(s) - return hex.EncodeToString(ret[:]) -} - -// 计算字符串MD5值 -func MD5(s string) string { - return MD5Bytes([]byte(s)) -} - -// 计算文件MD5值 -func MD5File(file string) (string, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return "", err - } - return MD5Bytes(data), nil -} - -func ToInterfaceSlice(slice interface{}) []interface{} { - s := reflect.ValueOf(slice) - if s.Kind() != reflect.Slice { - return nil - } - ret := make([]interface{}, s.Len()) - for i := 0; i < s.Len(); i++ { - ret[i] = s.Index(i).Interface() - } - return ret -} - -/** - * 获取容器总内存,使用内存 - * mStat.Total //容器内总内存 - * mStat.RSS //容器内使用的内存 - */ -func GetContainerMemory(rs resource.Resource) (int64, int64) { - if rs == nil { - rs = file_source.NewFileSource() - } - mStat, err := rs.CurrentMemStat() - if err != nil { - // 错误日志 - logger.Errorf("heartBeatReport metrics.getContainerMemory error", err) - return 0, 0 - } else { - var total, rss string - total = strconv.FormatUint(mStat.Total, 10) - rss = strconv.FormatUint(mStat.RSS, 10) - t, _ := decimal.NewFromString(total) - r, _ := decimal.NewFromString(rss) - d := decimal.NewFromInt32(1024 * 1024) - return t.Div(d).IntPart(), r.Div(d).IntPart() - } -} - -// 文件是否存在 -func IsFileExist(path string) bool { - _, err := os.Stat(path) - if err != nil { - if os.IsNotExist(err) { - return false - } - } - return true -} - -// GoWithRecover wraps a `go func()` with recover() -func GoWithRecover(handler func(), recoverHandler func(r interface{})) { - go func() { - defer func() { - if r := recover(); r != nil { - logger.Errorf("%s goroutine panic: %v\n%s\n", time.Now().Format("2006-01-02 15:04:05"), r, string(debug.Stack())) - if recoverHandler != nil { - go func() { - defer func() { - if p := recover(); p != nil { - 
log.Println("recover goroutine panic:%v\n%s\n", p, string(debug.Stack())) - } - }() - recoverHandler(r) - }() - } - } - }() - handler() - }() -} - -func TimeMs() int64 { - return time.Now().UnixNano() / 1e6 -} - -func StructToMap(obj interface{}) map[string]interface{} { - obj1 := reflect.TypeOf(obj) - obj2 := reflect.ValueOf(obj) - data := make(map[string]interface{}) - for i := 0; i < obj1.NumField(); i++ { - data[obj1.Field(i).Name] = obj2.Field(i).Interface() - } - return data -} - -// 获取interface类型存储的string -func GetInterfaceString(param interface{}) string { - switch param.(type) { - case string: - return param.(string) - case int: - return strconv.Itoa(param.(int)) - case float64: - return strconv.Itoa(int(param.(float64))) - } - return "" -} - -// 生成md5字符串 -func NewMd5(str ...string) string { - h := md5.New() - for _, v := range str { - h.Write([]byte(v)) - } - return hex.EncodeToString(h.Sum(nil)) -} - -/** - * 判断是否是 connection refused错误 - */ -func IsConnectionRefused(err error) bool { - if err == nil { - return false - } - return strings.Contains(err.Error(), "connection refused") -} - -func GetContainerCpu(rs resource.Resource, cpu chan float64, times time.Duration) { - if times == 0 { - times = time.Millisecond * 250 // 250ms - } - // cpu获取 - rs.GetCPUStat(times, func(stat *resource.CPUStat, err error) { - if err != nil { - // 错误日志 - logger.Errorf("获取cpu错误", err) - cpu <- 0 - } else { - // CPU百分比 - cpu <- stat.Usage - } - }) -} - -func GetContainerDisk(rs resource.Resource, disk chan resource.DiskStat, times time.Duration) { - // 磁盘获取速率时间单位为秒 - if times == 0 { - times = time.Second - } - rs.CurrentDiskStat(times, func(stat *resource.DiskStat, err error) { - if err != nil { - // 错误日志 - logger.Errorf("获取disk错误", err) - disk <- resource.DiskStat{} - } else { - // 磁盘速率 - disk <- *stat - } - }) -} - -func GetContainerNetwork(rs resource.Resource, net chan resource.NetworkStat, times time.Duration) { - // 网络流量获取速率时间单位为秒 - if times == 0 { - times = time.Second 
- } - rs.CurrentNetworkStat(times, func(stat *resource.NetworkStat, err error) { - if err != nil { - // 错误日志 - logger.Errorf("获取network错误", err) - net <- resource.NetworkStat{} - } else { - // 磁盘速率 - net <- *stat - } - }) -} - -func InArray(in string, array []string) bool { - for k := range array { - if in == array[k] { - return true - } - } - return false -} - -// 解析请求中的traceId -func GetTraceIdNetHTTP(header http.Header) string { - traceId := header.Get("sw8") - if len(traceId) == 0 { - return "" - } - sw8Array := strings.Split(traceId, "-") - - if len(sw8Array) >= 2 { - if traceID, err := base64.StdEncoding.DecodeString(sw8Array[1]); err == nil { - return string(traceID) - } - } - return "" -} - -func GetPort(host string) string { - _, port, _ := net.SplitHostPort(host) - return port -} - -func GetResponseBody(w http.ResponseWriter) []byte { - b, ok := w.(*bodyBuffer.BodyWriter) - if !ok { - return nil - } - return b.Body.Bytes() -} - -func MapDeepCopy(value map[string]string) map[string]string { - newMap := make(map[string]string) - if value == nil { - return newMap - } - for k, v := range value { - newMap[k] = v - } - - return newMap -} - -func RemoveDuplicateElement(languages []string) []string { - result := make([]string, 0, len(languages)) - temp := map[string]struct{}{} - for _, item := range languages { - if _, ok := temp[item]; !ok { - temp[item] = struct{}{} - result = append(result, item) - } - } - return result -} - -func DumpCertAndPrivateKey(cert *tls.Certificate) { - block := &pem.Block{ - Type: "CERTIFICATE", - Bytes: cert.Leaf.Raw, - } - log.Println(string(pem.EncodeToMemory(block))) - b, err := x509.MarshalPKCS8PrivateKey(cert.PrivateKey) - if err != nil { - log.Println("x509.MarshalPKCS8PrivateKey", err) - return - } - log.Println(string(pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: b}))) -} - -func DumpUnit(l int) string { - if l < 1024 { - return fmt.Sprintf("%dB", l) - } else if l < 1048576 { - return fmt.Sprintf("%dKB", l/1024) - 
} else { - return fmt.Sprintf("%dMb", l/1048576) - } -}