From 5fb92e932af43a9e28bd37a23c83812debc380e2 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:00:20 +0300 Subject: [PATCH 01/12] feat: Introduce the CRI package primitive interfaces The ContainerClient is an abstraction to facilitate using a container runtime on the host regardless of what it is. The CreateOpts type is a holder of the container run options required in kubevirt ci. Signed-off-by: aerosouund --- cluster-provision/gocli/cri/docker/docker.go | 100 +++++++++++ cluster-provision/gocli/cri/main.go | 21 +++ cluster-provision/gocli/cri/podman/podman.go | 177 +++++++++++++++++++ 3 files changed, 298 insertions(+) create mode 100644 cluster-provision/gocli/cri/docker/docker.go create mode 100644 cluster-provision/gocli/cri/main.go create mode 100644 cluster-provision/gocli/cri/podman/podman.go diff --git a/cluster-provision/gocli/cri/docker/docker.go b/cluster-provision/gocli/cri/docker/docker.go new file mode 100644 index 0000000000..e64e03e069 --- /dev/null +++ b/cluster-provision/gocli/cri/docker/docker.go @@ -0,0 +1,100 @@ +package docker + +import ( + "os/exec" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/cri" +) + +type DockerClient struct{} + +func NewDockerClient() *DockerClient { + return &DockerClient{} +} + +func IsAvailable() bool { + cmd := exec.Command("docker", "-v") + out, err := cmd.Output() + if err != nil { + return false + } + return strings.HasPrefix(string(out), "Docker version") +} + +func (dc *DockerClient) ImagePull(image string) error { + cmd := exec.Command("docker", "pull", image) + if err := cmd.Run(); err != nil { + return err + } + + return nil +} + +func (dc *DockerClient) Inspect(containerID, format string) ([]byte, error) { + cmd := exec.Command("docker", "inspect", containerID, "--format", format) + out, err := cmd.Output() + if err != nil { + return nil, err + } + return out, nil +} + +func (dc *DockerClient) Start(containerID string) error { 
+ cmd := exec.Command("docker", + "start", + containerID) + + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + return nil +} + +func (dc *DockerClient) Create(image string, createOpts *cri.CreateOpts) (string, error) { + ports := "" + for containerPort, hostPort := range createOpts.Ports { + ports += "-p " + containerPort + ":" + hostPort + } + + args := []string{ + "--name=" + createOpts.Name, + "--privileged=" + strconv.FormatBool(createOpts.Privileged), + "--rm=" + strconv.FormatBool(createOpts.Remove), + "--restart=" + createOpts.RestartPolicy, + "--network=" + createOpts.Network, + } + + for containerPort, hostPort := range createOpts.Ports { + args = append(args, "-p", containerPort+":"+hostPort) + } + + if len(createOpts.Capabilities) > 0 { + args = append(args, "--cap-add="+strings.Join(createOpts.Capabilities, ",")) + } + + fullArgs := append([]string{"create"}, args...) + fullArgs = append(fullArgs, image) + fullArgs = append(fullArgs, createOpts.Command...) + + cmd := exec.Command("docker", + fullArgs..., + ) + + containerID, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + logrus.Info("created registry container with id: ", string(containerID)) + return strings.TrimSuffix(string(containerID), "\n"), nil +} + +func (dc *DockerClient) Remove(containerID string) error { + cmd := exec.Command("docker", "rm", "-f", containerID) + if err := cmd.Run(); err != nil { + return err + } + return nil +} diff --git a/cluster-provision/gocli/cri/main.go b/cluster-provision/gocli/cri/main.go new file mode 100644 index 0000000000..414957cf33 --- /dev/null +++ b/cluster-provision/gocli/cri/main.go @@ -0,0 +1,21 @@ +package cri + +// maybe just create wrappers around bash after all +type ContainerClient interface { + ImagePull(image string) error + Create(image string, co *CreateOpts) (string, error) + Start(containerID string) error + Remove(containerID string) error + Inspect(containerID, format string) ([]byte, error) +} + +type 
CreateOpts struct { + Privileged bool + Name string + Ports map[string]string + RestartPolicy string + Network string + Command []string + Remove bool + Capabilities []string +} diff --git a/cluster-provision/gocli/cri/podman/podman.go b/cluster-provision/gocli/cri/podman/podman.go new file mode 100644 index 0000000000..f4663c127d --- /dev/null +++ b/cluster-provision/gocli/cri/podman/podman.go @@ -0,0 +1,177 @@ +package podman + +import ( + "fmt" + "io" + "io/fs" + "os" + "os/exec" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/cri" +) + +type Podman struct{} + +func NewPodman() *Podman { + return &Podman{} +} + +type PodmanSSHClient struct { + containerName string +} + +func NewPodmanSSHClient(containerName string) *PodmanSSHClient { + return &PodmanSSHClient{ + containerName: containerName, + } +} + +func IsAvailable() bool { + cmd := exec.Command("podman", "-v") + out, err := cmd.Output() + if err != nil { + return false + } + return strings.HasPrefix(string(out), "podman version") +} + +func (p *PodmanSSHClient) Command(cmd string, stdOut bool) (string, error) { + logrus.Infof("[node %s]: %s\n", p.containerName, cmd) + command := exec.Command("podman", "exec", p.containerName, "/bin/sh", "-c", cmd) + if !stdOut { + out, err := command.CombinedOutput() + if err != nil { + return "", err + } + return string(out), nil + } + command.Stdout = os.Stdout + command.Stderr = os.Stderr + + if err := command.Run(); err != nil { + return "", err + } + return "", nil +} + +func (p *PodmanSSHClient) CopyRemoteFile(remotePath, localPath string) error { + cmd := exec.Command("podman", "cp", fmt.Sprintf("%s:%s", p.containerName, remotePath), localPath) + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to copy file from container: %w, output: %s", err, output) + } + + return nil +} + +func (p *PodmanSSHClient) SCP(destPath string, contents fs.File) error { + tempFile, err := 
os.CreateTemp("", "podman_cp_temp") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(tempFile.Name()) + + fileContents, err := io.ReadAll(contents) + if err != nil { + return fmt.Errorf("failed to read file contents: %w", err) + } + + _, err = tempFile.Write(fileContents) + if err != nil { + return fmt.Errorf("failed to write to temp file: %w", err) + } + + err = tempFile.Close() + if err != nil { + return fmt.Errorf("failed to close temp file: %w", err) + } + + cmd := exec.Command("podman", "cp", tempFile.Name(), p.containerName+":"+destPath) + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("podman cp command failed: %w. Output: %s", err, string(output)) + } + + return nil +} + +func (p *Podman) ImagePull(image string) error { + cmd := exec.Command("podman", "pull", image) + if err := cmd.Run(); err != nil { + return err + } + + return nil +} + +func (p *Podman) Create(image string, createOpts *cri.CreateOpts) (string, error) { + ports := "" + for containerPort, hostPort := range createOpts.Ports { + ports += "-p " + containerPort + ":" + hostPort + } + + args := []string{ + "--name=" + createOpts.Name, + "--privileged=" + strconv.FormatBool(createOpts.Privileged), + "--rm=" + strconv.FormatBool(createOpts.Remove), + "--restart=" + createOpts.RestartPolicy, + "--network=" + createOpts.Network, + } + + for containerPort, hostPort := range createOpts.Ports { + args = append(args, "-p", containerPort+":"+hostPort) + } + + if len(createOpts.Capabilities) > 0 { + args = append(args, "--cap-add="+strings.Join(createOpts.Capabilities, ",")) + } + + fullArgs := append([]string{"create"}, args...) + fullArgs = append(fullArgs, image) + fullArgs = append(fullArgs, createOpts.Command...) 
+ + cmd := exec.Command("podman", + fullArgs..., + ) + fmt.Println(cmd.String()) + + containerID, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + logrus.Info("created registry container with id: ", string(containerID)) + return strings.TrimSuffix(string(containerID), "\n"), nil +} + +func (p *Podman) Start(containerID string) error { + cmd := exec.Command("podman", + "start", + containerID) + + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + return nil +} + +func (p *Podman) Inspect(containerID, format string) ([]byte, error) { + cmd := exec.Command("podman", "inspect", containerID, "--format", format) + out, err := cmd.Output() + if err != nil { + return nil, err + } + return out, nil +} + +func (p *Podman) Remove(containerID string) error { + cmd := exec.Command("podman", "rm", "-f", containerID) + if err := cmd.Run(); err != nil { + return err + } + return nil +} From a30df55405c88204b2c3bc80b1c8f7362a3af956 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sat, 24 Aug 2024 14:44:57 +0300 Subject: [PATCH 02/12] refactor: Generalize the Docker adapter to execute general commands and extend it The docker adapter was previously used to execute commands when ssh.sh was still used. Its removed and the adapter is changed to be a general interface for container interaction. Implement the full ssh client interface on the adapter. 
Signed-off-by: aerosouund --- cluster-provision/gocli/docker/adapter.go | 57 ++++++++++++++++------- 1 file changed, 41 insertions(+), 16 deletions(-) diff --git a/cluster-provision/gocli/docker/adapter.go b/cluster-provision/gocli/docker/adapter.go index 823e629e82..780c40ea89 100644 --- a/cluster-provision/gocli/docker/adapter.go +++ b/cluster-provision/gocli/docker/adapter.go @@ -4,6 +4,11 @@ import ( "fmt" "os" + "bytes" + "context" + "io" + + "github.com/docker/docker/api/types" "github.com/docker/docker/client" ) @@ -21,22 +26,7 @@ func NewAdapter(cli *client.Client, nodeName string) *DockerAdapter { } func (d *DockerAdapter) Command(cmd string) error { - if len(cmd) > 0 { - firstCmdChar := cmd[0] - switch string(firstCmdChar) { - // directly runnable script - case "/": - cmd = "ssh.sh sudo /bin/bash < " + cmd - // script with parameters - case "-": - cmd = "ssh.sh sudo /bin/bash " + cmd - // ordinary command - default: - cmd = "ssh.sh " + cmd - } - } - - success, err := Exec(d.dockerClient, d.nodeName, []string{"/bin/bash", "-c", cmd}, os.Stdout) + success, err := Exec(d.dockerClient, d.nodeName, []string{"/bin/sh", "-c", cmd}, os.Stdout) if err != nil { return err } @@ -46,3 +36,38 @@ func (d *DockerAdapter) Command(cmd string) error { } return nil } + +func (d *DockerAdapter) CommandWithNoStdOut(cmd string) (string, error) { + var buf *bytes.Buffer + success, err := Exec(d.dockerClient, d.nodeName, []string{"/bin/sh", "-c", cmd}, buf) + if err != nil { + return "", err + } + + if !success { + return "", fmt.Errorf("Error executing %s on node %s", cmd, d.nodeName) + } + return buf.String(), nil +} + +func (d *DockerAdapter) SCP(destPath string, contents io.Reader) error { + return d.dockerClient.CopyToContainer(context.Background(), d.nodeName, destPath, contents, types.CopyToContainerOptions{}) +} + +func (d *DockerAdapter) CopyRemoteFile(remotePath string, out io.Writer) error { + defer os.Remove("tempfile") + if _, _, err := 
d.dockerClient.CopyFromContainer(context.Background(), d.nodeName, "tempfile"); err != nil { + return err + } + + tempfile, err := os.ReadFile("tempfile") + if err != nil { + return err + } + + _, err = out.Write(tempfile) + if err != nil { + return err + } + return nil +} From d9a91131376265f0dbef0ec67c19f0eff232b4e3 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:05:49 +0300 Subject: [PATCH 03/12] feat: Docker implementation for the CRI interface The implementation is a wrapper around the docker bash commands to avoid compatiblity issues across different runtime libraries. Signed-off-by: aerosouund --- cluster-provision/gocli/cri/podman/podman.go | 177 ------------------- 1 file changed, 177 deletions(-) delete mode 100644 cluster-provision/gocli/cri/podman/podman.go diff --git a/cluster-provision/gocli/cri/podman/podman.go b/cluster-provision/gocli/cri/podman/podman.go deleted file mode 100644 index f4663c127d..0000000000 --- a/cluster-provision/gocli/cri/podman/podman.go +++ /dev/null @@ -1,177 +0,0 @@ -package podman - -import ( - "fmt" - "io" - "io/fs" - "os" - "os/exec" - "strconv" - "strings" - - "github.com/sirupsen/logrus" - "kubevirt.io/kubevirtci/cluster-provision/gocli/cri" -) - -type Podman struct{} - -func NewPodman() *Podman { - return &Podman{} -} - -type PodmanSSHClient struct { - containerName string -} - -func NewPodmanSSHClient(containerName string) *PodmanSSHClient { - return &PodmanSSHClient{ - containerName: containerName, - } -} - -func IsAvailable() bool { - cmd := exec.Command("podman", "-v") - out, err := cmd.Output() - if err != nil { - return false - } - return strings.HasPrefix(string(out), "podman version") -} - -func (p *PodmanSSHClient) Command(cmd string, stdOut bool) (string, error) { - logrus.Infof("[node %s]: %s\n", p.containerName, cmd) - command := exec.Command("podman", "exec", p.containerName, "/bin/sh", "-c", cmd) - if !stdOut { - out, err := command.CombinedOutput() - if err != nil { - return "", err - 
} - return string(out), nil - } - command.Stdout = os.Stdout - command.Stderr = os.Stderr - - if err := command.Run(); err != nil { - return "", err - } - return "", nil -} - -func (p *PodmanSSHClient) CopyRemoteFile(remotePath, localPath string) error { - cmd := exec.Command("podman", "cp", fmt.Sprintf("%s:%s", p.containerName, remotePath), localPath) - - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("failed to copy file from container: %w, output: %s", err, output) - } - - return nil -} - -func (p *PodmanSSHClient) SCP(destPath string, contents fs.File) error { - tempFile, err := os.CreateTemp("", "podman_cp_temp") - if err != nil { - return fmt.Errorf("failed to create temp file: %w", err) - } - defer os.Remove(tempFile.Name()) - - fileContents, err := io.ReadAll(contents) - if err != nil { - return fmt.Errorf("failed to read file contents: %w", err) - } - - _, err = tempFile.Write(fileContents) - if err != nil { - return fmt.Errorf("failed to write to temp file: %w", err) - } - - err = tempFile.Close() - if err != nil { - return fmt.Errorf("failed to close temp file: %w", err) - } - - cmd := exec.Command("podman", "cp", tempFile.Name(), p.containerName+":"+destPath) - - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("podman cp command failed: %w. 
Output: %s", err, string(output)) - } - - return nil -} - -func (p *Podman) ImagePull(image string) error { - cmd := exec.Command("podman", "pull", image) - if err := cmd.Run(); err != nil { - return err - } - - return nil -} - -func (p *Podman) Create(image string, createOpts *cri.CreateOpts) (string, error) { - ports := "" - for containerPort, hostPort := range createOpts.Ports { - ports += "-p " + containerPort + ":" + hostPort - } - - args := []string{ - "--name=" + createOpts.Name, - "--privileged=" + strconv.FormatBool(createOpts.Privileged), - "--rm=" + strconv.FormatBool(createOpts.Remove), - "--restart=" + createOpts.RestartPolicy, - "--network=" + createOpts.Network, - } - - for containerPort, hostPort := range createOpts.Ports { - args = append(args, "-p", containerPort+":"+hostPort) - } - - if len(createOpts.Capabilities) > 0 { - args = append(args, "--cap-add="+strings.Join(createOpts.Capabilities, ",")) - } - - fullArgs := append([]string{"create"}, args...) - fullArgs = append(fullArgs, image) - fullArgs = append(fullArgs, createOpts.Command...) 
- - cmd := exec.Command("podman", - fullArgs..., - ) - fmt.Println(cmd.String()) - - containerID, err := cmd.CombinedOutput() - if err != nil { - return "", err - } - logrus.Info("created registry container with id: ", string(containerID)) - return strings.TrimSuffix(string(containerID), "\n"), nil -} - -func (p *Podman) Start(containerID string) error { - cmd := exec.Command("podman", - "start", - containerID) - - if _, err := cmd.CombinedOutput(); err != nil { - return err - } - return nil -} - -func (p *Podman) Inspect(containerID, format string) ([]byte, error) { - cmd := exec.Command("podman", "inspect", containerID, "--format", format) - out, err := cmd.Output() - if err != nil { - return nil, err - } - return out, nil -} - -func (p *Podman) Remove(containerID string) error { - cmd := exec.Command("podman", "rm", "-f", containerID) - if err := cmd.Run(); err != nil { - return err - } - return nil -} From 136bc491355d8e2360011cb937e7ef41f4e46694 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:07:21 +0300 Subject: [PATCH 04/12] feat: Podman Implementation for the CRI interface the PodmanSSHClient is the podman equivalent of the DockerAdapter type for dealing with podman containers through the ssh client interface Signed-off-by: aerosouund --- cluster-provision/gocli/cri/podman/podman.go | 188 +++++++++++++++++++ 1 file changed, 188 insertions(+) create mode 100644 cluster-provision/gocli/cri/podman/podman.go diff --git a/cluster-provision/gocli/cri/podman/podman.go b/cluster-provision/gocli/cri/podman/podman.go new file mode 100644 index 0000000000..1cbff0ed65 --- /dev/null +++ b/cluster-provision/gocli/cri/podman/podman.go @@ -0,0 +1,188 @@ +package podman + +import ( + "fmt" + "io" + "os" + "os/exec" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/cri" +) + +type Podman struct{} + +func NewPodman() *Podman { + return &Podman{} +} + +type PodmanSSHClient struct { + containerName 
string +} + +func NewPodmanSSHClient(containerName string) *PodmanSSHClient { + return &PodmanSSHClient{ + containerName: containerName, + } +} + +func IsAvailable() bool { + cmd := exec.Command("podman", "-v") + out, err := cmd.Output() + if err != nil { + return false + } + return strings.HasPrefix(string(out), "podman version") +} + +func (p *PodmanSSHClient) Command(cmd string) error { + logrus.Infof("[node %s]: %s\n", p.containerName, cmd) + command := exec.Command("podman", "exec", p.containerName, "/bin/sh", "-c", cmd) + command.Stdout = os.Stdout + command.Stderr = os.Stderr + + if err := command.Run(); err != nil { + return err + } + return nil +} + +func (p *PodmanSSHClient) CommandWithNoStdOut(cmd string) (string, error) { + command := exec.Command("podman", "exec", p.containerName, "/bin/sh", "-c", cmd) + out, err := command.CombinedOutput() + if err != nil { + return "", err + } + return string(out), nil +} + +func (p *PodmanSSHClient) CopyRemoteFile(remotePath string, out io.Writer) error { + defer os.Remove("tempfile") + cmd := exec.Command("podman", "cp", fmt.Sprintf("%s:%s", p.containerName, remotePath), "tempfile") + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to copy file from container: %w, output: %s", err, output) + } + + tmpfile, err := os.ReadFile("tempfile") + if err != nil { + return err + } + + _, err = out.Write(tmpfile) + if err != nil { + return err + } + + return nil +} + +func (p *PodmanSSHClient) SCP(destPath string, contents io.Reader) error { + tempFile, err := os.CreateTemp("", "podman_cp_temp") + if err != nil { + return fmt.Errorf("failed to create temp file: %w", err) + } + defer os.Remove(tempFile.Name()) + + fileContents, err := io.ReadAll(contents) + if err != nil { + return fmt.Errorf("failed to read file contents: %w", err) + } + + _, err = tempFile.Write(fileContents) + if err != nil { + return fmt.Errorf("failed to write to temp file: %w", err) + } + + err = tempFile.Close() + if 
err != nil { + return fmt.Errorf("failed to close temp file: %w", err) + } + + cmd := exec.Command("podman", "cp", tempFile.Name(), p.containerName+":"+destPath) + + output, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("podman cp command failed: %w. Output: %s", err, string(output)) + } + + return nil +} + +func (p *Podman) ImagePull(image string) error { + cmd := exec.Command("podman", "pull", image) + if err := cmd.Run(); err != nil { + return err + } + + return nil +} + +func (p *Podman) Create(image string, createOpts *cri.CreateOpts) (string, error) { + ports := "" + for containerPort, hostPort := range createOpts.Ports { + ports += "-p " + containerPort + ":" + hostPort + } + + args := []string{ + "--name=" + createOpts.Name, + "--privileged=" + strconv.FormatBool(createOpts.Privileged), + "--rm=" + strconv.FormatBool(createOpts.Remove), + "--restart=" + createOpts.RestartPolicy, + "--network=" + createOpts.Network, + } + + for containerPort, hostPort := range createOpts.Ports { + args = append(args, "-p", containerPort+":"+hostPort) + } + + if len(createOpts.Capabilities) > 0 { + args = append(args, "--cap-add="+strings.Join(createOpts.Capabilities, ",")) + } + + fullArgs := append([]string{"create"}, args...) + fullArgs = append(fullArgs, image) + fullArgs = append(fullArgs, createOpts.Command...) 
+ + cmd := exec.Command("podman", + fullArgs..., + ) + fmt.Println(cmd.String()) + + containerID, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + logrus.Info("created registry container with id: ", string(containerID)) + return strings.TrimSuffix(string(containerID), "\n"), nil +} + +func (p *Podman) Start(containerID string) error { + cmd := exec.Command("podman", + "start", + containerID) + + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + return nil +} + +func (p *Podman) Inspect(containerID, format string) ([]byte, error) { + cmd := exec.Command("podman", "inspect", containerID, "--format", format) + out, err := cmd.Output() + if err != nil { + return nil, err + } + return out, nil +} + +func (p *Podman) Remove(containerID string) error { + cmd := exec.Command("podman", "rm", "-f", containerID) + if err := cmd.Run(); err != nil { + return err + } + return nil +} From f4c6609a47468a4b0a2a9df887e1109c539da5e8 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:14:47 +0300 Subject: [PATCH 05/12] feat: Multus SR-IOV opt Provides the necessary functionality for running multus on sr-iov clusters Signed-off-by: aerosouund --- .../opts/multus-sriov/manifests/multus.yaml | 204 ++++++++++++++++++ .../gocli/opts/multus-sriov/mult_sriov.go | 47 ++++ .../opts/multus-sriov/mult_sriov_test.go | 38 ++++ 3 files changed, 289 insertions(+) create mode 100644 cluster-provision/gocli/opts/multus-sriov/manifests/multus.yaml create mode 100644 cluster-provision/gocli/opts/multus-sriov/mult_sriov.go create mode 100644 cluster-provision/gocli/opts/multus-sriov/mult_sriov_test.go diff --git a/cluster-provision/gocli/opts/multus-sriov/manifests/multus.yaml b/cluster-provision/gocli/opts/multus-sriov/manifests/multus.yaml new file mode 100644 index 0000000000..3657246e3e --- /dev/null +++ b/cluster-provision/gocli/opts/multus-sriov/manifests/multus.yaml @@ -0,0 +1,204 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: 
CustomResourceDefinition +metadata: + name: network-attachment-definitions.k8s.cni.cncf.io +spec: + group: k8s.cni.cncf.io + names: + kind: NetworkAttachmentDefinition + plural: network-attachment-definitions + shortNames: + - net-attach-def + singular: network-attachment-definition + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: 'NetworkAttachmentDefinition is a CRD schema specified by the + Network Plumbing Working Group to express the intent for attaching pods + to one or more logical or physical networks. More information available + at: https://github.com/k8snetworkplumbingwg/multi-net-spec' + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this represen + tation of an object. Servers should convert recognized schemas to the + latest internal value, and may reject unrecognized values. More info: + https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: NetworkAttachmentDefinition spec defines the desired state + of a network attachment + properties: + config: + description: NetworkAttachmentDefinition config is a JSON-formatted + CNI configuration + type: string + type: object + type: object + served: true + storage: true +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: multus + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: multus +rules: +- apiGroups: + - k8s.cni.cncf.io + resources: + - '*' + verbs: + - '*' +- apiGroups: + - "" + resources: + - pods + - pods/status + verbs: + - get + - update +- apiGroups: + - "" + - events.k8s.io + resources: + - events + verbs: + - create + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: multus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: multus +subjects: +- kind: ServiceAccount + name: multus + namespace: kube-system +--- +apiVersion: v1 +data: + cni-conf.json: | + { + "name": "multus-cni-network", + "type": "multus", + "capabilities": { + "portMappings": true + }, + "delegates": [ + { + "cniVersion": "0.3.1", + "name": "default-cni-network", + "plugins": [ + { + "type": "flannel", + "name": "flannel.1", + "delegate": { + "isDefaultGateway": true, + "hairpinMode": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + ], + "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" + } +kind: ConfigMap +metadata: + labels: + app: multus + tier: node + name: multus-cni-config + namespace: kube-system +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: multus + name: multus + tier: node + name: kube-multus-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: 
multus + template: + metadata: + labels: + app: multus + name: multus + tier: node + spec: + containers: + - args: + - --multus-conf-file=auto + - --cni-version=0.3.1 + - --multus-log-level=debug + - --multus-log-file=/var/log/multus.log + command: + - /entrypoint.sh + image: ghcr.io/k8snetworkplumbingwg/multus-cni:v3.8 + name: kube-multus + resources: + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/etc/cni/net.d + name: cni + - mountPath: /host/opt/cni/bin + name: cnibin + - mountPath: /tmp/multus-conf + name: multus-cfg + hostNetwork: true + serviceAccountName: multus + terminationGracePeriodSeconds: 10 + tolerations: + - effect: NoSchedule + operator: Exists + volumes: + - hostPath: + path: /etc/cni/net.d + name: cni + - hostPath: + path: /opt/cni/bin + name: cnibin + - configMap: + items: + - key: cni-conf.json + path: 70-multus.conf + name: multus-cni-config + name: multus-cfg + updateStrategy: + type: RollingUpdate diff --git a/cluster-provision/gocli/opts/multus-sriov/mult_sriov.go b/cluster-provision/gocli/opts/multus-sriov/mult_sriov.go new file mode 100644 index 0000000000..a00deb5f3e --- /dev/null +++ b/cluster-provision/gocli/opts/multus-sriov/mult_sriov.go @@ -0,0 +1,47 @@ +package multussriov + +import ( + "embed" + "fmt" + + "bytes" + + "github.com/sirupsen/logrus" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" +) + +//go:embed manifests/* +var f embed.FS + +type multusSriovOpt struct { + client k8s.K8sDynamicClient +} + +func NewMultusSriovOpt(c k8s.K8sDynamicClient) *multusSriovOpt { + return &multusSriovOpt{ + client: c, + } +} + +func (o *multusSriovOpt) Exec() error { + yamlData, err := f.ReadFile("manifests/multus.yaml") + if err != nil { + return err + } + yamlDocs := bytes.Split(yamlData, []byte("---\n")) + for _, yamlDoc := range yamlDocs { + if len(yamlDoc) == 0 { + continue + } + + obj, err := 
k8s.SerializeIntoObject(yamlDoc) + if err != nil { + logrus.Info(err.Error()) + continue + } + if err := o.client.Apply(obj); err != nil { + return fmt.Errorf("error applying manifest %s", err) + } + } + return nil +} diff --git a/cluster-provision/gocli/opts/multus-sriov/mult_sriov_test.go b/cluster-provision/gocli/opts/multus-sriov/mult_sriov_test.go new file mode 100644 index 0000000000..1285978721 --- /dev/null +++ b/cluster-provision/gocli/opts/multus-sriov/mult_sriov_test.go @@ -0,0 +1,38 @@ +package multussriov + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "go.uber.org/mock/gomock" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" +) + +func TestMultusSriovOpt(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "MultusSriovOpt Suite") +} + +var _ = Describe("MultusSriovOpt", func() { + var ( + mockCtrl *gomock.Controller + k8sClient k8s.K8sDynamicClient + opt *multusSriovOpt + ) + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + k8sClient = k8s.NewTestClient() + opt = NewMultusSriovOpt(k8sClient) + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + It("should execute MultusSriovOpt successfully", func() { + err := opt.Exec() + Expect(err).NotTo(HaveOccurred()) + }) +}) From 22569344445120c10dd99d44a92ec8548f96316a Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:15:42 +0300 Subject: [PATCH 06/12] feat: SR-IOV components Provide functionality to run core k8s constructs on sr-iov clusters Signed-off-by: aerosouund --- .../gocli/opts/sriov-components/components.go | 47 +++ .../opts/sriov-components/components_test.go | 38 +++ .../manifests/components.yaml | 294 ++++++++++++++++++ 3 files changed, 379 insertions(+) create mode 100644 cluster-provision/gocli/opts/sriov-components/components.go create mode 100644 cluster-provision/gocli/opts/sriov-components/components_test.go create mode 100644 
cluster-provision/gocli/opts/sriov-components/manifests/components.yaml diff --git a/cluster-provision/gocli/opts/sriov-components/components.go b/cluster-provision/gocli/opts/sriov-components/components.go new file mode 100644 index 0000000000..e804e8c41f --- /dev/null +++ b/cluster-provision/gocli/opts/sriov-components/components.go @@ -0,0 +1,47 @@ +package sriovcomponents + +import ( + "embed" + "fmt" + + "bytes" + + "github.com/sirupsen/logrus" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" +) + +//go:embed manifests/* +var f embed.FS + +type sriovComponentsOpt struct { + client k8s.K8sDynamicClient +} + +func NewSriovComponentsOpt(c k8s.K8sDynamicClient) *sriovComponentsOpt { + return &sriovComponentsOpt{ + client: c, + } +} + +func (o *sriovComponentsOpt) Exec() error { + yamlData, err := f.ReadFile("manifests/components.yaml") + if err != nil { + return err + } + yamlDocs := bytes.Split(yamlData, []byte("---\n")) + for _, yamlDoc := range yamlDocs { + if len(yamlDoc) == 0 { + continue + } + + obj, err := k8s.SerializeIntoObject(yamlDoc) + if err != nil { + logrus.Info(err.Error()) + continue + } + if err := o.client.Apply(obj); err != nil { + return fmt.Errorf("error applying manifest %s", err) + } + } + return nil +} diff --git a/cluster-provision/gocli/opts/sriov-components/components_test.go b/cluster-provision/gocli/opts/sriov-components/components_test.go new file mode 100644 index 0000000000..9efcf2a254 --- /dev/null +++ b/cluster-provision/gocli/opts/sriov-components/components_test.go @@ -0,0 +1,38 @@ +package sriovcomponents + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "go.uber.org/mock/gomock" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" +) + +func TestSriovComponents(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "SriovComponents Suite") +} + +var _ = Describe("SriovComponents", func() { + var ( + mockCtrl *gomock.Controller + k8sClient k8s.K8sDynamicClient + opt *sriovComponentsOpt + ) + + BeforeEach(func() { + mockCtrl = gomock.NewController(GinkgoT()) + k8sClient = k8s.NewTestClient() + opt = NewSriovComponentsOpt(k8sClient) + }) + + AfterEach(func() { + mockCtrl.Finish() + }) + + It("should execute sriov components successfully", func() { + err := opt.Exec() + Expect(err).NotTo(HaveOccurred()) + }) +}) diff --git a/cluster-provision/gocli/opts/sriov-components/manifests/components.yaml b/cluster-provision/gocli/opts/sriov-components/manifests/components.yaml new file mode 100644 index 0000000000..f7ca1f7329 --- /dev/null +++ b/cluster-provision/gocli/opts/sriov-components/manifests/components.yaml @@ -0,0 +1,294 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: sriov +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: sriov-device-plugin + namespace: sriov +--- +apiVersion: v1 +data: + config.json: | + { + "resourceList": [{ + "resourceName": "$RESOURCE_NAME", + "selectors": { + "drivers": $DRIVERS, + "pfNames": $PF_NAMES + } + }] + } +kind: ConfigMap +metadata: + name: sriovdp-config + namespace: sriov +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: sriov-cni + tier: node + name: kube-sriov-cni-ds-amd64 + namespace: sriov +spec: + selector: + matchLabels: + name: sriov-cni + template: + metadata: + labels: + app: sriov-cni + name: sriov-cni + tier: node + spec: + containers: + - image: ghcr.io/k8snetworkplumbingwg/sriov-cni:v2.7.0 + imagePullPolicy: IfNotPresent + name: kube-sriov-cni + resources: + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + 
capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cnibin + nodeSelector: + $LABEL_KEY: $LABEL_VALUE + kubernetes.io/arch: amd64 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - hostPath: + path: /opt/cni/bin + name: cnibin +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: sriovdp + tier: node + name: kube-sriov-device-plugin-amd64 + namespace: sriov +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + app: sriovdp + name: sriov-device-plugin + tier: node + spec: + containers: + - args: + - --log-dir=sriovdp + - --log-level=10 + - --resource-prefix=$RESOURCE_PREFIX + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:v3.4.0 + imagePullPolicy: IfNotPresent + name: kube-sriovdp + resources: + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 250m + memory: 40Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet/ + name: devicesock + readOnly: false + - mountPath: /var/log + name: log + - mountPath: /etc/pcidp + name: config-volume + - mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + name: device-info + hostNetwork: true + nodeSelector: + $LABEL_KEY: $LABEL_VALUE + beta.kubernetes.io/arch: amd64 + serviceAccountName: sriov-device-plugin + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/ + name: devicesock + - hostPath: + path: /var/log + name: log + - hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + name: device-info + - configMap: + items: + - key: config.json + path: config.json + name: sriovdp-config + name: config-volume +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: sriovdp + tier: node + name: kube-sriov-device-plugin-arm64 + namespace: 
sriov +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + app: sriovdp + name: sriov-device-plugin + tier: node + spec: + containers: + - args: + - --log-dir=sriovdp + - --log-level=10 + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-arm64 + imagePullPolicy: IfNotPresent + name: kube-sriovdp + resources: + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 250m + memory: 40Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet/ + name: devicesock + readOnly: false + - mountPath: /var/log + name: log + - mountPath: /etc/pcidp + name: config-volume + - mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + name: device-info + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: arm64 + serviceAccountName: sriov-device-plugin + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/ + name: devicesock + - hostPath: + path: /var/log + name: log + - hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + name: device-info + - configMap: + items: + - key: config.json + path: config.json + name: sriovdp-config + name: config-volume +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + labels: + app: sriovdp + tier: node + name: kube-sriov-device-plugin-ppc64le + namespace: sriov +spec: + selector: + matchLabels: + name: sriov-device-plugin + template: + metadata: + labels: + app: sriovdp + name: sriov-device-plugin + tier: node + spec: + containers: + - args: + - --log-dir=sriovdp + - --log-level=10 + image: ghcr.io/k8snetworkplumbingwg/sriov-network-device-plugin:latest-ppc64le + imagePullPolicy: IfNotPresent + name: kube-sriovdp + resources: + limits: + cpu: 1 + memory: 200Mi + requests: + cpu: 250m + memory: 40Mi + securityContext: + privileged: true + volumeMounts: + - mountPath: /var/lib/kubelet/ + name: devicesock + readOnly: false + - 
mountPath: /var/log + name: log + - mountPath: /etc/pcidp + name: config-volume + - mountPath: /var/run/k8s.cni.cncf.io/devinfo/dp + name: device-info + hostNetwork: true + nodeSelector: + beta.kubernetes.io/arch: ppc64le + serviceAccountName: sriov-device-plugin + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Exists + volumes: + - hostPath: + path: /var/lib/kubelet/ + name: devicesock + - hostPath: + path: /var/log + name: log + - hostPath: + path: /var/run/k8s.cni.cncf.io/devinfo/dp + type: DirectoryOrCreate + name: device-info + - configMap: + items: + - key: config.json + path: config.json + name: sriovdp-config + name: config-volume From 44215d25e232fb47655084e1853ada92bfda5719 Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:17:00 +0300 Subject: [PATCH 07/12] feat: Introduce opts for multiple system tasks Remounting sysfs as readwrite. K8s networking configuration. Downloading and executing registry proxy scripts. Signed-off-by: aerosouund --- .../gocli/opts/network/network.go | 29 +++++++++++++++++ .../gocli/opts/registryproxy/registryproxy.go | 32 +++++++++++++++++++ .../gocli/opts/remountsysfs/sysfs.go | 28 ++++++++++++++++ .../opts/setup-registry/setup_registry.go | 27 ++++++++++++++++ 4 files changed, 116 insertions(+) create mode 100644 cluster-provision/gocli/opts/network/network.go create mode 100644 cluster-provision/gocli/opts/registryproxy/registryproxy.go create mode 100644 cluster-provision/gocli/opts/remountsysfs/sysfs.go create mode 100644 cluster-provision/gocli/opts/setup-registry/setup_registry.go diff --git a/cluster-provision/gocli/opts/network/network.go b/cluster-provision/gocli/opts/network/network.go new file mode 100644 index 0000000000..230589773c --- /dev/null +++ b/cluster-provision/gocli/opts/network/network.go @@ -0,0 +1,29 @@ +package network + +import "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" + +type NetworkOpt struct { + sshClient libssh.Client 
+} + +func NewNetworkOpt(sshClient libssh.Client) *NetworkOpt { + return &NetworkOpt{ + sshClient: sshClient, + } +} + +func (n *NetworkOpt) Exec() error { + cmds := []string{ + "modprobe br_netfilter", + "sysctl -w net.bridge.bridge-nf-call-arptables=1", + "sysctl -w net.bridge.bridge-nf-call-iptables=1", + "sysctl -w net.bridge.bridge-nf-call-ip6tables=1", + } + + for _, cmd := range cmds { + if err := n.sshClient.Command(cmd); err != nil { + return err + } + } + return nil +} diff --git a/cluster-provision/gocli/opts/registryproxy/registryproxy.go b/cluster-provision/gocli/opts/registryproxy/registryproxy.go new file mode 100644 index 0000000000..eb5d9ea203 --- /dev/null +++ b/cluster-provision/gocli/opts/registryproxy/registryproxy.go @@ -0,0 +1,32 @@ +package registryproxy + +import "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" + +type RegistryProxyOpt struct { + sshClient libssh.Client + proxyUrl string +} + +func NewRegistryProxyOpt(sshClient libssh.Client, proxyUrl string) *RegistryProxyOpt { + return &RegistryProxyOpt{ + sshClient: sshClient, + proxyUrl: proxyUrl, + } +} + +func (rp *RegistryProxyOpt) Exec() error { + setupUrl := "http://" + rp.proxyUrl + ":3128/setup/systemd" + cmds := []string{ + "curl " + setupUrl + " > proxyscript.sh", + "sed -i s/docker.service/containerd.service/g proxyscript.sh", + `sed -i '/Environment/ s/$/ \"NO_PROXY=127.0.0.0\/8,10.0.0.0\/8,172.16.0.0\/12,192.168.0.0\/16\"/' proxyscript.sh`, + "source proxyscript.sh", + } + for _, cmd := range cmds { + if err := rp.sshClient.Command(cmd); err != nil { + return err + } + } + return nil + +} diff --git a/cluster-provision/gocli/opts/remountsysfs/sysfs.go b/cluster-provision/gocli/opts/remountsysfs/sysfs.go new file mode 100644 index 0000000000..fc0a9f3a22 --- /dev/null +++ b/cluster-provision/gocli/opts/remountsysfs/sysfs.go @@ -0,0 +1,28 @@ +package remountsysfs + +import "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" + +type RemountSysFSOpt struct { + 
sshClient libssh.Client +} + +func NewRemountSysFSOpt(sshClient libssh.Client) *RemountSysFSOpt { + return &RemountSysFSOpt{ + sshClient: sshClient, + } +} + +func (r *RemountSysFSOpt) Exec() error { + cmds := []string{ + "mount -o remount,rw /sys", + "ls -la -Z /dev/vfio", + "chmod 0666 /dev/vfio/vfio", + } + + for _, cmd := range cmds { + if err := r.sshClient.Command(cmd); err != nil { + return err + } + } + return nil +} diff --git a/cluster-provision/gocli/opts/setup-registry/setup_registry.go b/cluster-provision/gocli/opts/setup-registry/setup_registry.go new file mode 100644 index 0000000000..89addda131 --- /dev/null +++ b/cluster-provision/gocli/opts/setup-registry/setup_registry.go @@ -0,0 +1,27 @@ +package setupregistry + +import "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" + +type SetupRegistryOpt struct { + sshClient libssh.Client + registryIP string +} + +func NewSetupRegistry(sshClient libssh.Client, registryIP string) *SetupRegistryOpt { + return &SetupRegistryOpt{ + sshClient: sshClient, + registryIP: registryIP, + } +} + +func (sr *SetupRegistryOpt) Exec() error { + cmds := []string{ + "echo " + sr.registryIP + "\tregistry | tee -a /etc/hosts", + } + for _, cmd := range cmds { + if err := sr.sshClient.Command(cmd); err != nil { + return err + } + } + return nil +} From 2b30bb26b9a9f02a75c16f7a610b15c034e2e4bb Mon Sep 17 00:00:00 2001 From: aerosouund Date: Sun, 28 Jul 2024 13:20:58 +0300 Subject: [PATCH 08/12] feat: Kind base provider The base provider is a valid provider that can be run standalone for a stock kind cluster, And is also the base for any other type of kind cluster. It is container runtime agnostic and depends on the sigs.k8s.io/kind/pkg/cluster package which is introduced in this commit as a dependency. It uses this package to provision a kind cluster and provides extra functionality by running a registry container and configuring the nodes to reach it. 
Signed-off-by: aerosouund --- cluster-provision/gocli/go.mod | 7 + cluster-provision/gocli/go.sum | 8 + .../gocli/providers/kind/kind.go | 1 + .../providers/kind/kindbase/kind_base.go | 324 ++++ .../kind/kindbase/manifests/audit.yaml | 4 + .../kindbase/manifests/cpu-manager-patch.yaml | 9 + .../kindbase/manifests/etcd-in-mem-patch.yaml | 8 + .../kind/kindbase/manifests/ip-family.yaml | 2 + .../kind/kindbase/manifests/kind.yaml | 8 + .../kind/kindbase/manifests/vfio.yaml | 2 + .../kind/kindbase/manifests/worker-patch.yaml | 5 + .../github.com/BurntSushi/toml/.gitignore | 2 + .../vendor/github.com/BurntSushi/toml/COPYING | 21 + .../github.com/BurntSushi/toml/README.md | 120 ++ .../github.com/BurntSushi/toml/decode.go | 613 +++++++ .../github.com/BurntSushi/toml/deprecated.go | 29 + .../vendor/github.com/BurntSushi/toml/doc.go | 8 + .../github.com/BurntSushi/toml/encode.go | 778 +++++++++ .../github.com/BurntSushi/toml/error.go | 356 ++++ .../github.com/BurntSushi/toml/internal/tz.go | 36 + .../vendor/github.com/BurntSushi/toml/lex.go | 1281 ++++++++++++++ .../vendor/github.com/BurntSushi/toml/meta.go | 148 ++ .../github.com/BurntSushi/toml/parse.go | 844 ++++++++++ .../github.com/BurntSushi/toml/type_fields.go | 238 +++ .../github.com/BurntSushi/toml/type_toml.go | 65 + .../github.com/evanphx/json-patch/v5/LICENSE | 25 + .../evanphx/json-patch/v5/errors.go | 38 + .../json-patch/v5/internal/json/decode.go | 1385 ++++++++++++++++ .../json-patch/v5/internal/json/encode.go | 1473 +++++++++++++++++ .../json-patch/v5/internal/json/fold.go | 141 ++ .../json-patch/v5/internal/json/fuzz.go | 42 + .../json-patch/v5/internal/json/indent.go | 143 ++ .../json-patch/v5/internal/json/scanner.go | 610 +++++++ .../json-patch/v5/internal/json/stream.go | 515 ++++++ .../json-patch/v5/internal/json/tables.go | 218 +++ .../json-patch/v5/internal/json/tags.go | 38 + .../github.com/evanphx/json-patch/v5/merge.go | 438 +++++ .../github.com/evanphx/json-patch/v5/patch.go | 1261 
++++++++++++++ .../vendor/github.com/google/safetext/LICENSE | 202 +++ .../google/safetext/common/common.go | 260 +++ .../safetext/yamltemplate/yamltemplate.go | 657 ++++++++ .../vendor/github.com/mattn/go-isatty/LICENSE | 9 + .../github.com/mattn/go-isatty/README.md | 50 + .../vendor/github.com/mattn/go-isatty/doc.go | 2 + .../github.com/mattn/go-isatty/go.test.sh | 12 + .../github.com/mattn/go-isatty/isatty_bsd.go | 20 + .../mattn/go-isatty/isatty_others.go | 17 + .../mattn/go-isatty/isatty_plan9.go | 23 + .../mattn/go-isatty/isatty_solaris.go | 21 + .../mattn/go-isatty/isatty_tcgets.go | 20 + .../mattn/go-isatty/isatty_windows.go | 125 ++ .../pelletier/go-toml/.dockerignore | 2 + .../github.com/pelletier/go-toml/.gitignore | 5 + .../pelletier/go-toml/CONTRIBUTING.md | 132 ++ .../github.com/pelletier/go-toml/Dockerfile | 11 + .../github.com/pelletier/go-toml/LICENSE | 247 +++ .../github.com/pelletier/go-toml/Makefile | 29 + .../go-toml/PULL_REQUEST_TEMPLATE.md | 5 + .../github.com/pelletier/go-toml/README.md | 176 ++ .../github.com/pelletier/go-toml/SECURITY.md | 19 + .../pelletier/go-toml/azure-pipelines.yml | 188 +++ .../github.com/pelletier/go-toml/benchmark.sh | 35 + .../github.com/pelletier/go-toml/doc.go | 23 + .../pelletier/go-toml/example-crlf.toml | 30 + .../github.com/pelletier/go-toml/example.toml | 30 + .../github.com/pelletier/go-toml/fuzz.go | 31 + .../github.com/pelletier/go-toml/fuzz.sh | 15 + .../pelletier/go-toml/keysparsing.go | 112 ++ .../github.com/pelletier/go-toml/lexer.go | 1031 ++++++++++++ .../github.com/pelletier/go-toml/localtime.go | 287 ++++ .../github.com/pelletier/go-toml/marshal.go | 1308 +++++++++++++++ .../go-toml/marshal_OrderPreserve_test.toml | 39 + .../pelletier/go-toml/marshal_test.toml | 39 + .../github.com/pelletier/go-toml/parser.go | 507 ++++++ .../github.com/pelletier/go-toml/position.go | 29 + .../github.com/pelletier/go-toml/token.go | 136 ++ .../github.com/pelletier/go-toml/toml.go | 533 ++++++ 
.../github.com/pelletier/go-toml/tomlpub.go | 71 + .../pelletier/go-toml/tomltree_create.go | 155 ++ .../pelletier/go-toml/tomltree_write.go | 552 ++++++ .../pelletier/go-toml/tomltree_writepub.go | 6 + cluster-provision/gocli/vendor/modules.txt | 63 + .../gocli/vendor/sigs.k8s.io/kind/LICENSE | 201 +++ .../kind/pkg/apis/config/defaults/image.go | 21 + .../kind/pkg/apis/config/v1alpha4/default.go | 90 + .../kind/pkg/apis/config/v1alpha4/doc.go | 21 + .../kind/pkg/apis/config/v1alpha4/types.go | 319 ++++ .../kind/pkg/apis/config/v1alpha4/yaml.go | 74 + .../config/v1alpha4/zz_generated.deepcopy.go | 213 +++ .../kind/pkg/cluster/constants/constants.go | 49 + .../kind/pkg/cluster/createoption.go | 126 ++ .../sigs.k8s.io/kind/pkg/cluster/doc.go | 18 + .../cluster/internal/create/actions/action.go | 90 + .../internal/create/actions/config/config.go | 279 ++++ .../internal/create/actions/installcni/cni.go | 134 ++ .../create/actions/installstorage/storage.go | 99 ++ .../create/actions/kubeadminit/init.go | 159 ++ .../create/actions/kubeadmjoin/join.go | 139 ++ .../actions/loadbalancer/loadbalancer.go | 100 ++ .../actions/waitforready/waitforready.go | 147 ++ .../pkg/cluster/internal/create/create.go | 257 +++ .../pkg/cluster/internal/delete/delete.go | 54 + .../pkg/cluster/internal/kubeadm/config.go | 534 ++++++ .../pkg/cluster/internal/kubeadm/const.go | 24 + .../kind/pkg/cluster/internal/kubeadm/doc.go | 18 + .../kubeconfig/internal/kubeconfig/encode.go | 64 + .../kubeconfig/internal/kubeconfig/helpers.go | 41 + .../kubeconfig/internal/kubeconfig/lock.go | 49 + .../kubeconfig/internal/kubeconfig/merge.go | 111 ++ .../kubeconfig/internal/kubeconfig/paths.go | 167 ++ .../kubeconfig/internal/kubeconfig/read.go | 82 + .../kubeconfig/internal/kubeconfig/remove.go | 111 ++ .../kubeconfig/internal/kubeconfig/types.go | 89 + .../kubeconfig/internal/kubeconfig/write.go | 44 + .../cluster/internal/kubeconfig/kubeconfig.go | 105 ++ .../cluster/internal/loadbalancer/config.go | 85 
+ .../cluster/internal/loadbalancer/const.go | 23 + .../pkg/cluster/internal/loadbalancer/doc.go | 18 + .../kind/pkg/cluster/internal/logs/doc.go | 18 + .../kind/pkg/cluster/internal/logs/logs.go | 105 ++ .../internal/providers/common/cgroups.go | 85 + .../internal/providers/common/constants.go | 21 + .../cluster/internal/providers/common/doc.go | 18 + .../internal/providers/common/getport.go | 49 + .../internal/providers/common/images.go | 33 + .../cluster/internal/providers/common/logs.go | 59 + .../internal/providers/common/namer.go | 37 + .../internal/providers/common/proxy.go | 64 + .../cluster/internal/providers/docker/OWNERS | 2 + .../internal/providers/docker/constants.go | 24 + .../internal/providers/docker/images.go | 91 + .../internal/providers/docker/network.go | 329 ++++ .../cluster/internal/providers/docker/node.go | 171 ++ .../internal/providers/docker/provider.go | 344 ++++ .../internal/providers/docker/provision.go | 418 +++++ .../cluster/internal/providers/docker/util.go | 100 ++ .../cluster/internal/providers/nerdctl/OWNERS | 2 + .../internal/providers/nerdctl/constants.go | 24 + .../internal/providers/nerdctl/images.go | 91 + .../internal/providers/nerdctl/network.go | 187 +++ .../internal/providers/nerdctl/node.go | 175 ++ .../internal/providers/nerdctl/provider.go | 392 +++++ .../internal/providers/nerdctl/provision.go | 388 +++++ .../internal/providers/nerdctl/util.go | 52 + .../cluster/internal/providers/podman/OWNERS | 13 + .../internal/providers/podman/constants.go | 24 + .../internal/providers/podman/images.go | 115 ++ .../internal/providers/podman/network.go | 146 ++ .../cluster/internal/providers/podman/node.go | 171 ++ .../internal/providers/podman/provider.go | 442 +++++ .../internal/providers/podman/provision.go | 436 +++++ .../cluster/internal/providers/podman/util.go | 169 ++ .../cluster/internal/providers/provider.go | 59 + .../sigs.k8s.io/kind/pkg/cluster/nodes/doc.go | 18 + .../kind/pkg/cluster/nodes/types.go | 40 + 
.../kind/pkg/cluster/nodeutils/doc.go | 19 + .../kind/pkg/cluster/nodeutils/roles.go | 153 ++ .../kind/pkg/cluster/nodeutils/util.go | 156 ++ .../sigs.k8s.io/kind/pkg/cluster/provider.go | 257 +++ .../vendor/sigs.k8s.io/kind/pkg/cmd/doc.go | 15 + .../sigs.k8s.io/kind/pkg/cmd/iostreams.go | 41 + .../kind/pkg/cmd/kind/version/version.go | 97 ++ .../vendor/sigs.k8s.io/kind/pkg/cmd/logger.go | 47 + .../sigs.k8s.io/kind/pkg/errors/aggregate.go | 49 + .../kind/pkg/errors/aggregate_forked.go | 167 ++ .../sigs.k8s.io/kind/pkg/errors/concurrent.go | 69 + .../vendor/sigs.k8s.io/kind/pkg/errors/doc.go | 18 + .../sigs.k8s.io/kind/pkg/errors/errors.go | 94 ++ .../sigs.k8s.io/kind/pkg/exec/default.go | 36 + .../vendor/sigs.k8s.io/kind/pkg/exec/doc.go | 20 + .../sigs.k8s.io/kind/pkg/exec/helpers.go | 142 ++ .../vendor/sigs.k8s.io/kind/pkg/exec/local.go | 157 ++ .../vendor/sigs.k8s.io/kind/pkg/exec/types.go | 70 + .../vendor/sigs.k8s.io/kind/pkg/fs/fs.go | 156 ++ .../pkg/internal/apis/config/cluster_util.go | 34 + .../internal/apis/config/convert_v1alpha4.go | 104 ++ .../kind/pkg/internal/apis/config/default.go | 104 ++ .../kind/pkg/internal/apis/config/doc.go | 23 + .../internal/apis/config/encoding/convert.go | 29 + .../pkg/internal/apis/config/encoding/doc.go | 18 + .../pkg/internal/apis/config/encoding/load.go | 93 ++ .../kind/pkg/internal/apis/config/types.go | 277 ++++ .../kind/pkg/internal/apis/config/validate.go | 281 ++++ .../apis/config/zz_generated.deepcopy.go | 196 +++ .../sigs.k8s.io/kind/pkg/internal/env/term.go | 107 ++ .../kind/pkg/internal/patch/doc.go | 18 + .../kind/pkg/internal/patch/json6902patch.go | 53 + .../kind/pkg/internal/patch/kubeyaml.go | 79 + .../kind/pkg/internal/patch/matchinfo.go | 53 + .../kind/pkg/internal/patch/mergepatch.go | 58 + .../kind/pkg/internal/patch/resource.go | 148 ++ .../kind/pkg/internal/patch/toml.go | 100 ++ .../sigs.k8s.io/kind/pkg/internal/sets/doc.go | 25 + .../kind/pkg/internal/sets/empty.go | 23 + 
.../kind/pkg/internal/sets/string.go | 205 +++ .../kind/pkg/internal/version/doc.go | 22 + .../kind/pkg/internal/version/version.go | 325 ++++ .../vendor/sigs.k8s.io/kind/pkg/log/doc.go | 19 + .../vendor/sigs.k8s.io/kind/pkg/log/noop.go | 47 + .../vendor/sigs.k8s.io/kind/pkg/log/types.go | 66 + 200 files changed, 31845 insertions(+) create mode 100644 cluster-provision/gocli/providers/kind/kind.go create mode 100644 cluster-provision/gocli/providers/kind/kindbase/kind_base.go create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/audit.yaml create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/cpu-manager-patch.yaml create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/etcd-in-mem-patch.yaml create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/ip-family.yaml create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/kind.yaml create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/vfio.yaml create mode 100644 cluster-provision/gocli/providers/kind/kindbase/manifests/worker-patch.yaml create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/.gitignore create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/COPYING create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/README.md create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/decode.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/deprecated.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/doc.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/encode.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/error.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/internal/tz.go create mode 100644 
cluster-provision/gocli/vendor/github.com/BurntSushi/toml/lex.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/meta.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/parse.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_fields.go create mode 100644 cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_toml.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/LICENSE create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/errors.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/fold.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/fuzz.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/indent.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/scanner.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/stream.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/tables.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/tags.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/merge.go create mode 100644 cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/patch.go create mode 100644 cluster-provision/gocli/vendor/github.com/google/safetext/LICENSE create mode 100644 cluster-provision/gocli/vendor/github.com/google/safetext/common/common.go create mode 100644 
cluster-provision/gocli/vendor/github.com/google/safetext/yamltemplate/yamltemplate.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/LICENSE create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/README.md create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/doc.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/go.test.sh create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/isatty_bsd.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/isatty_others.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/isatty_plan9.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/isatty_solaris.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/isatty_tcgets.go create mode 100644 cluster-provision/gocli/vendor/github.com/mattn/go-isatty/isatty_windows.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/.dockerignore create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/.gitignore create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/CONTRIBUTING.md create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/Dockerfile create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/LICENSE create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/Makefile create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/PULL_REQUEST_TEMPLATE.md create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/README.md create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/SECURITY.md create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/azure-pipelines.yml create mode 100644 
cluster-provision/gocli/vendor/github.com/pelletier/go-toml/benchmark.sh create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/doc.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/example-crlf.toml create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/example.toml create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/fuzz.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/fuzz.sh create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/keysparsing.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/lexer.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/localtime.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/marshal.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/marshal_OrderPreserve_test.toml create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/marshal_test.toml create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/parser.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/position.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/token.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/toml.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/tomlpub.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/tomltree_create.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/tomltree_write.go create mode 100644 cluster-provision/gocli/vendor/github.com/pelletier/go-toml/tomltree_writepub.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/LICENSE create mode 100644 
cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/apis/config/defaults/image.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/default.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/types.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/yaml.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/apis/config/v1alpha4/zz_generated.deepcopy.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/constants/constants.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/createoption.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/action.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/config/config.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installcni/cni.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/installstorage/storage.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadminit/init.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/kubeadmjoin/join.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/loadbalancer/loadbalancer.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/actions/waitforready/waitforready.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/create/create.go create mode 
100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/delete/delete.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/config.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/const.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeadm/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/encode.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/helpers.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/lock.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/merge.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/paths.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/read.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/remove.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/types.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/internal/kubeconfig/write.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/kubeconfig/kubeconfig.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/config.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/const.go create mode 100644 
cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/loadbalancer/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/logs/logs.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/cgroups.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/constants.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/getport.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/images.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/logs.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/namer.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/common/proxy.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/OWNERS create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/constants.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/images.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/network.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/node.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provider.go create mode 100644 
cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/provision.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/docker/util.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/OWNERS create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/constants.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/images.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/network.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/node.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provider.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/provision.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/nerdctl/util.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/OWNERS create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/constants.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/images.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/network.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/node.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provider.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/provision.go create mode 100644 
cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/podman/util.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/internal/providers/provider.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/nodes/types.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/roles.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/nodeutils/util.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cluster/provider.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cmd/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cmd/iostreams.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cmd/kind/version/version.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/cmd/logger.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/errors/aggregate.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/errors/aggregate_forked.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/errors/concurrent.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/errors/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/errors/errors.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/exec/default.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/exec/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/exec/helpers.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/exec/local.go create mode 100644 
cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/exec/types.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/fs/fs.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/cluster_util.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/convert_v1alpha4.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/default.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/convert.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/encoding/load.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/types.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/validate.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/apis/config/zz_generated.deepcopy.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/env/term.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/json6902patch.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/kubeyaml.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/matchinfo.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/mergepatch.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/resource.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/patch/toml.go 
create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/sets/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/sets/empty.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/sets/string.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/version/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/internal/version/version.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/log/doc.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/log/noop.go create mode 100644 cluster-provision/gocli/vendor/sigs.k8s.io/kind/pkg/log/types.go diff --git a/cluster-provision/gocli/go.mod b/cluster-provision/gocli/go.mod index 8c735b81e6..fb17de8dc4 100644 --- a/cluster-provision/gocli/go.mod +++ b/cluster-provision/gocli/go.mod @@ -17,6 +17,7 @@ require ( github.com/spf13/afero v1.11.0 github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.9.0 go.uber.org/mock v0.4.0 golang.org/x/crypto v0.24.0 golang.org/x/net v0.26.0 @@ -29,6 +30,7 @@ require ( k8s.io/pod-security-admission v0.22.0-beta.0.0.20240531013614-68e02f3c6735 kubevirt.io/application-aware-quota-api v1.2.3 kubevirt.io/containerized-data-importer-api v1.59.1-0.20240610172909-253d5a6e7f95 + sigs.k8s.io/kind v0.23.0 sigs.k8s.io/yaml v1.4.0 ) @@ -44,6 +46,7 @@ require ( github.com/distribution/reference v0.6.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.8.0 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-jose/go-jose/v4 v4.0.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -55,6 +58,7 @@ require ( github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + 
github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -75,6 +79,7 @@ require ( github.com/kube-object-storage/lib-bucket-provisioner v0.0.0-20221122204822-d1a8c34382f1 // indirect github.com/libopenstorage/secrets v0.0.0-20240416031220-a17cf7f72c6c // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect @@ -88,6 +93,8 @@ require ( github.com/nxadm/tail v1.4.11 // indirect github.com/openshift/api v0.0.0 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect go.opentelemetry.io/otel v1.20.0 // indirect diff --git a/cluster-provision/gocli/go.sum b/cluster-provision/gocli/go.sum index b2fbed04a0..33d7fe9f49 100644 --- a/cluster-provision/gocli/go.sum +++ b/cluster-provision/gocli/go.sum @@ -810,6 +810,8 @@ github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= +github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath 
v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -1035,6 +1037,8 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -1397,6 +1401,8 @@ github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7ir github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -2733,6 +2739,8 @@ 
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNza sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg= +sigs.k8s.io/kind v0.23.0/go.mod h1:ZQ1iZuJLh3T+O8fzhdi3VWcFTzsdXtNv2ppsHc8JQ7s= sigs.k8s.io/kustomize/api v0.13.4/go.mod h1:Bkaavz5RKK6ZzP0zgPrB7QbpbBJKiHuD3BB0KujY7Ls= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/cmd/config v0.11.2/go.mod h1:PCpHxyu10daTnbMfn3xhH1vppn7L8jsS3qpRKXb7Lkc= diff --git a/cluster-provision/gocli/providers/kind/kind.go b/cluster-provision/gocli/providers/kind/kind.go new file mode 100644 index 0000000000..eb1ae7caf3 --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kind.go @@ -0,0 +1 @@ +package kind diff --git a/cluster-provision/gocli/providers/kind/kindbase/kind_base.go b/cluster-provision/gocli/providers/kind/kindbase/kind_base.go new file mode 100644 index 0000000000..faad72fffe --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/kind_base.go @@ -0,0 +1,324 @@ +package kindbase + +import ( + "context" + "embed" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "runtime" + "strings" + + "github.com/docker/docker/client" + "k8s.io/client-go/rest" + "sigs.k8s.io/yaml" + + "github.com/sirupsen/logrus" + "kubevirt.io/kubevirtci/cluster-provision/gocli/cri" + dockercri "kubevirt.io/kubevirtci/cluster-provision/gocli/cri/docker" + podmancri "kubevirt.io/kubevirtci/cluster-provision/gocli/cri/podman" + "kubevirt.io/kubevirtci/cluster-provision/gocli/docker" + "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/network" + 
"kubevirt.io/kubevirtci/cluster-provision/gocli/opts/registryproxy" + setupregistry "kubevirt.io/kubevirtci/cluster-provision/gocli/opts/setup-registry" + k8s "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/k8s" + "kubevirt.io/kubevirtci/cluster-provision/gocli/pkg/libssh" + kind "sigs.k8s.io/kind/pkg/cluster" +) + +//go:embed manifests/* +var f embed.FS + +type KindProvider interface { + Start(ctx context.Context, cancel context.CancelFunc) error + Delete() error +} + +type KindBaseProvider struct { + Client k8s.K8sDynamicClient + CRI cri.ContainerClient + Provider *kind.Provider + Image string + Cluster string + + *KindConfig +} +type KindConfig struct { + Nodes int + RegistryPort string + Version string + RunEtcdOnMemory bool + IpFamily string + WithCPUManager bool + RegistryProxy string + WithExtraMounts bool + WithVfio bool +} + +const ( + kind128Image = "kindest/node:v1.28.0@sha256:b7a4cad12c197af3ba43202d3efe03246b3f0793f162afb40a33c923952d5b31" + cniArchieFilename = "cni-archive.tar.gz" + registryImage = "quay.io/kubevirtci/library-registry:2.7.1" +) + +func NewKindBaseProvider(kindConfig *KindConfig) (*KindBaseProvider, error) { + var ( + cri cri.ContainerClient + k *kind.Provider + ) + + runtime, err := DetectContainerRuntime() + if err != nil { + return nil, err + } + + switch runtime { + case "docker": + logrus.Info("Using Docker as container runtime") + cri = dockercri.NewDockerClient() + k = kind.NewProvider(kind.ProviderWithDocker()) + case "podman": + logrus.Info("Using Podman as container runtime") + cri = podmancri.NewPodman() + k = kind.NewProvider(kind.ProviderWithPodman()) + } + + kp := &KindBaseProvider{ + Image: kind128Image, + CRI: cri, + Provider: k, + KindConfig: kindConfig, + } + cluster, err := kp.PrepareClusterYaml(kindConfig.WithExtraMounts, kindConfig.WithVfio) + if err != nil { + return nil, err + } + + kp.Cluster = cluster + return kp, nil +} + +func DetectContainerRuntime() (string, error) { + if podmancri.IsAvailable() { + 
return "podman", nil + } + if dockercri.IsAvailable() { + return "docker", nil + } + return "", fmt.Errorf("No valid container runtime found") +} + +func (k *KindBaseProvider) Start(ctx context.Context, cancel context.CancelFunc) error { + err := k.Provider.Create(k.Version, kind.CreateWithRawConfig([]byte(k.Cluster)), kind.CreateWithNodeImage(k.Image)) + if err != nil { + return err + } + logrus.Infof("Kind %s base cluster started\n", k.Version) + + kubeconf, err := k.Provider.KubeConfig(k.Version, true) + if err != nil { + return err + } + + jsonData, err := yaml.YAMLToJSON([]byte(kubeconf)) + if err != nil { + return err + } + config := &rest.Config{} + err = json.Unmarshal(jsonData, config) + if err != nil { + return err + } + + k8sClient, err := k8s.NewDynamicClient(config) + if err != nil { + return err + } + k.Client = k8sClient + nodes, err := k.Provider.ListNodes(k.Version) + if err != nil { + return err + } + + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + return err + } + + err = k.downloadCNI() + if err != nil { + return nil + } + + _, registryIP, err := k.runRegistry(k.RegistryPort) + if err != nil { + return err + } + + var sshClient libssh.Client + for _, node := range nodes { + switch k.CRI.(type) { + case *dockercri.DockerClient: + sshClient = docker.NewAdapter(cli, node.String()) + case *podmancri.Podman: + sshClient = podmancri.NewPodmanSSHClient(node.String()) + } + + if err := k.setupCNI(sshClient); err != nil { + return err + } + + sr := setupregistry.NewSetupRegistry(sshClient, registryIP) + if err = sr.Exec(); err != nil { + return err + } + + n := network.NewNetworkOpt(sshClient) + if err = n.Exec(); err != nil { + return err + } + + if k.RegistryProxy != "" { + rp := registryproxy.NewRegistryProxyOpt(sshClient, k.RegistryProxy) + if err = rp.Exec(); err != nil { + return err + } + } + } + + return nil +} + +func (k *KindBaseProvider) Delete() error { + if err := k.Provider.Delete(k.Version, ""); err != nil { + 
return err + } + if err := k.deleteRegistry(); err != nil { + return err + } + return nil +} + +func (k *KindBaseProvider) PrepareClusterYaml(withExtraMounts, withVfio bool) (string, error) { + cluster, err := f.ReadFile("manifests/kind.yaml") + if err != nil { + return "", err + } + + wp, err := f.ReadFile("manifests/worker-patch.yaml") + if err != nil { + return "", err + } + + cpump, err := f.ReadFile("manifests/cpu-manager-patch.yaml") + if err != nil { + return "", err + } + + ipf, err := f.ReadFile("manifests/ip-family.yaml") + if err != nil { + return "", err + } + + if withExtraMounts { + aud, err := f.ReadFile("manifests/audit.yaml") + if err != nil { + return "", err + } + cluster = append(cluster, aud...) + cluster = append(cluster, []byte("\n")...) + } + + if withVfio { + vfio, err := f.ReadFile("manifests/vfio.yaml") + if err != nil { + return "", err + } + cluster = append(cluster, vfio...) + cluster = append(cluster, []byte("\n")...) + } + + for i := 0; i < k.Nodes; i++ { + cluster = append(cluster, wp...) + cluster = append(cluster, []byte("\n")...) + if k.WithCPUManager { + cluster = append(cluster, cpump...) + cluster = append(cluster, []byte("\n")...) + } + } + + if k.IpFamily != "" { + cluster = append(cluster, []byte(string(ipf)+k.IpFamily)...) 
+ } + return string(cluster), nil +} + +func (k *KindBaseProvider) setupCNI(sshClient libssh.Client) error { + file, err := os.Open(cniArchieFilename) + if err != nil { + return err + } + + err = sshClient.SCP("/opt/cni/bin", file) + if err != nil { + return err + } + return nil +} + +func (k *KindBaseProvider) deleteRegistry() error { + return k.CRI.Remove(k.Version + "-registry") +} + +func (k *KindBaseProvider) runRegistry(hostPort string) (string, string, error) { + registryID, err := k.CRI.Create(registryImage, &cri.CreateOpts{ + Name: k.Version + "-registry", + Privileged: true, + Network: "kind", + RestartPolicy: "always", + Ports: map[string]string{ + "5000": hostPort, + }, + }) + if err != nil { + return "", "", err + } + + if err := k.CRI.Start(registryID); err != nil { + return "", "", err + } + + ip, err := k.CRI.Inspect(registryID, "{{.NetworkSettings.Networks.kind.IPAddress}}") + if err != nil { + return "", "", err + } + + return registryID, strings.TrimSuffix(string(ip), "\n"), nil +} + +func (k *KindBaseProvider) downloadCNI() error { + out, err := os.Create(cniArchieFilename) + if err != nil { + return err + } + defer out.Close() + + resp, err := http.Get("https://github.com/containernetworking/plugins/releases/download/v0.8.5/cni-plugins-linux-" + runtime.GOARCH + "-v0.8.5.tgz") + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("bad status: %s", resp.Status) + } + + _, err = io.Copy(out, resp.Body) + if err != nil { + return err + } + logrus.Info("Downloaded cni archive") + return nil +} diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/audit.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/audit.yaml new file mode 100644 index 0000000000..d6f172863a --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/audit.yaml @@ -0,0 +1,4 @@ + extraMounts: + - containerPath: /var/log/audit + hostPath: /var/log/audit + 
readOnly: true \ No newline at end of file diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/cpu-manager-patch.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/cpu-manager-patch.yaml new file mode 100644 index 0000000000..4f56067ced --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/cpu-manager-patch.yaml @@ -0,0 +1,9 @@ + kubeadmConfigPatches: + - |- + kind: JoinConfiguration + nodeRegistration: + kubeletExtraArgs: + "feature-gates": "CPUManager=true" + "cpu-manager-policy": "static" + "kube-reserved": "cpu=500m" + "system-reserved": "cpu=500m" \ No newline at end of file diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/etcd-in-mem-patch.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/etcd-in-mem-patch.yaml new file mode 100644 index 0000000000..aa82a70861 --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/etcd-in-mem-patch.yaml @@ -0,0 +1,8 @@ +kubeadmConfigPatches: +- | + kind: ClusterConfiguration + metadata: + name: config + etcd: + local: + dataDir: \ No newline at end of file diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/ip-family.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/ip-family.yaml new file mode 100644 index 0000000000..357ef75aeb --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/ip-family.yaml @@ -0,0 +1,2 @@ +networking: + ipFamily: \ No newline at end of file diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/kind.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/kind.yaml new file mode 100644 index 0000000000..0e2ff6bc68 --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/kind.yaml @@ -0,0 +1,8 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +containerdConfigPatches: +- |- + [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry:5000"] + endpoint = ["http://registry:5000"] 
+nodes: +- role: control-plane diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/vfio.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/vfio.yaml new file mode 100644 index 0000000000..0f517c0bef --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/vfio.yaml @@ -0,0 +1,2 @@ + - containerPath: /dev/vfio/ + hostPath: /dev/vfio/ \ No newline at end of file diff --git a/cluster-provision/gocli/providers/kind/kindbase/manifests/worker-patch.yaml b/cluster-provision/gocli/providers/kind/kindbase/manifests/worker-patch.yaml new file mode 100644 index 0000000000..c050317624 --- /dev/null +++ b/cluster-provision/gocli/providers/kind/kindbase/manifests/worker-patch.yaml @@ -0,0 +1,5 @@ +- role: worker + extraMounts: + - containerPath: /var/log/audit + hostPath: /var/log/audit + readOnly: true \ No newline at end of file diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/.gitignore b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 0000000000..fe79e3adda --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +/toml.test +/toml-test diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/COPYING b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the 
following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/README.md b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000000..639e6c3998 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,120 @@ +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` packages. + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). 
+ +This library requires Go 1.18 or newer; add it to your go.mod with: + + % go get github.com/BurntSushi/toml@latest + +It also comes with a TOML validator CLI tool: + + % go install github.com/BurntSushi/toml/cmd/tomlv@latest + % tomlv some-toml-file.toml + +### Examples +For the simplest example, consider some TOML file as just a list of keys and +values: + +```toml +Age = 25 +Cats = [ "Cauchy", "Plato" ] +Pi = 3.14 +Perfection = [ 6, 28, 496, 8128 ] +DOB = 1987-07-05T05:45:00Z +``` + +Which can be decoded with: + +```go +type Config struct { + Age int + Cats []string + Pi float64 + Perfection []int + DOB time.Time +} + +var conf Config +_, err := toml.Decode(tomlData, &conf) +``` + +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: + +```toml +some_key_NAME = "wat" +``` + +```go +type TOML struct { + ObscureKey string `toml:"some_key_NAME"` +} +``` + +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. + +### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces +Here's an example that automatically parses values in a `mail.Address`: + +```toml +contacts = [ + "Donald Duck ", + "Scrooge McDuck ", +] +``` + +Can be decoded with: + +```go +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address +} + +func (a *address) UnmarshalText(text []byte) error { + var err error + a.Address, err = mail.ParseAddress(string(text)) + return err +} + +// Decode it. 
+func decode() { + blob := ` + contacts = [ + "Donald Duck ", + "Scrooge McDuck ", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} +``` + +To target TOML specifically you can implement `UnmarshalTOML` TOML interface in +a similar way. + +### More complex usage +See the [`_example/`](/_example) directory for a more complex example. diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/decode.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000000..7aaf462c94 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,613 @@ +package toml + +import ( + "bytes" + "encoding" + "encoding/json" + "fmt" + "io" + "io/fs" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(any) error +} + +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v any) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) + return err +} + +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v any) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. 
+func DecodeFile(path string, v any) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use [PrimitiveDecode] to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded any + context Key +} + +// The significand precision for float32 and float64 is 24 and 53 bits; this is +// the range a natural number can be stored in a float without loss of data. +const ( + maxSafeFloat32Int = 16777215 // 2^24-1 + maxSafeFloat64Int = int64(9007199254740991) // 2^53-1 +) + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps; they can be used +// interchangeably, but structs offer better type safety. +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the +// local timezone. +// +// [time.Duration] types are treated as nanoseconds if the TOML value is an +// integer, or they're parsed with time.ParseDuration() if they're strings. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. 
+// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// email addresses. +// +// # Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. That is, there may +// exist TOML values that cannot be placed into your representation, and there +// may be parts of your representation that do not correspond to TOML values. +// This loose mapping can be made stricter by using the IsDefined and/or +// Undecoded methods on the MetaData returned. +// +// This decoder does not handle cyclic types. Decode will not terminate if a +// cyclic type is passed. +type Decoder struct { + r io.Reader +} + +// NewDecoder creates a new Decoder. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r: r} +} + +var ( + unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem() +) + +// Decode TOML data in to the pointer `v`. 
+func (dec *Decoder) Decode(v any) (MetaData, error) { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + s := "%q" + if reflect.TypeOf(v) == nil { + s = "%v" + } + + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) + } + if rv.IsNil() { + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) + } + + // Check if this is a supported type: struct, map, any, or something that + // implements UnmarshalTOML or UnmarshalText. + rv = indirect(rv) + rt := rv.Type() + if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && + !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && + !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) + } + + // TODO: parser should read from io.Reader? Or at the very least, make it + // read from []byte rather than string + data, err := io.ReadAll(dec.r) + if err != nil { + return MetaData{}, err + } + + p, err := parse(string(data)) + if err != nil { + return MetaData{}, err + } + + md := MetaData{ + mapping: p.mapping, + keyInfo: p.keyInfo, + keys: p.ordered, + decoded: make(map[string]struct{}, len(p.ordered)), + context: nil, + data: data, + } + return md, md.unify(p.mapping, rv) +} + +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) +// +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) 
+func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// unify performs a sort of type unification based on the structure of `rv`, +// which is the client representation. +// +// Any type mismatch produces an error. Finding a type that we don't know +// how to handle produces an unsupported type error. +func (md *MetaData) unify(data any, rv reflect.Value) error { + // Special case. Look for a `Primitive` value. + // TODO: #76 would make this superfluous after implemented. + if rv.Type() == primitiveType { + // Save the undecoded data and the key context into the primitive + // value. + context := make(Key, len(md.context)) + copy(context, md.context) + rv.Set(reflect.ValueOf(Primitive{ + undecoded: data, + context: context, + })) + return nil + } + + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + err := v.UnmarshalTOML(data) + if err != nil { + return md.parseErr(err) + } + return nil + } + if v, ok := rvi.(encoding.TextUnmarshaler); ok { + return md.unifyText(data, v) + } + + // TODO: + // The behavior here is incorrect whenever a Go type satisfies the + // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or + // array. In particular, the unmarshaler should only be applied to primitive + // TOML values. But at this point, it will be applied to all kinds of values + // and produce an incorrect error whenever those values are hashes or arrays + // (including arrays of tables). 
+ + k := rv.Kind() + + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. + return md.e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32, reflect.Float64: + return md.unifyFloat64(data, rv) + } + return md.e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error { + tmap, ok := mapping.(map[string]any) + if !ok { + if mapping == nil { + return nil + } + return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping)) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = struct{}{} + md.context = append(md.context, key) + + err := md.unify(datum, subv) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in 
%q)", + keyType, rv.Type()) + } + + tmap, ok := mapping.(map[string]any) + if !ok { + if tmap == nil { + return nil + } + return md.badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = struct{}{} + md.context = append(md.context, k) + + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + + err := md.unify(v, indirect(rvval)) + if err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey := indirect(reflect.New(rv.Type().Key())) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data any, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data any, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return md.badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyString(data any, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + 
rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return md.badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error { + rvk := rv.Kind() + + if num, ok := data.(float64); ok { + switch rvk { + case reflect.Float32: + if num < -math.MaxFloat32 || num > math.MaxFloat32 { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + + if num, ok := data.(int64); ok { + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()}) + } + rv.SetFloat(float64(num)) + return nil + } + + return md.badtype("float", data) +} + +func (md *MetaData) unifyInt(data any, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. 
+ if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) + } + rv.SetInt(int64(dur)) + return nil + } + } + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil +} + +func (md *MetaData) unifyBool(data any, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return md.badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data any, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case Marshaler: + text, err := sdata.MarshalTOML() + if err != nil { + return err + } + s = string(text) + case encoding.TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return md.badtype("primitive (string-like)", data) + } + if err := 
v.UnmarshalText([]byte(s)); err != nil { + return md.parseErr(err) + } + return nil +} + +func (md *MetaData) badtype(dst string, data any) error { + return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...any) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v any) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// +// Pointers are followed until the value is not a pointer. New values are +// allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of interest +// to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { + return true + } + return false +} + +// fmt %T with "interface {}" replaced with "any", which is far more readable. 
+func fmtType(t any) string { + return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any") +} diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/deprecated.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/deprecated.go new file mode 100644 index 0000000000..155709a80b --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/deprecated.go @@ -0,0 +1,29 @@ +package toml + +import ( + "encoding" + "io" +) + +// TextMarshaler is an alias for encoding.TextMarshaler. +// +// Deprecated: use encoding.TextMarshaler +type TextMarshaler encoding.TextMarshaler + +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// +// Deprecated: use encoding.TextUnmarshaler +type TextUnmarshaler encoding.TextUnmarshaler + +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) } + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// +// Deprecated: use MetaData.PrimitiveDecode. +func PrimitiveDecode(primValue Primitive, v any) error { + md := MetaData{decoded: make(map[string]struct{})} + return md.unify(primValue.undecoded, rvalue(v)) +} diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/doc.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/doc.go new file mode 100644 index 0000000000..82c90a9057 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/doc.go @@ -0,0 +1,8 @@ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. 
+package toml diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/encode.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/encode.go new file mode 100644 index 0000000000..73366c0d9a --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/encode.go @@ -0,0 +1,778 @@ +package toml + +import ( + "bufio" + "bytes" + "encoding" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml/internal" +) + +type tomlEncodeError struct{ error } + +var ( + errArrayNilElement = errors.New("toml: cannot encode array with nil element") + errNonString = errors.New("toml: cannot encode a map with non-string key type") + errNoKey = errors.New("toml: top-level values must be Go maps or structs") + errAnything = errors.New("") // used in testing +) + +var dblQuotedReplacer = strings.NewReplacer( + "\"", "\\\"", + "\\", "\\\\", + "\x00", `\u0000`, + "\x01", `\u0001`, + "\x02", `\u0002`, + "\x03", `\u0003`, + "\x04", `\u0004`, + "\x05", `\u0005`, + "\x06", `\u0006`, + "\x07", `\u0007`, + "\b", `\b`, + "\t", `\t`, + "\n", `\n`, + "\x0b", `\u000b`, + "\f", `\f`, + "\r", `\r`, + "\x0e", `\u000e`, + "\x0f", `\u000f`, + "\x10", `\u0010`, + "\x11", `\u0011`, + "\x12", `\u0012`, + "\x13", `\u0013`, + "\x14", `\u0014`, + "\x15", `\u0015`, + "\x16", `\u0016`, + "\x17", `\u0017`, + "\x18", `\u0018`, + "\x19", `\u0019`, + "\x1a", `\u001a`, + "\x1b", `\u001b`, + "\x1c", `\u001c`, + "\x1d", `\u001d`, + "\x1e", `\u001e`, + "\x1f", `\u001f`, + "\x7f", `\u007f`, +) + +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + +// Marshaler is the interface implemented by types that can marshal themselves +// into valid TOML. 
+type Marshaler interface { + MarshalTOML() ([]byte, error) +} + +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + +// Encoder encodes a Go to a TOML document. +// +// The mapping between Go values and TOML values should be precisely the same as +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. +// +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to +// encoding the value as custom TOML. +// +// If you want to write arbitrary binary data then you will need to use +// something like base64 since TOML does not have any binary types. +// +// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes +// are encoded first. +// +// Go maps will be sorted alphabetically by key for deterministic output. +// +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// +// Encoding Go values without a corresponding TOML representation will return an +// error. Examples of this includes maps with non-string keys, slices with nil +// elements, embedded non-struct types, and nested slices containing maps or +// structs. (e.g. [][]map[string]string is not allowed but []map[string]string +// is okay, as is []map[string][]string). +// +// NOTE: only exported keys are encoded due to the use of reflection. Unexported +// keys are silently discarded. 
+type Encoder struct { + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? + w *bufio.Writer +} + +// NewEncoder create a new Encoder. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w: bufio.NewWriter(w), Indent: " "} +} + +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. +// +// An error is returned if the value given cannot be encoded to a valid TOML +// document. +func (enc *Encoder) Encode(v any) error { + rv := eindirect(reflect.ValueOf(v)) + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { + return err + } + return enc.w.Flush() +} + +func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { + defer func() { + if r := recover(); r != nil { + if terr, ok := r.(tomlEncodeError); ok { + err = terr.error + return + } + panic(r) + } + }() + enc.encode(key, rv) + return nil +} + +func (enc *Encoder) encode(key Key, rv reflect.Value) { + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): + enc.writeKeyValue(key, rv, false) + return + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. 
+ enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) + return + } + + k := rv.Kind() + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64, + reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: + enc.writeKeyValue(key, rv, false) + case reflect.Array, reflect.Slice: + if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { + enc.eArrayOfTables(key, rv) + } else { + enc.writeKeyValue(key, rv, false) + } + case reflect.Interface: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Map: + if rv.IsNil() { + return + } + enc.eTable(key, rv) + case reflect.Ptr: + if rv.IsNil() { + return + } + enc.encode(key, rv.Elem()) + case reflect.Struct: + enc.eTable(key, rv) + default: + encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k)) + } +} + +// eElement encodes any value that can be an array element. +func (enc *Encoder) eElement(rv reflect.Value) { + switch v := rv.Interface().(type) { + case time.Time: // Using TextMarshaler adds extra quotes, which we don't want. 
+ format := time.RFC3339Nano + switch v.Location() { + case internal.LocalDatetime: + format = "2006-01-02T15:04:05.999999999" + case internal.LocalDate: + format = "2006-01-02" + case internal.LocalTime: + format = "15:04:05.999999999" + } + switch v.Location() { + default: + enc.wf(v.Format(format)) + case internal.LocalDatetime, internal.LocalDate, internal.LocalTime: + enc.wf(v.In(time.UTC).Format(format)) + } + return + case Marshaler: + s, err := v.MarshalTOML() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) + return + case encoding.TextMarshaler: + s, err := v.MarshalText() + if err != nil { + encPanic(err) + } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } + enc.writeQuoted(string(s)) + return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. + enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) + } + + switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("nan") + } else if math.IsInf(f, 0) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") + } else { + 
enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("nan") + } else if math.IsInf(f, 0) { + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", dblQuotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := eindirect(rv.Index(i)) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := eindirect(rv.Index(i)) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) 
+ enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. + var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +const is32Bit = (32 << (^uint(0) >> 63)) == 32 + +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the 
one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { + continue + } + + frv := eindirect(rv.Field(i)) + + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. 
+ if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue + } + } + + if typeIsTable(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeOfGo returns the TOML type name of the Go value's type. +// +// It is used to determine whether the types of array elements are mixed (which +// is forbidden). If the Go value is nil, then it is illegal for it to be an +// array element, and valueIsNil is returned as true. +// +// The type may be `nil`, which means no concrete TOML type could be found. 
+func tomlTypeOfGo(rv reflect.Value) tomlType { + if isNil(rv) || !rv.IsValid() { + return nil + } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + + switch rv.Kind() { + case reflect.Bool: + return tomlBool + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, + reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, + reflect.Uint64: + return tomlInteger + case reflect.Float32, reflect.Float64: + return tomlFloat + case reflect.Array, reflect.Slice: + if isTableArray(rv) { + return tomlArrayHash + } + return tomlArray + case reflect.Ptr, reflect.Interface: + return tomlTypeOfGo(rv.Elem()) + case reflect.String: + return tomlString + case reflect.Map: + return tomlHash + default: + encPanic(errors.New("unsupported type: " + rv.Kind().String())) + panic("unreachable") + } +} + +func isMarshaler(rv reflect.Value) bool { + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) +} + +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false + } + + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. 
+ if tt == nil { + encPanic(errArrayNilElement) + } + + if ret && !typeEqual(tomlHash, tt) { + ret = false + } + } + return ret +} + +type tagOptions struct { + skip bool // "-" + name string + omitempty bool + omitzero bool +} + +func getOptions(tag reflect.StructTag) tagOptions { + t := tag.Get("toml") + if t == "-" { + return tagOptions{skip: true} + } + var opts tagOptions + parts := strings.Split(t, ",") + opts.name = parts[0] + for _, s := range parts[1:] { + switch s { + case "omitempty": + opts.omitempty = true + case "omitzero": + opts.omitzero = true + } + } + return opts +} + +func isZero(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint() == 0 + case reflect.Float32, reflect.Float64: + return rv.Float() == 0.0 + } + return false +} + +func isEmpty(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return rv.Len() == 0 + case reflect.Struct: + if rv.Type().Comparable() { + return reflect.Zero(rv.Type()).Interface() == rv.Interface() + } + // Need to also check if all the fields are empty, otherwise something + // like this with uncomparable types will always return true: + // + // type a struct{ field b } + // type b struct{ s []string } + // s := a{field: b{s: []string{"AAA"}}} + for i := 0; i < rv.NumField(); i++ { + if !isEmpty(rv.Field(i)) { + return false + } + } + return true + case reflect.Bool: + return !rv.Bool() + case reflect.Ptr: + return rv.IsNil() + } + return false +} + +func (enc *Encoder) newline() { + if enc.hasWritten { + enc.wf("\n") + } +} + +// Write a key/value pair: +// +// key = +// +// This is also used for "k = v" in inline tables; so something like this will +// be written in three calls: +// +// ┌───────────────────┐ +// │ ┌───┐ ┌────┐│ +// v v v v vv +// key = {k = 1, 
k2 = 2} +func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) { + /// Marshaler used on top-level document; call eElement() to just call + /// Marshal{TOML,Text}. + if len(key) == 0 { + enc.eElement(val) + return + } + enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) + enc.eElement(val) + if !inline { + enc.newline() + } +} + +func (enc *Encoder) wf(format string, v ...any) { + _, err := fmt.Fprintf(enc.w, format, v...) + if err != nil { + encPanic(err) + } + enc.hasWritten = true +} + +func (enc *Encoder) indentStr(key Key) string { + return strings.Repeat(enc.Indent, len(key)-1) +} + +func encPanic(err error) { + panic(tomlEncodeError{err}) +} + +// Resolve any level of pointers to the actual value (e.g. **string → string). +func eindirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface { + if isMarshaler(v) { + return v + } + if v.CanAddr() { /// Special case for marshalers; see #358. + if pv := v.Addr(); isMarshaler(pv) { + return pv + } + } + return v + } + + if v.IsNil() { + return v + } + + return eindirect(v.Elem()) +} + +func isNil(rv reflect.Value) bool { + switch rv.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return rv.IsNil() + default: + return false + } +} diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/error.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/error.go new file mode 100644 index 0000000000..b45a3f45f6 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/error.go @@ -0,0 +1,356 @@ +package toml + +import ( + "fmt" + "strings" +) + +// ParseError is returned when there is an error parsing the TOML syntax such as +// invalid syntax, duplicate keys, etc. 
+//
+// In addition to the error message itself, you can also print detailed location
+// information with context by using [ErrorWithPosition]:
+//
+//	toml: error: Key 'fruit' was already created and cannot be used as an array.
+//
+//	At line 4, column 2-7:
+//
+//	      2 | fruit = []
+//	      3 |
+//	      4 | [[fruit]] # Not allowed
+//	            ^^^^^
+//
+// [ErrorWithUsage] can be used to print the above with some more detailed usage
+// guidance:
+//
+//	toml: error: newlines not allowed within inline tables
+//
+//	At line 1, column 18:
+//
+//	      1 | x = [{ key = 42 #
+//	                         ^
+//
+//	Error help:
+//
+//	  Inline tables must always be on a single line:
+//
+//	      table = {key = 42, second = 43}
+//
+//	  It is invalid to split them over multiple lines like so:
+//
+//	      # INVALID
+//	      table = {
+//	          key = 42,
+//	          second = 43
+//	      }
+//
+//	  Use regular for this:
+//
+//	      [table]
+//	      key = 42
+//	      second = 43
+type ParseError struct {
+	Message  string   // Short technical message.
+	Usage    string   // Longer message with usage guidance; may be blank.
+	Position Position // Position of the error
+	LastKey  string   // Last parsed key, may be blank.
+
+	// Line the error occurred.
+	//
+	// Deprecated: use [Position].
+	Line int
+
+	err   error
+	input string
+}
+
+// Position of an error.
+type Position struct {
+	Line  int // Line number, starting at 1.
+	Start int // Start of error, as byte offset starting at 0.
+	Len   int // Length in bytes.
+}
+
+func (pe ParseError) Error() string {
+	msg := pe.Message
+	if msg == "" { // Error from errorf()
+		msg = pe.err.Error()
+	}
+
+	if pe.LastKey == "" {
+		return fmt.Sprintf("toml: line %d: %s", pe.Position.Line, msg)
+	}
+	return fmt.Sprintf("toml: line %d (last key %q): %s",
+		pe.Position.Line, pe.LastKey, msg)
+}
+
+// ErrorWithPosition returns the error with detailed location context.
+//
+// See the documentation on [ParseError].
+func (pe ParseError) ErrorWithPosition() string {
+	if pe.input == "" { // Should never happen, but just in case. 
+ return pe.Error() + } + + var ( + lines = strings.Split(pe.input, "\n") + col = pe.column(lines) + b = new(strings.Builder) + ) + + msg := pe.Message + if msg == "" { + msg = pe.err.Error() + } + + // TODO: don't show control characters as literals? This may not show up + // well everywhere. + + if pe.Position.Len == 1 { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d:\n\n", + msg, pe.Position.Line, col+1) + } else { + fmt.Fprintf(b, "toml: error: %s\n\nAt line %d, column %d-%d:\n\n", + msg, pe.Position.Line, col, col+pe.Position.Len) + } + if pe.Position.Line > 2 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) + } + if pe.Position.Line > 1 { + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) + } + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". + expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) + return b.String() +} + +// ErrorWithUsage returns the error with detailed location context and usage +// guidance. +// +// See the documentation on [ParseError]. 
+func (pe ParseError) ErrorWithUsage() string { + m := pe.ErrorWithPosition() + if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" + } + return m +} + +func (pe ParseError) column(lines []string) int { + var pos, col int + for i := range lines { + ll := len(lines[i]) + 1 // +1 for the removed newline + if pos+ll >= pe.Position.Start { + col = pe.Position.Start - pos + if col < 0 { // Should never happen, but just in case. + col = 0 + } + break + } + pos += ll + } + + return col +} + +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' + } + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 + } + } + return b.String() +} + +type ( + errLexControl struct{ r rune } + errLexEscape struct{ r rune } + errLexUTF8 struct{ b byte } + errParseDate struct{ v string } + errLexInlineTableNL struct{} + errLexStringNL struct{} + errParseRange struct { + i any // int or float + size string // "int64", "uint16", etc. 
+ } + errUnsafeFloat struct { + i interface{} // float32 or float64 + size string // "float32" or "float64" + } + errParseDuration struct{ d string } +) + +func (e errLexControl) Error() string { + return fmt.Sprintf("TOML files cannot contain control characters: '0x%02x'", e.r) +} +func (e errLexControl) Usage() string { return "" } + +func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape in string '\%c'`, e.r) } +func (e errLexEscape) Usage() string { return usageEscape } +func (e errLexUTF8) Error() string { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) } +func (e errLexUTF8) Usage() string { return "" } +func (e errParseDate) Error() string { return fmt.Sprintf("invalid datetime: %q", e.v) } +func (e errParseDate) Usage() string { return usageDate } +func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" } +func (e errLexInlineTableNL) Usage() string { return usageInlineNewline } +func (e errLexStringNL) Error() string { return "strings cannot contain newlines" } +func (e errLexStringNL) Usage() string { return usageStringNewline } +func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) } +func (e errParseRange) Usage() string { return usageIntOverflow } +func (e errUnsafeFloat) Error() string { + return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size) +} +func (e errUnsafeFloat) Usage() string { return usageUnsafeFloat } +func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) } +func (e errParseDuration) Usage() string { return usageDuration } + +const usageEscape = ` +A '\' inside a "-delimited string is interpreted as an escape character. 
+ +The following escape sequences are supported: +\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX + +To prevent a '\' from being recognized as an escape character, use either: + +- a ' or '''-delimited string; escape characters aren't processed in them; or +- write two backslashes to get a single backslash: '\\'. + +If you're trying to add a Windows path (e.g. "C:\Users\martin") then using '/' +instead of '\' will usually also work: "C:/Users/martin". +` + +const usageInlineNewline = ` +Inline tables must always be on a single line: + + table = {key = 42, second = 43} + +It is invalid to split them over multiple lines like so: + + # INVALID + table = { + key = 42, + second = 43 + } + +Use regular for this: + + [table] + key = 42 + second = 43 +` + +const usageStringNewline = ` +Strings must always be on a single line, and cannot span more than one line: + + # INVALID + string = "Hello, + world!" + +Instead use """ or ''' to split strings over multiple lines: + + string = """Hello, + world!""" +` + +const usageIntOverflow = ` +This number is too large; this may be an error in the TOML, but it can also be a +bug in the program that uses too small of an integer. + +The maximum and minimum values are: + + size │ lowest │ highest + ───────┼────────────────┼────────────── + int8 │ -128 │ 127 + int16 │ -32,768 │ 32,767 + int32 │ -2,147,483,648 │ 2,147,483,647 + int64 │ -9.2 × 10¹⁷ │ 9.2 × 10¹⁷ + uint8 │ 0 │ 255 + uint16 │ 0 │ 65,535 + uint32 │ 0 │ 4,294,967,295 + uint64 │ 0 │ 1.8 × 10¹⁸ + +int refers to int32 on 32-bit systems and int64 on 64-bit systems. +` + +const usageUnsafeFloat = ` +This number is outside of the "safe" range for floating point numbers; whole +(non-fractional) numbers outside the below range can not always be represented +accurately in a float, leading to some loss of accuracy. 
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+	f = 2_000_000_000.0
+
+Accuracy ranges:
+
+	float32 =            16,777,215
+	float64 = 9,007,199,254,740,991
+`
+
+const usageDuration = `
+A duration must be as "number<unit>", without any spaces. Valid units are:
+
+	ns         nanoseconds (billionth of a second)
+	us, µs     microseconds (millionth of a second)
+	ms         milliseconds (thousands of a second)
+	s          seconds
+	m          minutes
+	h          hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+`
+
+const usageDate = `
+A TOML datetime must be in one of the following formats:
+
+	2006-01-02T15:04:05Z07:00   Date and time, with timezone.
+	2006-01-02T15:04:05         Date and time, but without timezone.
+	2006-01-02                  Date without a time or timezone.
+	15:04:05                    Just a time, without any timezone.
+
+Seconds may optionally have a fraction, up to nanosecond precision:
+
+	15:04:05.123
+	15:04:05.856018510
+`
+
+// TOML 1.1:
+// The seconds part in times is optional, and may be omitted:
+//	2006-01-02T15:04Z07:00
+//	2006-01-02T15:04
+//	15:04
diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/internal/tz.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 0000000000..022f15bc2b
--- /dev/null
+++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// defaults to current local timezone offset of the computer, but this can be
+// changed by changing these variables before decoding.
+// +// TODO: +// Ideally we'd like to offer people the ability to configure the used timezone +// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit +// tricky: the reason we use three different variables for this is to support +// round-tripping – without these specific TZ names we wouldn't know which +// format to use. +// +// There isn't a good way to encode this right now though, and passing this sort +// of information also ties in to various related issues such as string format +// encoding, encoding of comments, etc. +// +// So, for the time being, just put this in internal until we can write a good +// comprehensive API for doing all of this. +// +// The reason they're exported is because they're referred from in e.g. +// internal/tag. +// +// Note that this behaviour is valid according to the TOML spec as the exact +// behaviour is left up to implementations. +var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/lex.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000000..a1016d98a8 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1281 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemStringEsc + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + 
itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const eof = 0 + +type stateFn func(lx *lexer) stateFn + +func (p Position) String() string { + return fmt.Sprintf("at line %d; start %d; length %d", p.Line, p.Start, p.Len) +} + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + esc bool + + // Allow for backing up up to 4 runes. This is necessary because TOML + // contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + atEOF bool // If we emit an eof, we can still back up, but it is not OK to call next again. + + // A stack of state functions used to maintain context. + // + // The idea is to reuse parts of the state machine in various places. For + // example, values can appear at the top level or within arbitrarily nested + // arrays. The last state on the stack is used after a value has been lexed. + // Similarly for comments. + stack []stateFn +} + +type item struct { + typ itemType + val string + err error + pos Position +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string, tomlNext bool) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx lexer) getPos() Position { + p := Position{ + Line: lx.line, + Start: 
lx.start, + Len: lx.pos - lx.start, + } + if p.Len <= 0 { + p.Len = 1 + } + return p +} + +func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. + if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } + lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ: typ, pos: lx.getPos(), val: strings.TrimSpace(lx.current())} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError && w == 1 { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return utf8.RuneError + } + + // Note: don't use peek() here, as this calls next(). + if isControl(r) || (r == '\r' && (len(lx.input)-1 == lx.pos || lx.input[lx.pos+1] != '\n')) { + lx.errorControlChar(r) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. 
+func (lx *lexer) accept(valid rune) bool {
+	if lx.next() == valid {
+		return true
+	}
+	lx.backup()
+	return false
+}
+
+// peek returns but does not consume the next rune in the input.
+func (lx *lexer) peek() rune {
+	r := lx.next()
+	lx.backup()
+	return r
+}
+
+// skip ignores all input that matches the given predicate.
+func (lx *lexer) skip(pred func(rune) bool) {
+	for {
+		r := lx.next()
+		if pred(r) {
+			continue
+		}
+		lx.backup()
+		lx.ignore()
+		return
+	}
+}
+
+// error stops all lexing by emitting an error and returning `nil`.
+//
+// Note that any value that is a character is escaped if it's a special
+// character (newlines, tabs, etc.).
+func (lx *lexer) error(err error) stateFn {
+	if lx.atEOF {
+		return lx.errorPrevLine(err)
+	}
+	lx.items <- item{typ: itemError, pos: lx.getPos(), err: err}
+	return nil
+}
+
+// errorPrevLine is like error(), but sets the position to the last column of
+// the previous line.
+//
+// This is so that unexpected EOF or NL errors don't show on a new blank line.
+func (lx *lexer) errorPrevLine(err error) stateFn {
+	pos := lx.getPos()
+	pos.Line--
+	pos.Len = 1
+	pos.Start = lx.pos - 1
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorPos is like error(), but allows explicitly setting the position.
+func (lx *lexer) errorPos(start, length int, err error) stateFn {
+	pos := lx.getPos()
+	pos.Start = start
+	pos.Len = length
+	lx.items <- item{typ: itemError, pos: pos, err: err}
+	return nil
+}
+
+// errorf is like error, and creates a new error. 
+func (lx *lexer) errorf(format string, values ...any) stateFn { + if lx.atEOF { + pos := lx.getPos() + pos.Line-- + pos.Len = 1 + pos.Start = lx.pos - 1 + lx.items <- item{typ: itemError, pos: pos, err: fmt.Errorf(format, values...)} + return nil + } + lx.items <- item{typ: itemError, pos: lx.getPos(), err: fmt.Errorf(format, values...)} + return nil +} + +func (lx *lexer) errorControlChar(cc rune) stateFn { + return lx.errorPos(lx.pos-1, 1, errLexControl{cc}) +} + +// lexTop consumes elements at the top level of TOML data. +func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case '#': + lx.push(lexTop) + return lexCommentStart + case '[': + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == '#': + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. 
+func lexTableStart(lx *lexer) stateFn { + if lx.peek() == '[' { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != ']' { + return lx.errorf("expected end of table array name delimiter ']', but got %q instead", r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == ']' || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == '.': + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == '.': + lx.ignore() + return lexTableNameStart + case r == ']': + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r, lx.tomlNext) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. 
only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == '"': + lx.ignore() // ignore the '"' + return lexString + case r == '\'': + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == '"' || r == '\'': + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == '"' || r == '\'': + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator '='") + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. 
+func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case '[': + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case '{': + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case '"': + if lx.accept('"') { + if lx.accept('"') { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case '\'': + if lx.accept('\'') { + if lx.accept('\'') { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) + lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. 
+func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == '#': + lx.push(lexArrayValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == ']': + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == '#': + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + return lexArrayValue // move on to the next value + case r == ']': + return lexArrayEnd + default: + return lx.errorf("expected a comma (',') or array terminator (']'), but got %s", runeOrEOF(r)) + } +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. 
+func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValue) + return lexCommentStart + case r == ',': + return lx.errorf("unexpected comma") + case r == '}': + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } + return lx.errorPrevLine(errLexInlineTableNL{}) + case r == '#': + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == ',': + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == '}': + return lexInlineTableEnd + default: + return lx.errorf("expected a comma or an inline table terminator '}', but got %s instead", runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. +func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. 
+func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == '"': + lx.backup() + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineString + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\\': + return lexMultilineStringEscape + case '"': + /// Found " → try to read two more "". + if lx.accept('"') { + if lx.accept('"') { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == '"' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.esc = false + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineString + } +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. 
+// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + default: + return lexRawString + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isNL(r): + return lx.errorPrevLine(errLexStringNL{}) + case r == '\'': + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + default: + return lexMultilineRawString + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\'': + /// Found ' → try to read two more ''. + if lx.accept('\'') { + if lx.accept('\'') { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. + if lx.peek() == '\'' { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + return lexMultilineRawString + } +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + if isNL(lx.next()) { /// \ escaping newline. 
+ return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + lx.esc = true + r := lx.next() + switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.error(errLexEscape{r}) +} + +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. 
+func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. +func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHex(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. 
+func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. 
+ if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + case 'b': + r = lx.peek() + if !isBinary(r) { + lx.errorf("not a binary number: '%s%c'", lx.current(), r) + } + return lexBinaryInteger + case 'o': + r = lx.peek() + if !isOctal(r) { + lx.errorf("not an octal number: '%s%c'", lx.current(), r) + } + return lexOctalInteger + case 'x': + r = lx.peek() + if !isHex(r) { + lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) + } + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexFloat consumes the elements of a float. It allows any sequence of +// float-like characters, so floats emitted by the lexer are only a first +// approximation and must be validated by the parser. +func lexFloat(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexFloat + } + switch r { + case '_', '.', '-', '+', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemFloat) + return lx.pop() +} + +// lexBool consumes a bool string: 'true' or 'false. +func lexBool(lx *lexer) stateFn { + var rs []rune + for { + r := lx.next() + if !unicode.IsLetter(r) { + lx.backup() + break + } + rs = append(rs, r) + } + s := string(rs) + switch s { + case "true", "false": + lx.emit(itemBool) + return lx.pop() + } + return lx.errorf("expected value but found %q instead", s) +} + +// lexCommentStart begins the lexing of a comment. It will emit +// itemCommentStart and consume no characters, passing control to lexComment. +func lexCommentStart(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemCommentStart) + return lexComment +} + +// lexComment lexes an entire comment. It assumes that '#' has been consumed. 
+// It will consume *up to* the first newline character, and pass control +// back to the last state on the stack. +func lexComment(lx *lexer) stateFn { + switch r := lx.next(); { + case isNL(r) || r == eof: + lx.backup() + lx.emit(itemText) + return lx.pop() + default: + return lexComment + } +} + +// lexSkip ignores all slurped input and moves on to the next state. +func lexSkip(lx *lexer, nextState stateFn) stateFn { + lx.ignore() + return nextState +} + +func (s stateFn) String() string { + name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name() + if i := strings.LastIndexByte(name, '.'); i > -1 { + name = name[i+1:] + } + if s == nil { + name = "" + } + return name + "()" +} + +func (itype itemType) String() string { + switch itype { + case itemError: + return "Error" + case itemNIL: + return "NIL" + case itemEOF: + return "EOF" + case itemText: + return "Text" + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: + return "String" + case itemBool: + return "Bool" + case itemInteger: + return "Integer" + case itemFloat: + return "Float" + case itemDatetime: + return "DateTime" + case itemTableStart: + return "TableStart" + case itemTableEnd: + return "TableEnd" + case itemKeyStart: + return "KeyStart" + case itemKeyEnd: + return "KeyEnd" + case itemArray: + return "Array" + case itemArrayEnd: + return "ArrayEnd" + case itemCommentStart: + return "CommentStart" + case itemInlineTableStart: + return "InlineTableStart" + case itemInlineTableEnd: + return "InlineTableEnd" + } + panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) +} + +func (item item) String() string { + return fmt.Sprintf("(%s, %s)", item.typ, item.val) +} + +func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } +func isNL(r rune) bool { return r == '\n' || r == '\r' } +func isControl(r rune) bool { // Control characters except \t, \r, \n + switch r { + case '\t', '\r', '\n': + return false + default: + return (r >= 0x00 && r <= 0x1f) 
|| r == 0x7f + } +} +func isDigit(r rune) bool { return r >= '0' && r <= '9' } +func isBinary(r rune) bool { return r == '0' || r == '1' } +func isOctal(r rune) bool { return r >= '0' && r <= '7' } +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' +} diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/meta.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/meta.go new file mode 100644 index 0000000000..e614537300 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/meta.go @@ -0,0 +1,148 @@ +package toml + +import ( + "strings" +) + +// MetaData allows access to meta information about TOML data that's not +// accessible otherwise. +// +// It allows checking if a key is defined in the TOML data, whether any keys +// were undecoded, and the TOML type of a key. +type MetaData struct { + context Key // Used only during decoding. + + keyInfo map[string]keyInfo + mapping map[string]any + keys []Key + decoded map[string]struct{} + data []byte // Input file; for errors. +} + +// IsDefined reports if the key exists in the TOML data. 
+// +// The key should be specified hierarchically, for example to access the TOML +// key "a.b.c" you would use IsDefined("a", "b", "c"). Keys are case sensitive. +// +// Returns false for an empty key. +func (md *MetaData) IsDefined(key ...string) bool { + if len(key) == 0 { + return false + } + + var ( + hash map[string]any + ok bool + hashOrVal any = md.mapping + ) + for _, k := range key { + if hash, ok = hashOrVal.(map[string]any); !ok { + return false + } + if hashOrVal, ok = hash[k]; !ok { + return false + } + } + return true +} + +// Type returns a string representation of the type of the key specified. +// +// Type will return the empty string if given an empty key or a key that does +// not exist. Keys are case sensitive. +func (md *MetaData) Type(key ...string) string { + if ki, ok := md.keyInfo[Key(key).String()]; ok { + return ki.tomlType.typeString() + } + return "" +} + +// Keys returns a slice of every key in the TOML data, including key groups. +// +// Each key is itself a slice, where the first element is the top of the +// hierarchy and the last is the most specific. The list will have the same +// order as the keys appeared in the TOML data. +// +// All keys returned are non-empty. +func (md *MetaData) Keys() []Key { + return md.keys +} + +// Undecoded returns all keys that have not been decoded in the order in which +// they appear in the original TOML document. +// +// This includes keys that haven't been decoded because of a [Primitive] value. +// Once the Primitive value is decoded, the keys will be considered decoded. +// +// Also note that decoding into an empty interface will result in no decoding, +// and so no keys will be considered decoded. +// +// In this sense, the Undecoded keys correspond to keys in the TOML document +// that do not have a concrete type in your representation. 
+func (md *MetaData) Undecoded() []Key { + undecoded := make([]Key, 0, len(md.keys)) + for _, key := range md.keys { + if _, ok := md.decoded[key.String()]; !ok { + undecoded = append(undecoded, key) + } + } + return undecoded +} + +// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get +// values of this type. +type Key []string + +func (k Key) String() string { + // This is called quite often, so it's a bit funky to make it faster. + var b strings.Builder + b.Grow(len(k) * 25) +outer: + for i, kk := range k { + if i > 0 { + b.WriteByte('.') + } + if kk == "" { + b.WriteString(`""`) + } else { + for _, r := range kk { + // "Inline" isBareKeyChar + if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') { + b.WriteByte('"') + b.WriteString(dblQuotedReplacer.Replace(kk)) + b.WriteByte('"') + continue outer + } + } + b.WriteString(kk) + } + } + return b.String() +} + +func (k Key) maybeQuoted(i int) string { + if k[i] == "" { + return `""` + } + for _, r := range k[i] { + if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' { + continue + } + return `"` + dblQuotedReplacer.Replace(k[i]) + `"` + } + return k[i] +} + +// Like append(), but only increase the cap by 1. +func (k Key) add(piece string) Key { + if cap(k) > len(k) { + return append(k, piece) + } + newKey := make(Key, len(k)+1) + copy(newKey, k) + newKey[len(k)] = piece + return newKey +} + +func (k Key) parent() Key { return k[:len(k)-1] } // all except the last piece. +func (k Key) last() string { return k[len(k)-1] } // last piece of this key. 
diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/parse.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/parse.go new file mode 100644 index 0000000000..11ac3108be --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/parse.go @@ -0,0 +1,844 @@ +package toml + +import ( + "fmt" + "math" + "os" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/BurntSushi/toml/internal" +) + +type parser struct { + lx *lexer + context Key // Full key for the current hash in scope. + currentKey string // Base key name for everything except hashes. + pos Position // Current position in the TOML file. + tomlNext bool + + ordered []Key // List of keys in the order that they appear in the TOML data. + + keyInfo map[string]keyInfo // Map keyname → info about the TOML key. + mapping map[string]any // Map keyname → key value. + implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names"). +} + +type keyInfo struct { + pos Position + tomlType tomlType +} + +func parse(data string) (p *parser, err error) { + _, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110") + + defer func() { + if r := recover(); r != nil { + if pErr, ok := r.(ParseError); ok { + pErr.input = data + err = pErr + return + } + panic(r) + } + }() + + // Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString() + // which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add + // it anyway. + if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16 + data = data[2:] + //lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447 + } else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8 + data = data[3:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. 
+ ex := 6 + if len(data) < 6 { + ex = len(data) + } + if i := strings.IndexRune(data[:ex], 0); i > -1 { + return nil, ParseError{ + Message: "files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8", + Position: Position{Line: 1, Start: i, Len: 1}, + Line: 1, + input: data, + } + } + + p = &parser{ + keyInfo: make(map[string]keyInfo), + mapping: make(map[string]any), + lx: lex(data, tomlNext), + ordered: make([]Key, 0), + implicits: make(map[string]struct{}), + tomlNext: tomlNext, + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicErr(it item, err error) { + panic(ParseError{ + err: err, + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicItemf(it item, format string, v ...any) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: it.pos, + Line: it.pos.Len, + LastKey: p.current(), + }) +} + +func (p *parser) panicf(format string, v ...any) { + panic(ParseError{ + Message: fmt.Sprintf(format, v...), + Position: p.pos, + Line: p.pos.Line, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val) + if it.typ == itemError { + if it.err != nil { + panic(ParseError{ + Position: it.pos, + Line: it.pos.Line, + LastKey: p.current(), + err: it.err, + }) + } + + p.panicItemf(it, "%s", it.val) + } + return it +} + +func (p *parser) nextPos() item { + it := p.next() + p.pos = it.pos + return it +} + +func (p *parser) bug(format string, v ...any) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch 
item.typ { + case itemCommentStart: // # .. + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.nextPos() + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash, item.pos) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.nextPos() + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash, item.pos) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key.last() + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key.parent() + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set value. + vItem := p.next() + val, typ := p.value(vItem, false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). 
+func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemStringEsc, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { + switch it.typ { + case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: + return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (any, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicItemf(it, "Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. 
Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (any, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicItemf(it, "Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicItemf(it, "Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicItemf(it, "Invalid float %q: '.' 
must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" + val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) + } else { + p.panicItemf(it, "Invalid float value: %q", it.val) + } + } + if signbit { + num = math.Copysign(num, -1) + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location + next bool +}{ + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, +} + +func (p *parser) valueDatetime(it item) (any, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } + ok = true + break + } + } + if !ok { + p.panicErr(it, errParseDate{it.val}) + } + return t, p.typeOfPrimitive(it) +} + +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' 
|| c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} + +func (p *parser) valueArray(it item) (any, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) + + var ( + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. See #338 + array = make([]any, 0, 2) + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + + // XXX: type isn't used here, we need it to record the accurate type + // information. + // + // Not entirely sure how to best store this; could use "key[0]", + // "key[1]" notation, or maybe store it on the Array type? + _ = typ + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) { + var ( + topHash = make(map[string]any) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.nextPos() + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key.last() + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key.parent() + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Set the value. 
+ val, typ := p.value(p.next(), false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, it.pos) + + hash := topHash + for _, c := range context { + h, ok := hash[c] + if !ok { + h = make(map[string]any) + hash[c] = h + } + hash, ok = h.(map[string]any) + if !ok { + p.panicf("%q is not a table", p.context) + } + } + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return topHash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && !(s[1] == 'b' || s[1] == 'o' || s[1] == 'x') { // Allow 0b, 0o, 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. +func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHexis a superset of all the permissable characters surrounding an + // underscore. + accept = isHex(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + /// Always start at the top level and drill down for our context. 
+ hashContext := p.mapping + keyContext := make(Key, 0, len(key)-1) + + /// We only need implicit hashes for the parents. + for _, k := range key.parent() { + _, ok := hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]any) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]any: + hashContext = t[len(t)-1] + case map[string]any: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key.last() + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]any, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", key) + } + } else { + p.setValue(key.last(), make(map[string]any)) + } + p.context = append(p.context, key.last()) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. 
+func (p *parser) setValue(key string, value any) { + var ( + tmpHash any + ok bool + hash = p.mapping + keyContext = make(Key, 0, len(p.context)+1) + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]any: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. + hash = t[len(t)-1] + case map[string]any: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. It should be +// called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType, pos Position) { + keyContext := make(Key, 0, len(p.context)+1) + keyContext = append(keyContext, p.context...) 
+ if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + // Special case to make empty keys ("" = 1) work. + // Without it it will set "" rather than `""`. + // TODO: why is this needed? And why is this only needed here? + if len(keyContext) == 0 { + keyContext = Key{""} + } + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. 
+func (p *parser) stripEscapedNewlines(s string) string { + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() + } + i += ix + + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue + } + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } + } + if j == i+1 { + // Not a whitespace escape. + i++ + continue + } + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) + i++ + continue + } + b.WriteString(s[:i]) + s = s[j:] + i = 0 + } +} + +func (p *parser) replaceEscapes(it item, str string) string { + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- + continue + } + if c != '\\' { + b.WriteRune(c) + continue + } + + if i >= len(str) { + p.bug("Escape sequence at end of string.") + return "" + } + switch str[i+1] { + default: + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) + case ' ', '\t': + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) + case 'b': + b.WriteByte(0x08) + skip = 1 + case 't': + b.WriteByte(0x09) + skip = 1 + case 'n': + b.WriteByte(0x0a) + skip = 1 + case 'f': + b.WriteByte(0x0c) + skip = 1 + case 'r': + b.WriteByte(0x0d) + skip = 1 + case 'e': + if p.tomlNext { + b.WriteByte(0x1b) + skip = 1 + } + case '"': + b.WriteByte(0x22) + skip = 1 + case '\\': + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. 
+ case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 + } + case 'u': + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 + case 'U': + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 + } + } + return b.String() +} + +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicItemf(it, "Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_fields.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000000..10c51f7eeb --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,238 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. 
+type byName []field + +func (x byName) Len() int { return len(x) } +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + var count map[reflect.Type]int + var nextCount map[reflect.Type]int + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_toml.go b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_toml.go new file mode 100644 index 0000000000..1c090d331e --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/BurntSushi/toml/type_toml.go @@ -0,0 +1,65 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. +func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsTable(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. 
+// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString, itemStringEsc: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/LICENSE b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/LICENSE new file mode 100644 index 0000000000..df76d7d771 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/errors.go b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/errors.go new file mode 100644 index 0000000000..75304b4437 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. +func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. 
+type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go new file mode 100644 index 0000000000..e9bb0efe77 --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/decode.go @@ -0,0 +1,1385 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Represents JSON data structure using native Go types: booleans, floats, +// strings, arrays, and maps. + +package json + +import ( + "encoding" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Unmarshal parses the JSON-encoded data and stores the result +// in the value pointed to by v. If v is nil or not a pointer, +// Unmarshal returns an InvalidUnmarshalError. +// +// Unmarshal uses the inverse of the encodings that +// Marshal uses, allocating maps, slices, and pointers as necessary, +// with the following additional rules: +// +// To unmarshal JSON into a pointer, Unmarshal first handles the case of +// the JSON being the JSON literal null. In that case, Unmarshal sets +// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into +// the value pointed at by the pointer. If the pointer is nil, Unmarshal +// allocates a new value for it to point to. 
+// +// To unmarshal JSON into a value implementing the Unmarshaler interface, +// Unmarshal calls that value's UnmarshalJSON method, including +// when the input is a JSON null. +// Otherwise, if the value implements encoding.TextUnmarshaler +// and the input is a JSON quoted string, Unmarshal calls that value's +// UnmarshalText method with the unquoted form of the string. +// +// To unmarshal JSON into a struct, Unmarshal matches incoming object +// keys to the keys used by Marshal (either the struct field name or its tag), +// preferring an exact match but also accepting a case-insensitive match. By +// default, object keys which don't have a corresponding struct field are +// ignored (see Decoder.DisallowUnknownFields for an alternative). +// +// To unmarshal JSON into an interface value, +// Unmarshal stores one of these in the interface value: +// +// bool, for JSON booleans +// float64, for JSON numbers +// string, for JSON strings +// []interface{}, for JSON arrays +// map[string]interface{}, for JSON objects +// nil for JSON null +// +// To unmarshal a JSON array into a slice, Unmarshal resets the slice length +// to zero and then appends each element to the slice. +// As a special case, to unmarshal an empty JSON array into a slice, +// Unmarshal replaces the slice with a new empty slice. +// +// To unmarshal a JSON array into a Go array, Unmarshal decodes +// JSON array elements into corresponding Go array elements. +// If the Go array is smaller than the JSON array, +// the additional JSON array elements are discarded. +// If the JSON array is smaller than the Go array, +// the additional Go array elements are set to zero values. +// +// To unmarshal a JSON object into a map, Unmarshal first establishes a map to +// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal +// reuses the existing map, keeping existing entries. Unmarshal then stores +// key-value pairs from the JSON object into the map. 
The map's key type must +// either be any string type, an integer, implement json.Unmarshaler, or +// implement encoding.TextUnmarshaler. +// +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// +// If a JSON value is not appropriate for a given target type, +// or if a JSON number overflows the target type, Unmarshal +// skips that field and completes the unmarshaling as best it can. +// If no more serious errors are encountered, Unmarshal returns +// an UnmarshalTypeError describing the earliest such error. In any +// case, it's not guaranteed that all the remaining fields following +// the problematic one will be unmarshaled into the target object. +// +// The JSON null value unmarshals into an interface, map, pointer, or slice +// by setting that Go value to nil. Because null is often used in JSON to mean +// “not present,” unmarshaling a JSON null into any other Go type has no effect +// on the value and produces no error. +// +// When unmarshaling quoted strings, invalid UTF-8 or +// invalid UTF-16 surrogate pairs are not treated as an error. +// Instead, they are replaced by the Unicode replacement +// character U+FFFD. +func Unmarshal(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return err + } + + d.init(data) + return d.unmarshal(v) +} + +var ds = sync.Pool{ + New: func() any { + return new(decodeState) + }, +} + +func UnmarshalWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. 
+ + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + err := checkValid(data, &d.scan) + if err != nil { + return nil, err + } + + d.init(data) + err = d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +func UnmarshalValid(data []byte, v any) error { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + return d.unmarshal(v) +} + +func UnmarshalValidWithKeys(data []byte, v any) ([]string, error) { + // Check for well-formedness. + // Avoids filling out half a data structure + // before discovering a JSON syntax error. + + d := ds.Get().(*decodeState) + defer ds.Put(d) + //var d decodeState + d.useNumber = true + + d.init(data) + err := d.unmarshal(v) + if err != nil { + return nil, err + } + + return d.lastKeys, nil +} + +// Unmarshaler is the interface implemented by types +// that can unmarshal a JSON description of themselves. +// The input can be assumed to be a valid encoding of +// a JSON value. UnmarshalJSON must copy the JSON data +// if it wishes to retain the data after returning. +// +// By convention, to approximate the behavior of Unmarshal itself, +// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op. +type Unmarshaler interface { + UnmarshalJSON([]byte) error +} + +// An UnmarshalTypeError describes a JSON value that was +// not appropriate for a value of a specific Go type. 
+type UnmarshalTypeError struct { + Value string // description of JSON value - "bool", "array", "number -5" + Type reflect.Type // type of Go value it could not be assigned to + Offset int64 // error occurred after reading Offset bytes + Struct string // name of the struct type containing the field + Field string // the full path from root node to the field +} + +func (e *UnmarshalTypeError) Error() string { + if e.Struct != "" || e.Field != "" { + return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String() + } + return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() +} + +// An UnmarshalFieldError describes a JSON object key that +// led to an unexported (and therefore unwritable) struct field. +// +// Deprecated: No longer used; kept for compatibility. +type UnmarshalFieldError struct { + Key string + Type reflect.Type + Field reflect.StructField +} + +func (e *UnmarshalFieldError) Error() string { + return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() +} + +// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. +// (The argument to Unmarshal must be a non-nil pointer.) +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "json: Unmarshal(nil)" + } + + if e.Type.Kind() != reflect.Pointer { + return "json: Unmarshal(non-pointer " + e.Type.String() + ")" + } + return "json: Unmarshal(nil " + e.Type.String() + ")" +} + +func (d *decodeState) unmarshal(v any) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Pointer || rv.IsNil() { + return &InvalidUnmarshalError{reflect.TypeOf(v)} + } + + d.scan.reset() + d.scanWhile(scanSkipSpace) + // We decode rv not rv.Elem because the Unmarshaler interface + // test must be applied at the top level of the value. 
+ err := d.value(rv) + if err != nil { + return d.addErrorContext(err) + } + return d.savedError +} + +// A Number represents a JSON number literal. +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + +// decodeState represents the state while decoding a JSON value. +type decodeState struct { + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext + savedError error + useNumber bool + disallowUnknownFields bool + lastKeys []string +} + +// readIndex returns the position of the last byte read. +func (d *decodeState) readIndex() int { + return d.off - 1 +} + +// phasePanicMsg is used as a panic message when we end up with something that +// shouldn't happen. It can indicate a bug in the JSON decoder, or that +// something is editing the data slice while the decoder executes. +const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?" + +func (d *decodeState) init(data []byte) *decodeState { + d.data = data + d.off = 0 + d.savedError = nil + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } + return d +} + +// saveError saves the first err it is called with, +// for reporting at the end of the unmarshal. 
+func (d *decodeState) saveError(err error) { + if d.savedError == nil { + d.savedError = d.addErrorContext(err) + } +} + +// addErrorContext returns a new error enhanced with information from d.errorContext +func (d *decodeState) addErrorContext(err error) error { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { + switch err := err.(type) { + case *UnmarshalTypeError: + err.Struct = d.errorContext.Struct.Name() + err.Field = strings.Join(d.errorContext.FieldStack, ".") + } + } + return err +} + +// skip scans to the end of what was started. +func (d *decodeState) skip() { + s, data, i := &d.scan, d.data, d.off + depth := len(s.parseState) + for { + op := s.step(s, data[i]) + i++ + if len(s.parseState) < depth { + d.off = i + d.opcode = op + return + } + } +} + +// scanNext processes the byte at d.data[d.off]. +func (d *decodeState) scanNext() { + if d.off < len(d.data) { + d.opcode = d.scan.step(&d.scan, d.data[d.off]) + d.off++ + } else { + d.opcode = d.scan.eof() + d.off = len(d.data) + 1 // mark processed EOF with len+1 + } +} + +// scanWhile processes bytes in d.data[d.off:] until it +// receives a scan code not equal to op. +func (d *decodeState) scanWhile(op int) { + s, data, i := &d.scan, d.data, d.off + for i < len(data) { + newOp := s.step(s, data[i]) + i++ + if newOp != op { + d.opcode = newOp + d.off = i + return + } + } + + d.off = len(data) + 1 // mark processed EOF with len+1 + d.opcode = d.scan.eof() +} + +// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the +// common case where we're decoding a literal. The decoder scans the input +// twice, once for syntax errors and to check the length of the value, and the +// second to perform the decoding. +// +// Only in the second step do we use decodeState to tokenize literals, so we +// know there aren't any syntax errors. We can take advantage of that knowledge, +// and scan a literal's bytes much more quickly. 
+func (d *decodeState) rescanLiteral() { + data, i := d.data, d.off +Switch: + switch data[i-1] { + case '"': // string + for ; i < len(data); i++ { + switch data[i] { + case '\\': + i++ // escaped char + case '"': + i++ // tokenize the closing quote too + break Switch + } + } + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number + for ; i < len(data); i++ { + switch data[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '.', 'e', 'E', '+', '-': + default: + break Switch + } + } + case 't': // true + i += len("rue") + case 'f': // false + i += len("alse") + case 'n': // null + i += len("ull") + } + if i < len(data) { + d.opcode = stateEndValue(&d.scan, data[i]) + } else { + d.opcode = scanEnd + } + d.off = i + 1 +} + +// value consumes a JSON value from d.data[d.off-1:], decoding into v, and +// reads the following byte ahead. If v is invalid, the value is discarded. +// The first byte of the value has been read already. +func (d *decodeState) value(v reflect.Value) error { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray: + if v.IsValid() { + if err := d.array(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginObject: + if v.IsValid() { + if err := d.object(v); err != nil { + return err + } + } else { + d.skip() + } + d.scanNext() + + case scanBeginLiteral: + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + if v.IsValid() { + if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil { + return err + } + } + } + return nil +} + +type unquotedValue struct{} + +// valueQuoted is like value but decodes a +// quoted string literal or literal null into an interface value. +// If it finds anything other than a quoted string literal or null, +// valueQuoted returns unquotedValue{}. 
+func (d *decodeState) valueQuoted() any { + switch d.opcode { + default: + panic(phasePanicMsg) + + case scanBeginArray, scanBeginObject: + d.skip() + d.scanNext() + + case scanBeginLiteral: + v := d.literalInterface() + switch v.(type) { + case nil, string: + return v + } + } + return unquotedValue{} +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// If it encounters an Unmarshaler, indirect stops and returns that. +// If decodingNull is true, indirect stops at the first settable pointer so it +// can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // Issue #24153 indicates that it is generally not a guaranteed property + // that you may round-trip a reflect.Value by calling Value.Addr().Elem() + // and expect the value to still be settable for values derived from + // unexported embedded struct fields. + // + // The logic below effectively does this when it first addresses the value + // (to satisfy possible pointer methods) and continues to dereference + // subsequent pointers as necessary. + // + // After the first round-trip, we set v back to the original value to + // preserve the original RW flags contained in reflect.Value. + v0 := v + haveAddr := false + + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { + haveAddr = true + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { + haveAddr = false + v = e + continue + } + } + + if v.Kind() != reflect.Pointer { + break + } + + if decodingNull && v.CanSet() { + break + } + + // Prevent infinite loop if v is an interface pointing to its own address: + // var v interface{} + // v = &v + if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { + v = v.Elem() + break + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + if v.Type().NumMethod() > 0 && v.CanInterface() { + if u, ok := v.Interface().(Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if !decodingNull { + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + } + + if haveAddr { + v = v0 // restore original value after round-trip Value.Addr().Elem() + haveAddr = false + } else { + v = v.Elem() + } + } + return nil, nil, v +} + +// array consumes an array from d.data[d.off-1:], decoding into v. +// The first byte of the array ('[') has been read already. +func (d *decodeState) array(v reflect.Value) error { + // Check for unmarshaler. + u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + + // Check type of target. + switch v.Kind() { + case reflect.Interface: + if v.NumMethod() == 0 { + // Decoding into nil interface? Switch to non-reflect code. + ai := d.arrayInterface() + v.Set(reflect.ValueOf(ai)) + return nil + } + // Otherwise it's invalid. 
+ fallthrough + default: + d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + case reflect.Array, reflect.Slice: + break + } + + i := 0 + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + // Get element of array, growing if necessary. + if v.Kind() == reflect.Slice { + // Grow slice if necessary + if i >= v.Cap() { + newcap := v.Cap() + v.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) + reflect.Copy(newv, v) + v.Set(newv) + } + if i >= v.Len() { + v.SetLen(i + 1) + } + } + + if i < v.Len() { + // Decode into element. + if err := d.value(v.Index(i)); err != nil { + return err + } + } else { + // Ran out of fixed array: skip. + if err := d.value(reflect.Value{}); err != nil { + return err + } + } + i++ + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + + if i < v.Len() { + if v.Kind() == reflect.Array { + // Array. Zero the rest. + z := reflect.Zero(v.Type().Elem()) + for ; i < v.Len(); i++ { + v.Index(i).Set(z) + } + } else { + v.SetLen(i) + } + } + if i == 0 && v.Kind() == reflect.Slice { + v.Set(reflect.MakeSlice(v.Type(), 0, 0)) + } + return nil +} + +var nullLiteral = []byte("null") +var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() + +// object consumes an object from d.data[d.off-1:], decoding into v. +// The first byte ('{') of the object has been read already. +func (d *decodeState) object(v reflect.Value) error { + // Check for unmarshaler. 
+ u, ut, pv := indirect(v, false) + if u != nil { + start := d.readIndex() + d.skip() + return u.UnmarshalJSON(d.data[start:d.off]) + } + if ut != nil { + d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)}) + d.skip() + return nil + } + v = pv + t := v.Type() + + // Decoding into nil interface? Switch to non-reflect code. + if v.Kind() == reflect.Interface && v.NumMethod() == 0 { + oi := d.objectInterface() + v.Set(reflect.ValueOf(oi)) + return nil + } + + var fields structFields + + // Check type of target: + // struct or + // map[T1]T2 where T1 is string, an integer type, + // or an encoding.TextUnmarshaler + switch v.Kind() { + case reflect.Map: + // Map key must either have string kind, have an integer kind, + // or be an encoding.TextUnmarshaler. + switch t.Key().Kind() { + case reflect.String, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + default: + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + } + if v.IsNil() { + v.Set(reflect.MakeMap(t)) + } + case reflect.Struct: + fields = cachedTypeFields(t) + // ok + default: + d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) + d.skip() + return nil + } + + var mapElem reflect.Value + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } + + var keys []string + + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read key. 
+ start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquoteBytes(item) + if !ok { + panic(phasePanicMsg) + } + + keys = append(keys, string(key)) + + // Figure out field corresponding to key. + var subv reflect.Value + destring := false // whether the value is wrapped in a string to be decoded first + + if v.Kind() == reflect.Map { + elemType := t.Elem() + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + subv = mapElem + } else { + var f *field + if i, ok := fields.nameIndex[string(key)]; ok { + // Found an exact name match. + f = &fields.list[i] + } else { + // Fall back to the expensive case-insensitive + // linear search. + for i := range fields.list { + ff := &fields.list[i] + if ff.equalFold(ff.nameBytes, key) { + f = ff + break + } + } + } + if f != nil { + subv = v + destring = f.quoted + for _, i := range f.index { + if subv.Kind() == reflect.Pointer { + if subv.IsNil() { + // If a struct embeds a pointer to an unexported type, + // it is not possible to set a newly allocated value + // since the field is unexported. + // + // See https://golang.org/issue/21357 + if !subv.CanSet() { + d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem())) + // Invalidate subv to ensure d.value(subv) skips over + // the JSON value without assigning it to subv. + subv = reflect.Value{} + destring = false + break + } + subv.Set(reflect.New(subv.Type().Elem())) + } + subv = subv.Elem() + } + subv = subv.Field(i) + } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } + d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) + d.errorContext.Struct = t + } else if d.disallowUnknownFields { + d.saveError(fmt.Errorf("json: unknown field %q", key)) + } + } + + // Read : before value. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + if destring { + switch qv := d.valueQuoted().(type) { + case nil: + if err := d.literalStore(nullLiteral, subv, false); err != nil { + return err + } + case string: + if err := d.literalStore([]byte(qv), subv, true); err != nil { + return err + } + default: + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) + } + } else { + if err := d.value(subv); err != nil { + return err + } + } + + // Write value back to map; + // if using struct, subv points into struct already. + if v.Kind() == reflect.Map { + kt := t.Key() + var kv reflect.Value + switch { + case reflect.PointerTo(kt).Implements(textUnmarshalerType): + kv = reflect.New(kt) + if err := d.literalStore(item, kv, true); err != nil { + return err + } + kv = kv.Elem() + case kt.Kind() == reflect.String: + kv = reflect.ValueOf(key).Convert(kt) + default: + switch kt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := string(key) + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + s := string(key) + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || reflect.Zero(kt).OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)}) + break + } + kv = reflect.ValueOf(n).Convert(kt) + default: + panic("json: Unexpected key type") // should never occur + } + } + if kv.IsValid() { + v.SetMapIndex(kv, subv) + } + } + + // Next token must be , or }. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + + if v.Kind() == reflect.Map { + d.lastKeys = keys + } + return nil +} + +// convertNumber converts the number literal s to a float64 or a Number +// depending on the setting of d.useNumber. +func (d *decodeState) convertNumber(s string) (any, error) { + if d.useNumber { + return Number(s), nil + } + f, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)} + } + return f, nil +} + +var numberType = reflect.TypeOf(Number("")) + +// literalStore decodes a literal stored in item into v. +// +// fromQuoted indicates whether this literal came from unwrapping a +// string from the ",string" struct tag option. this is used only to +// produce more helpful error messages. +func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error { + // Check for unmarshaler. 
+ if len(item) == 0 { + //Empty string given + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + isNull := item[0] == 'n' // null + u, ut, pv := indirect(v, isNull) + if u != nil { + return u.UnmarshalJSON(item) + } + if ut != nil { + if item[0] != '"' { + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + return nil + } + val := "number" + switch item[0] { + case 'n': + val = "null" + case 't', 'f': + val = "bool" + } + d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) + return nil + } + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + return ut.UnmarshalText(s) + } + + v = pv + + switch c := item[0]; c { + case 'n': // null + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. + if fromQuoted && string(item) != "null" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + v.Set(reflect.Zero(v.Type())) + // otherwise, ignore null for primitives/string + } + case 't', 'f': // true, false + value := item[0] == 't' + // The main parser checks that only true and false can reach here, + // but if this was a quoted string input, it could be anything. 
+ if fromQuoted && string(item) != "true" && string(item) != "false" { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + break + } + switch v.Kind() { + default: + if fromQuoted { + d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + case reflect.Bool: + v.SetBool(value) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(value)) + } else { + d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + case '"': // string + s, ok := unquoteBytes(item) + if !ok { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + switch v.Kind() { + default: + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Slice: + if v.Type().Elem().Kind() != reflect.Uint8 { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) + n, err := base64.StdEncoding.Decode(b, s) + if err != nil { + d.saveError(err) + break + } + v.SetBytes(b[:n]) + case reflect.String: + if v.Type() == numberType && !isValidNumber(string(s)) { + return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item) + } + v.SetString(string(s)) + case reflect.Interface: + if v.NumMethod() == 0 { + v.Set(reflect.ValueOf(string(s))) + } else { + d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())}) + } + } + + default: // number + if c != '-' && (c < '0' || c > '9') { + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q 
into %v", item, v.Type()) + } + panic(phasePanicMsg) + } + s := string(item) + switch v.Kind() { + default: + if v.Kind() == reflect.String && v.Type() == numberType { + // s must be a valid number, because it's + // already been tokenized. + v.SetString(s) + break + } + if fromQuoted { + return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) + } + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + case reflect.Interface: + n, err := d.convertNumber(s) + if err != nil { + d.saveError(err) + break + } + if v.NumMethod() != 0 { + d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.Set(reflect.ValueOf(n)) + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || v.OverflowInt(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetInt(n) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n, err := strconv.ParseUint(s, 10, 64) + if err != nil || v.OverflowUint(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetUint(n) + + case reflect.Float32, reflect.Float64: + n, err := strconv.ParseFloat(s, v.Type().Bits()) + if err != nil || v.OverflowFloat(n) { + d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())}) + break + } + v.SetFloat(n) + } + } + return nil +} + +// The xxxInterface routines build up a value to be stored +// in an empty interface. They are not strictly necessary, +// but they avoid the weight of reflection in this common case. 
+ +// valueInterface is like value but returns interface{} +func (d *decodeState) valueInterface() (val any) { + switch d.opcode { + default: + panic(phasePanicMsg) + case scanBeginArray: + val = d.arrayInterface() + d.scanNext() + case scanBeginObject: + val = d.objectInterface() + d.scanNext() + case scanBeginLiteral: + val = d.literalInterface() + } + return +} + +// arrayInterface is like array but returns []interface{}. +func (d *decodeState) arrayInterface() []any { + var v = make([]any, 0) + for { + // Look ahead for ] - can only happen on first iteration. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndArray { + break + } + + v = append(v, d.valueInterface()) + + // Next token must be , or ]. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndArray { + break + } + if d.opcode != scanArrayValue { + panic(phasePanicMsg) + } + } + return v +} + +// objectInterface is like object but returns map[string]interface{}. +func (d *decodeState) objectInterface() map[string]any { + m := make(map[string]any) + for { + // Read opening " of string key or closing }. + d.scanWhile(scanSkipSpace) + if d.opcode == scanEndObject { + // closing } - can only happen on first iteration. + break + } + if d.opcode != scanBeginLiteral { + panic(phasePanicMsg) + } + + // Read string key. + start := d.readIndex() + d.rescanLiteral() + item := d.data[start:d.readIndex()] + key, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + + // Read : before value. + if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode != scanObjectKey { + panic(phasePanicMsg) + } + d.scanWhile(scanSkipSpace) + + // Read value. + m[key] = d.valueInterface() + + // Next token must be , or }. 
+ if d.opcode == scanSkipSpace { + d.scanWhile(scanSkipSpace) + } + if d.opcode == scanEndObject { + break + } + if d.opcode != scanObjectValue { + panic(phasePanicMsg) + } + } + return m +} + +// literalInterface consumes and returns a literal from d.data[d.off-1:] and +// it reads the following byte ahead. The first byte of the literal has been +// read already (that's how the caller knows it's a literal). +func (d *decodeState) literalInterface() any { + // All bytes inside literal return scanContinue op code. + start := d.readIndex() + d.rescanLiteral() + + item := d.data[start:d.readIndex()] + + switch c := item[0]; c { + case 'n': // null + return nil + + case 't', 'f': // true, false + return c == 't' + + case '"': // string + s, ok := unquote(item) + if !ok { + panic(phasePanicMsg) + } + return s + + default: // number + if c != '-' && (c < '0' || c > '9') { + panic(phasePanicMsg) + } + n, err := d.convertNumber(string(item)) + if err != nil { + d.saveError(err) + } + return n + } +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var r rune + for _, c := range s[2:6] { + switch { + case '0' <= c && c <= '9': + c = c - '0' + case 'a' <= c && c <= 'f': + c = c - 'a' + 10 + case 'A' <= c && c <= 'F': + c = c - 'A' + 10 + default: + return -1 + } + r = r*16 + rune(c) + } + return r +} + +// unquote converts a quoted JSON string literal s into an actual string t. +// The rules are different than for Go, so cannot use strconv.Unquote. +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) + t = string(s) + return +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. 
+ r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go new file mode 100644 index 0000000000..a1819b16ac --- /dev/null +++ b/cluster-provision/gocli/vendor/github.com/evanphx/json-patch/v5/internal/json/encode.go @@ -0,0 +1,1473 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json implements encoding and decoding of JSON as defined in +// RFC 7159. The mapping between JSON and Go values is described +// in the documentation for the Marshal and Unmarshal functions. +// +// See "JSON and Go" for an introduction to this package: +// https://golang.org/doc/articles/json_and_go.html +package json + +import ( + "bytes" + "encoding" + "encoding/base64" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// Marshal returns the JSON encoding of v. +// +// Marshal traverses the value v recursively. +// If an encountered value implements the Marshaler interface +// and is not a nil pointer, Marshal calls its MarshalJSON method +// to produce JSON. If no MarshalJSON method is present but the +// value implements encoding.TextMarshaler instead, Marshal calls +// its MarshalText method and encodes the result as a JSON string. +// The nil pointer exception is not strictly necessary +// but mimics a similar, necessary exception in the behavior of +// UnmarshalJSON. +// +// Otherwise, Marshal uses the following type-dependent default encodings: +// +// Boolean values encode as JSON booleans. +// +// Floating point, integer, and Number values encode as JSON numbers. 
+// +// String values encode as JSON strings coerced to valid UTF-8, +// replacing invalid bytes with the Unicode replacement rune. +// So that the JSON will be safe to embed inside HTML