diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ded0a6eed..db651384a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -18,7 +18,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v2 with: - go-version: '1.18.0' + go-version: '1.18.2' - name: Check Go fmt run: make gofmt diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml index 672b93a93..d39837f35 100644 --- a/.github/workflows/unit_tests.yml +++ b/.github/workflows/unit_tests.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - go: ["1.18"] + go: ["1.18.2"] steps: - name: Checkout diff --git a/.gitignore b/.gitignore index e2555dc6f..57253f438 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ coverage/* .vscode/ .DS_Store + +build/1click diff --git a/Makefile b/Makefile index aac618a47..6c463ad03 100644 --- a/Makefile +++ b/Makefile @@ -12,8 +12,8 @@ compile-linux: ## compile: run: ## run @./build/1click -run-cli: compile ## run randomized cli - @./build/1click cli -r --config ./config.yaml +run-cli: compile ## run cli + @./build/1click cli --config ./config.yaml test: ## run tests @mkdir -p coverage diff --git a/README.md b/README.md index ee9261efb..fd7515fbc 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,7 @@ We don't want to stop at Ethereum. We also want to help stakers of other PoS net #### Using Go -If you have at least `go1.18.0` installed then this command will install the `1click` executable along with the library and its dependencies: +If you have at least `go1.18.2` installed, this command will install the `1click` executable along with the library and its dependencies: ``` go install github.com/NethermindEth/1click/cmd/1click@latest @@ -64,25 +64,15 @@ sudo $GOPATH/bin/1click /usr/local/bin/ sudo cp 1click/build/1click /usr/local/bin/ ``` -#### Download the binary (only for linux) - -> This is temporary until the first release - -Download directly the binary and put it in `/usr/local/bin`: - -``` -sudo curl -LJ -o /usr/local/bin/1click https://github.com/NethermindEth/1click/raw/main/build/1click -sudo chmod +x /usr/local/bin/1click -``` - ### Dependencies -`1click` dependencies are `docker` and `docker-compose`, but if you don't have those installed, `1click` will show instructions to install them, or install them for you. +`1click`'s dependencies are `docker` with the `docker compose` plugin; if you don't have those installed, `1click` will show instructions to install them, or install them for you. ### Quick run With `1click cli` you can go through the entire workflow setup: 1. Check dependencies -2. Generate a `docker-compose` script with randomized clients selection and `.env` -3. Execute the `docker-compose` script (only execution and consensus nodes will be executed by default) +2. Generate a `jwtsecret` file (not needed for mainnet and prater) +3. Generate a `docker-compose` script with a randomized client selection and `.env` +4. Execute the `docker-compose` script (only execution and consensus nodes will be executed by default)
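A note on step 2 of the list above: the `jwtsecret` is the shared secret that authenticates the Engine API connection between the execution and consensus clients. `1click` generates it with a templated shell script (`scripts/jwt_secret.sh`, invoked by `handleJWTSecret` further down in this diff). A minimal Go sketch of the equivalent logic — assuming the conventional Engine API format of 32 random bytes written as hex, which is an assumption and not the repo's actual script — would be:

```go
// Minimal sketch of what the jwtsecret generation step produces, assuming the
// usual Engine API convention: 32 random bytes, hex-encoded, written to a file.
// 1click itself runs a templated shell script; this is only an illustration.
package main

import (
	"crypto/rand"
	"encoding/hex"
	"log"
	"os"
)

func main() {
	secret := make([]byte, 32) // 256-bit secret required by Engine API JWT auth
	if _, err := rand.Read(secret); err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("jwtsecret", []byte(hex.EncodeToString(secret)), 0o600); err != nil {
		log.Fatal(err)
	}
}
```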
## 🔥 What can you do right now with this tool? @@ -90,7 +80,7 @@ With `1click cli` you can go through the entire workflow setup: - Generate the keystore folder using the [staking-deposit-cli](https://github.com/ethereum/staking-deposit-cli) tool with `1click keys` - Don't remember `docker-compose` commands or flags? No problem, you can check the logs of the running services of the generated `docker-compose` script with `1click logs` and shut down the services with `1click down` -> The setup is currently designed to start all the three nodes required to start a validator (execution, consensus and validator node). This will change soon and `1click` will let you connect to a public or remote node, or to automatically start the validator node when the execution and consensus nodes in the setup are synced. Although you can do all of this after generating the docker-compose script 😉 +> The setup is currently designed to start all three nodes required to run a validator (execution, consensus and validator node). This will change soon and `1click` will let you connect to a public or remote node. The execution and consensus nodes will be executed first, and the validator node will be executed automatically after those nodes are synced, giving you time to prepare the keystore and make the deposit for your staked ETH. ## Supported networks and clients @@ -103,20 +93,31 @@ With `1click cli` you can go through the entire workflow setup: | | Prysm | Prysm | | | Teku | Teku | +### Kiln + +| Execution | Consensus | Validator | +| ---------- | ---------- | ---------- | +| Geth | Lighthouse | Lighthouse | +| Nethermind | Lodestar | Lodestar | +| | Prysm | Prysm | +| | Teku | Teku | + ## ✅ Roadmap The following roadmap covers the main features and ideas we want to implement but doesn't cover everything we are planning for this tool. Stay tuned if you are interested; a lot of improvements are to come in the next two months. ### Version 0.1 (coming soon in May-June 2022) - [x] Generate `docker-compose` scripts and `.env` files for selected clients with a cli tool - [x] Generate keystore folder with the cli -- [ ] Test coverage (unit and integration tests) -- [ ] Monitoring tool for alerting, tracking validator balance, and tracking sync progress and status of nodes +- [x] Test coverage (unit tests) +- [x] Integrate Kiln network - [ ] Integrate MEV-Boost as recommended setting -- [ ] Use public execution and consensus nodes ### Version 0.X +- [ ] Use public execution and consensus nodes +- [ ] Monitoring tool for alerting, tracking validator balance, and tracking sync progress and status of nodes - [ ] TUI for guided and more interactive setup (better UX) -- [ ] Integrate Kiln network +- [ ] Integrate Ropsten network +- [ ] Integrate Sepolia network - [ ] Integrate Prater network - [ ] Off-premise setup support - [ ] Improve documentation diff --git a/build/1click b/build/1click index e3a6d5690..230e42fd7 100755 Binary files a/build/1click and b/build/1click differ diff --git a/cli/cli.go b/cli/cli.go index ee27bb678..9310c11b8 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -16,6 +16,7 @@ limitations under the License.
package cli import ( + "errors" "fmt" "os" "strings" @@ -31,20 +32,30 @@ import ( posmonidb "github.com/NethermindEth/posmoni/pkg/eth2/db" posmoninet "github.com/NethermindEth/posmoni/pkg/eth2/networking" "github.com/spf13/cobra" - "github.com/spf13/pflag" ) var ( executionName string + executionImage string consensusName string + consensusImage string validatorName string + validatorImage string generationPath string checkpointSyncUrl string + network string + feeRecipient string + jwtPath string install bool run bool y bool services *[]string fallbackEL *[]string + elExtraFlags *[]string + clExtraFlags *[]string + vlExtraFlags *[]string + waitingTime time.Duration + mapAllPorts bool ) const ( @@ -58,7 +69,7 @@ var cliCmd = &cobra.Command{ Long: `Run the setup tool on-premise in a quick way. Provide only the command line options and the tool will do all the work. -First it will check if dependencies like docker and docker-compose are installed on your machine +First it will check if dependencies such as docker are installed on your machine and provide instructions for installing them if they are not installed. Second, it will generate docker-compose scripts to run the full setup according to your selection. @@ -66,11 +77,13 @@ Second, it will generate docker-compose scripts to run the full setup according Finally, it will run the generated docker-compose script. Only execution and consensus clients will be executed by default.`, Args: cobra.NoArgs, PreRun: func(cmd *cobra.Command, args []string) { + // notest if err := preRunCliCmd(cmd, args); err != nil { log.Fatal(err) } }, Run: func(cmd *cobra.Command, args []string) { + // notest if errs := runCliCmd(cmd, args); len(errs) > 0 { for _, err := range errs { log.Error(err) @@ -81,15 +94,6 @@ Finally, it will run the generated docker-compose script. 
Only execution and con } func preRunCliCmd(cmd *cobra.Command, args []string) error { - // Count flags being set - count := 0 - // HACKME: LocalFlags() doesn't work, so we count manually and check for parent flag config - cmd.Flags().Visit(func(f *pflag.Flag) { - if f.Name != "config" { - count++ - } - }) - // Quick run if y { install, run = true, true } @@ -104,15 +108,66 @@ func preRunCliCmd(cmd *cobra.Command, args []string) error { // Ambiguous value return fmt.Errorf(configs.RunClientsFlagAmbiguousError, *services) } + } else if utils.Contains(*services, "none") { + if len(*services) == 1 { + // 'none' used correctly + services = &[]string{} + } else { + // Ambiguous value + return fmt.Errorf(configs.RunClientsFlagAmbiguousError, *services) + } } else if !utils.ContainsOnly(*services, []string{execution, consensus, validator}) { return fmt.Errorf(configs.RunClientsError, strings.Join(*services, ","), strings.Join([]string{execution, consensus, validator}, ",")) } + + // Validate network + networks, err := utils.SupportedNetworks() + if err != nil { + return fmt.Errorf(configs.NetworkValidationFailedError, err) + } + if !utils.Contains(networks, network) { + return fmt.Errorf(configs.UnknownNetworkError, network) + } + + // Validate fee recipient + if feeRecipient != "" && !utils.IsAddress(feeRecipient) { + return errors.New(configs.InvalidFeeRecipientError) + } + + // Prepare custom images + if executionName != "" { + executionParts := strings.Split(executionName, ":") + executionName = executionParts[0] + executionImage = strings.Join(executionParts[1:], ":") + } + if consensusName != "" { + consensusParts := strings.Split(consensusName, ":") + consensusName = consensusParts[0] + consensusImage = strings.Join(consensusParts[1:], ":") + } + if validatorName != "" { + validatorParts := strings.Split(validatorName, ":") + validatorName = validatorParts[0] + validatorImage = strings.Join(validatorParts[1:], ":") + } + return nil }
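The "Prepare custom images" block above splits only on the first ':': the client name is everything before the first colon, and the image override is the rest, rejoined with `strings.Join(parts[1:], ":")` rather than `parts[1]` because image references (e.g. `nethermind/nethermind:1.13.1`, a hypothetical tag used here purely for illustration) contain colons of their own. A standalone sketch of that exact behavior:

```go
// Standalone sketch of the client/image split used in preRunCliCmd above: the
// first segment is the client name; the rest, rejoined on ":", is the optional
// docker image override (empty when no override is given).
package main

import (
	"fmt"
	"strings"
)

func splitClientImage(s string) (name, image string) {
	parts := strings.Split(s, ":")
	return parts[0], strings.Join(parts[1:], ":")
}

func main() {
	fmt.Println(splitClientImage("geth"))                                    // geth ""
	fmt.Println(splitClientImage("nethermind:nethermind/nethermind:1.13.1")) // nethermind nethermind/nethermind:1.13.1
}
```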
func runCliCmd(cmd *cobra.Command, args []string) []error { + // Warnings + // Warn if custom images are used + if executionImage != "" || consensusImage != "" || validatorImage != "" { + log.Warn(configs.CustomImagesWarning) + } + // Warn if exposed ports are used + if mapAllPorts { + log.Warn(configs.MapAllPortsWarning) + } + // Get all clients: supported + configured - clientsMap, errors := clients.GetClients([]string{execution, consensus, validator}) + c := clients.ClientInfo{Network: network} + clientsMap, errors := c.Clients([]string{execution, consensus, validator}) if len(errors) > 0 { return errors } @@ -143,19 +198,54 @@ func runCliCmd(cmd *cobra.Command, args []string) []error { } log.Info(configs.DependenciesOK) + // Generate JWT secret if necessary + if jwtPath == "" && configs.JWTNetworks[network] { + if err = handleJWTSecret(); err != nil { + return []error{err} + } + } + + // Get fee recipient + if !y && feeRecipient == "" { + if err = feeRecipientPrompt(); err != nil { + return []error{err} + } + } + // Generate docker-compose scripts gd := generate.GenerationData{ ExecutionClient: combinedClients.Execution.Name, + ExecutionImage: executionImage, ConsensusClient: combinedClients.Consensus.Name, + ConsensusImage: consensusImage, ValidatorClient: combinedClients.Validator.Name, + ValidatorImage: validatorImage, GenerationPath: generationPath, + Network: network, CheckpointSyncUrl: checkpointSyncUrl, + FeeRecipient: feeRecipient, + JWTSecretPath: jwtPath, FallbackELUrls: *fallbackEL, + ElExtraFlags: *elExtraFlags, + ClExtraFlags: *clExtraFlags, + VlExtraFlags: *vlExtraFlags, + MapAllPorts: mapAllPorts, } if err = generate.GenerateScripts(gd); err != nil { return []error{err} } + // If --run-clients=none was set then exit and don't run anything + if len(*services) == 0 { + log.Info(configs.HappyStaking2) + return nil + } + + // If teku is chosen, then prepare datadir with 777 permissions + if combinedClients.Consensus.Name == "teku" { + preRunTeku() + } + if run { if err = runAndShowContainers(*services); err != nil { return []error{err} } @@ -171,6 +261,9 @@ func runCliCmd(cmd *cobra.Command, args []string) []error { // Run validator after execution and consensus clients are synced, unless the user intentionally wants to run the validator service in the previous step if !utils.Contains(*services, validator) { + // Wait for clients to start + log.Info(configs.WaitingForNodesToStart) + time.Sleep(waitingTime) // Track sync of execution and consensus clients // TODO: Parameterize wait arg of trackSync if err = trackSync(monitor, time.Minute); err != nil { @@ -197,27 +290,43 @@ func runCliCmd(cmd *cobra.Command, args []string) []error { func init() { rootCmd.AddCommand(cliCmd) + cliCmd.Flags().SortFlags = false + // Local flags - cliCmd.Flags().StringVarP(&executionName, "execution", "e", "", "Execution engine client, e.g. Geth, Nethermind, Besu, Erigon") + cliCmd.Flags().StringVarP(&executionName, "execution", "e", "", "Execution engine client, e.g. geth, nethermind, besu, erigon. Additionally, you can use the syntax '<client>:<docker-image>' to override the docker image used for the client. If you want to use the default docker image, just use the client name.") - cliCmd.Flags().StringVarP(&consensusName, "consensus", "c", "", "Consensus engine client, e.g. Teku, Lodestar, Prysm, Lighthouse, Nimbus") + cliCmd.Flags().StringVarP(&consensusName, "consensus", "c", "", "Consensus engine client, e.g. teku, lodestar, prysm, lighthouse, nimbus. Additionally, you can use the syntax '<client>:<docker-image>' to override the docker image used for the client. If you want to use the default docker image, just use the client name.") - cliCmd.Flags().StringVarP(&validatorName, "validator", "v", "", "Validator engine client, e.g. Teku, Lodestar, Prysm, Lighthouse, Nimbus") + cliCmd.Flags().StringVarP(&validatorName, "validator", "v", "", "Validator engine client, e.g. teku, lodestar, prysm, lighthouse, nimbus. Additionally, you can use the syntax '<client>:<docker-image>' to override the docker image used for the client. If you want to use the default docker image, just use the client name.") cliCmd.Flags().StringVarP(&generationPath, "path", "p", configs.DefaultDockerComposeScriptsPath, "docker-compose scripts generation path") cliCmd.Flags().StringVar(&checkpointSyncUrl, "checkpoint-sync-url", "", "Initial state endpoint (trusted synced consensus endpoint) for the consensus client to sync from a finalized checkpoint. Provides a faster sync process for the consensus client and protects it from long-range attacks afforded by Weak Subjectivity") + cliCmd.Flags().StringVarP(&network, "network", "n", "mainnet", "Target network, e.g. mainnet, prater, kiln, etc.") + + cliCmd.Flags().StringVar(&feeRecipient, "fee-recipient", "", "Suggested fee recipient: a 20-byte Ethereum address which the execution layer might choose to set as the coinbase and the recipient of other fees or rewards. There is no guarantee that an execution node will use the suggested fee recipient to collect fees; it may use any address it chooses. It is assumed that an honest execution node will use the suggested fee recipient, but users should note this trust assumption.")
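The fee-recipient flag just defined is validated in `preRunCliCmd` with `utils.IsAddress`. That helper is internal to this repo; a hypothetical stand-in that only checks the address shape — not the repo's actual implementation, which may also handle checksums — could look like:

```go
// Hypothetical sketch of an address check in the spirit of utils.IsAddress:
// a 20-byte Ethereum address is "0x" followed by exactly 40 hex characters.
// The real helper in internal/utils may differ (e.g. checksum validation).
package main

import (
	"fmt"
	"regexp"
)

var addressRe = regexp.MustCompile(`^0x[0-9a-fA-F]{40}$`)

func IsAddress(s string) bool {
	return addressRe.MatchString(s)
}

func main() {
	fmt.Println(IsAddress("0x5c00ABEf07604C59Ac72E859E5F93D5ab8546F83")) // true
	fmt.Println(IsAddress("666"))                                       // false
}
```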
+ + cliCmd.Flags().StringVar(&jwtPath, "jwt-secret-path", "", "Path to the JWT secret file") + cliCmd.Flags().BoolVarP(&install, "install", "i", false, "Install dependencies if not installed without asking") cliCmd.Flags().BoolVarP(&run, "run", "r", false, "Run the generated docker-compose scripts without asking") cliCmd.Flags().BoolVarP(&y, "yes", "y", false, "Shortcut for '1click cli -r -i --run'. Run without prompts") - services = cliCmd.Flags().StringSlice("run-clients", []string{execution, consensus}, "Run only the specified clients. Possible values: execution, consensus, validator, all. The 'all' option must be used alone. Example: '1click cli -r --run-clients=consensus,validator'") + cliCmd.Flags().BoolVar(&mapAllPorts, "map-all", false, "Map all client ports to the host. Use with care; useful to allow remote access to the clients.") + + services = cliCmd.Flags().StringSlice("run-clients", []string{execution, consensus}, "Run only the specified clients. Possible values: execution, consensus, validator, all, none. The 'all' and 'none' options must be used alone. Example: '1click cli -r --run-clients=consensus,validator'") fallbackEL = cliCmd.Flags().StringSlice("fallback-execution-urls", []string{}, "Fallback/backup execution endpoints for the consensus client. Not supported by Teku. Example: '1click cli -r --fallback-execution-urls=https://mainnet.infura.io/v3/YOUR-PROJECT-ID,https://eth-mainnet.alchemyapi.io/v2/YOUR-PROJECT-ID'") + elExtraFlags = cliCmd.Flags().StringArray("el-extra-flag", []string{}, "Additional flag to configure the execution client service in the generated docker-compose script. Example: '1click cli --el-extra-flag \"<flag1>=value1\" --el-extra-flag \"<flag2>=\\\"value2\\\"\"'") + + clExtraFlags = cliCmd.Flags().StringArray("cl-extra-flag", []string{}, "Additional flag to configure the consensus client service in the generated docker-compose script. Example: '1click cli --cl-extra-flag \"<flag1>=value1\" --cl-extra-flag \"<flag2>=\\\"value2\\\"\"'") + + vlExtraFlags = cliCmd.Flags().StringArray("vl-extra-flag", []string{}, "Additional flag to configure the validator client service in the generated docker-compose script. Example: '1click cli --vl-extra-flag \"<flag1>=value1\" --vl-extra-flag \"<flag2>=\\\"value2\\\"\"'")
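Note that the three extra-flag options use `StringArray` while `run-clients` and `fallback-execution-urls` use `StringSlice`; the difference matters because pflag's `StringSlice` splits each supplied value on commas, which would mangle extra flags whose values themselves contain commas, whereas `StringArray` keeps each occurrence intact. A small demonstration:

```go
// Demonstration of why StringArray suits the *-extra-flag options: pflag's
// StringSlice splits each value on commas, StringArray keeps values as typed.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	slice := fs.StringSlice("slice", nil, "comma-split")
	array := fs.StringArray("array", nil, "kept intact")
	if err := fs.Parse([]string{"--slice", "a=1,b=2", "--array", "a=1,b=2"}); err != nil {
		panic(err)
	}
	fmt.Println(*slice) // [a=1 b=2]  -- two elements, split on the comma
	fmt.Println(*array) // [a=1,b=2]  -- one element, exactly as typed
}
```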
+ // Initialize monitoring tool initMonitor(func() MonitoringTool { // Initialize Eth2 Monitoring tool @@ -229,8 +338,8 @@ func init() { } m, err := posmoni.NewEth2Monitor( posmonidb.EmptyRepository{}, - &posmoninet.BeaconClient{RetryDuration: time.Second}, - &posmoninet.ExecutionClient{RetryDuration: time.Second}, + &posmoninet.BeaconClient{RetryDuration: time.Second * 30}, + &posmoninet.ExecutionClient{RetryDuration: time.Second * 30}, posmoninet.SubscribeOpts{}, moniCfg, ) @@ -240,4 +349,6 @@ func init() { return m }) + + waitingTime = time.Minute } diff --git a/cli/cli_test.go b/cli/cli_test.go index 23bcb5611..d926be629 100644 --- a/cli/cli_test.go +++ b/cli/cli_test.go @@ -18,6 +18,78 @@ import ( log "github.com/sirupsen/logrus" ) +var inspectOut = ` +[ + { + "NetworkSettings": { + "Bridge": "", + "SandboxID": "56e2c759c33315c9de009bd70aac0fdeb9367549303433debb71edff8dd4db39", + "HairpinMode": false, + "LinkLocalIPv6Address": "", + "LinkLocalIPv6PrefixLen": 0, + "Ports": { + "30303/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "30303" + } + ], + "30303/udp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "30303" + } + ], + "8008/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8008" + } + ], + "8545/tcp": [ + { + "HostIp": "0.0.0.0", + "HostPort": "8560" + } + ] + }, + "SandboxKey": "/var/run/docker/netns/56e2c759c333", + "SecondaryIPAddresses": null, + "SecondaryIPv6Addresses": null, + "EndpointID": "", + "Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "IPAddress": "", + "IPPrefixLen": 0, + "IPv6Gateway": "", + "MacAddress": "", + "Networks": { + "1click_network": { + "IPAMConfig": null, + "Links": null, + "Aliases": [ + "execution-client", + "execution", + "babf61f2c52a" + ], + "NetworkID": "b4bb0c21aa1c9495d08309f8f7f4f2fb5a493fd925c880cb146045aafb2f4390", + "EndpointID": "7832cdd23f1f9f70e38576f8088da61010e057bffb0b98c83bd391065d703ed9", + "Gateway": "192.168.128.1", + "IPAddress": "192.168.128.3", + "IPPrefixLen": 20, + "IPv6Gateway": "", + "GlobalIPv6Address": "", + "GlobalIPv6PrefixLen": 0, + "MacAddress": "02:42:c0:a8:80:03", + "DriverOpts": null + } + } + } + } +] +` type cliCmdTestCase struct { name string configPath string } type cliCmdArgs struct { - random bool - run bool - install bool - execClient string - conClient string - valClient string + yes bool + run bool + install bool + execClient string + conClient string + valClient string + network string + feeRecipient string + services []string } func (args *cliCmdArgs) toString() string { s := []string{} - if args.random { - s = append(s, "-r") + if args.yes { + s = append(s, "--yes") } if args.run { s = append(s, "--run") } if args.valClient != "" { s = append(s, "-v", args.valClient) } + if args.network != "" { + s = append(s, "-n", args.network) + } + if args.feeRecipient != "" { + s = append(s, "--fee-recipient", args.feeRecipient) + } + if len(args.services) == 0 { + s = append(s, "--run-clients=none") + } else { + s = append(s, "--run-clients="+strings.Join(args.services, ",")) + } return strings.Join(s, " ") } @@ -67,11 +153,16 @@ func resetCliCmd() { executionName = "" consensusName = "" validatorName = "" + network = "mainnet" + feeRecipient = "" generationPath = configs.DefaultDockerComposeScriptsPath install = false run = false y = false services = &[]string{} + fallbackEL = &[]string{} + jwtPath = "" +
checkpointSyncUrl = "" } func prepareCliCmd(tc cliCmdTestCase) error { @@ -83,8 +174,10 @@ func prepareCliCmd(tc cliCmdTestCase) error { initConfig() // Set flags generationPath = tc.generationPath + y = tc.args.yes run = tc.args.run install = tc.args.install + services = &tc.args.services if tc.args.execClient != "" { executionName = tc.args.execClient } @@ -94,6 +187,12 @@ func prepareCliCmd(tc cliCmdTestCase) error { if tc.args.valClient != "" { validatorName = tc.args.valClient } + if tc.args.network != "" { + network = tc.args.network + } + if tc.args.feeRecipient != "" { + feeRecipient = tc.args.feeRecipient + } if err := preRunCliCmd(rootCmd, []string{}); err != nil { return err } @@ -103,6 +202,7 @@ func prepareCliCmd(tc cliCmdTestCase) error { initMonitor(func() MonitoringTool { return tc.monitor }) + waitingTime = time.Millisecond return nil } @@ -123,6 +223,12 @@ func buildCliTestCase(t *testing.T, name, caseTestDataDir string, args cliCmdArg // TODO: allow runner edition tc.runner = &test.SimpleCMDRunner{ SRunCMD: func(c commands.Command) (string, error) { + // For getContainerIP logic + if strings.Contains(c.Cmd, "ps --quiet") { + return "666", nil + } else if strings.Contains(c.Cmd, "docker inspect 666") { + return inspectOut, nil + } return "", nil }, SRunBash: func(bs commands.BashScript) (string, error) { @@ -147,12 +253,10 @@ func TestCliCmd(t *testing.T) { tcs := []cliCmdTestCase{ *buildCliTestCase( t, - "Random clients", - "case_1", + "Random clients", "case_1", cliCmdArgs{ - random: true, - run: true, - install: true, + yes: true, + services: []string{execution, consensus}, }, []posmoni.EndpointSyncStatus{ {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, @@ -163,14 +267,13 @@ func TestCliCmd(t *testing.T) { ), *buildCliTestCase( t, - "Fixed clients", - "case_1", + "Fixed clients", "case_1", cliCmdArgs{ - run: true, - install: true, + yes: true, execClient: "nethermind", conClient: "lighthouse", valClient: "lighthouse", + services: []string{execution, consensus}, }, []posmoni.EndpointSyncStatus{ {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, @@ -181,13 +284,12 @@ func TestCliCmd(t *testing.T) { ), *buildCliTestCase( t, - "Missing consensus client", - "case_1", + "Missing consensus client", "case_1", cliCmdArgs{ - run: true, - install: true, + yes: true, execClient: "nethermind", valClient: "lighthouse", + services: []string{execution, consensus}, }, []posmoni.EndpointSyncStatus{ {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, @@ -200,10 +302,225 @@ func TestCliCmd(t *testing.T) { t, "Missing validator client", "case_1", cliCmdArgs{ - run: true, - install: true, + yes: true, execClient: "nethermind", conClient: "lighthouse", + services: []string{execution, consensus}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "Good network input", "case_1", + cliCmdArgs{ + yes: true, + execClient: "nethermind", + conClient: "lighthouse", + network: "mainnet", + services: []string{execution, consensus}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "Bad network input", "case_1", + cliCmdArgs{ + yes: true, + execClient: "nethermind", + conClient: "lighthouse", + network: "1click", + services: []string{execution, consensus}, + }, + 
[]posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + true, + true, + ), + *buildCliTestCase( + t, + "Bad fee recipient input", "case_1", + cliCmdArgs{ + yes: true, + execClient: "nethermind", + conClient: "lighthouse", + network: "kiln", + feeRecipient: "666", + services: []string{execution, consensus}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + true, + false, + ), + *buildCliTestCase( + t, + "Good fee recipient input", "case_1", + cliCmdArgs{ + yes: true, + execClient: "nethermind", + conClient: "lighthouse", + network: "kiln", + feeRecipient: "0x5c00ABEf07604C59Ac72E859E5F93D5ab8546F83", + services: []string{execution, consensus}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "--run-clients all", "case_1", + cliCmdArgs{ + yes: true, + services: []string{"all"}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "--run-clients none", "case_1", + cliCmdArgs{ + yes: true, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "--run-clients none, execution, ambiguous error", "case_1", + cliCmdArgs{ + yes: true, + services: []string{execution, "none"}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + true, + false, + ), + *buildCliTestCase( + t, + "--run-clients validator", "case_1", + cliCmdArgs{ + yes: true, + services: []string{validator}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "--run-clients all, validator, ambiguous error", "case_1", + cliCmdArgs{ + yes: true, + services: []string{validator, "all"}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + true, + false, + ), + *buildCliTestCase( + t, + "--network kiln", "case_1", + cliCmdArgs{ + yes: true, + network: "kiln", + services: []string{execution, consensus}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "Invalid network", "case_1", + cliCmdArgs{ + yes: true, + network: "test", + services: []string{execution, consensus}, + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, +
}, + true, + true, + ), + *buildCliTestCase( + t, + "--network kiln, testing teku datadirs preparation", "case_1", + cliCmdArgs{ + yes: true, + network: "kiln", + services: []string{execution, consensus}, + conClient: "teku", + }, + []posmoni.EndpointSyncStatus{ + {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, + {Endpoint: configs.OnPremiseConsensusURL, Synced: true}, + }, + false, + false, + ), + *buildCliTestCase( + t, + "--network kiln, testing teku datadirs preparation, all services", "case_1", + cliCmdArgs{ + yes: true, + network: "kiln", + services: []string{"all"}, + conClient: "teku", }, []posmoni.EndpointSyncStatus{ {Endpoint: configs.OnPremiseExecutionURL, Synced: true}, diff --git a/cli/cli_utils.go b/cli/cli_utils.go index 8c3ddee59..78b78a7af 100644 --- a/cli/cli_utils.go +++ b/cli/cli_utils.go @@ -1,11 +1,15 @@ package cli import ( + "bytes" + "encoding/json" + "errors" "fmt" "io" "os" "path/filepath" "strings" + "text/template" "time" log "github.com/sirupsen/logrus" @@ -14,6 +18,7 @@ import ( "github.com/NethermindEth/1click/internal/pkg/clients" "github.com/NethermindEth/1click/internal/pkg/commands" "github.com/NethermindEth/1click/internal/utils" + "github.com/NethermindEth/1click/templates" posmoni "github.com/NethermindEth/posmoni/pkg/eth2" "github.com/manifoldco/promptui" ) @@ -144,34 +149,28 @@ func validateClients(allClients clients.OrderedClients, w io.Writer) (clients.Cl func runScriptOrExit() (err error) { // notest - optRun, optExit := fmt.Sprintf("Run the script with the selected services %s", strings.Join(*services, ",")), "Exit" - prompt := promptui.Select{ - Label: "Select how to proceed with the generated docker-compose script", - Items: []string{optRun, optExit}, - } - log.Infof(configs.InstructionsFor, "running docker-compose script") upCMD := commands.Runner.BuildDockerComposeUpCMD(commands.DockerComposeUpOptions{ - Path: generationPath, + Path: filepath.Join(generationPath, configs.DefaultDockerComposeScriptName), Services: *services, }) fmt.Printf("\n%s\n\n", upCMD.Cmd) - _, result, err := prompt.Run() - if err != nil { - return fmt.Errorf("prompt failed %s", err) + prompt := promptui.Prompt{ + Label: fmt.Sprintf("Run the script with the selected services %s", strings.Join(*services, ", ")), + IsConfirm: true, + Default: "Y", } - - switch result { - case optRun: - if err = runAndShowContainers(*services); err != nil { - return err - } - default: + _, err = prompt.Run() + if err != nil { log.Info(configs.Exiting) os.Exit(0) } + if err = runAndShowContainers(*services); err != nil { + return err + } + return nil } @@ -198,10 +197,10 @@ func runAndShowContainers(services []string) error { return fmt.Errorf(configs.CommandError, upCMD.Cmd, err) } - // Run docker-compose ps --filter status=running to show script running containers + // Run docker compose ps --filter status=running to show script running containers dcpsCMD := commands.Runner.BuildDockerComposePSCMD(commands.DockerComposePsOptions{ - Path: filepath.Join(generationPath, configs.DefaultDockerComposeScriptName), - Services: false, + Path: filepath.Join(generationPath, configs.DefaultDockerComposeScriptName), + FilterRunning: true, }) log.Infof(configs.RunningCommand, dcpsCMD.Cmd) if _, err := commands.Runner.RunCMD(dcpsCMD); err != nil { @@ -211,9 +210,77 @@ func runAndShowContainers(services []string) error { return nil } +type container struct { + NetworkSettings networkSettings +} +type networkSettings struct { + Networks map[string]networks +} +type networks struct { + IPAddress 
string +} + +func parseNetwork(js string) (string, error) { + var c []container + if err := json.NewDecoder(bytes.NewReader([]byte(js))).Decode(&c); err != nil { + return "", err + } + if len(c) == 0 { + return "", errors.New(configs.NoOutputDockerInspectError) + } + if ip := c[0].NetworkSettings.Networks["1click_network"].IPAddress; ip != "" { + return ip, nil + } + return "", errors.New(configs.IPNotFoundError) +} + +func getContainerIP(service string) (ip string, err error) { + // Run docker compose ps --quiet to show service's ID + dcpsCMD := commands.Runner.BuildDockerComposePSCMD(commands.DockerComposePsOptions{ + Path: filepath.Join(generationPath, configs.DefaultDockerComposeScriptName), + Quiet: true, + ServiceName: service, + }) + log.Infof(configs.RunningCommand, dcpsCMD.Cmd) + dcpsCMD.GetOutput = true + id, err := commands.Runner.RunCMD(dcpsCMD) + if err != nil { + return ip, fmt.Errorf(configs.CommandError, dcpsCMD.Cmd, err) + } + + // Run docker inspect to get IP address + inspectCmd := commands.Runner.BuildDockerInspectCMD(commands.DockerInspectOptions{ + Name: id, + }) + log.Infof(configs.RunningCommand, inspectCmd.Cmd) + inspectCmd.GetOutput = true + data, err := commands.Runner.RunCMD(inspectCmd) + if err != nil { + return + } + + ip, err = parseNetwork(data) + return +} + func trackSync(m MonitoringTool, wait time.Duration) error { done := make(chan struct{}) - statuses := m.TrackSync(done, []string{configs.OnPremiseExecutionURL}, []string{configs.OnPremiseConsensusURL}, time.Minute) + + log.Info(configs.GettingContainersIP) + executionIP, errE := getContainerIP(execution) + if errE != nil { + log.Errorf(configs.GetContainerIPError, execution, errE) + } + consensusIP, errC := getContainerIP(consensus) + if errC != nil { + log.Errorf(configs.GetContainerIPError, consensus, errC) + if errE != nil { + // Both IP were not detected, both containers probably failed + return errors.New(configs.UnableToTrackSyncError) + } + } + + statuses := m.TrackSync(done, []string{"http://" + consensusIP + ":4000"}, []string{"http://" + executionIP + ":8545"}, time.Minute) var esynced, csynced bool for s := range statuses { @@ -234,33 +301,106 @@ func trackSync(m MonitoringTool, wait time.Duration) error { func RunValidatorOrExit() error { // notest - optRun, optExit := "Run validator service", "Exit" - prompt := promptui.Select{ - Label: "Select how to proceed with the validator client", - Items: []string{optRun, optExit}, - } - log.Infof(configs.InstructionsFor, "running validator service of docker-compose script") upCMD := commands.Runner.BuildDockerComposeUpCMD(commands.DockerComposeUpOptions{ - Path: generationPath, + Path: filepath.Join(generationPath, configs.DefaultDockerComposeScriptName), Services: []string{validator}, }) fmt.Printf("\n%s\n\n", upCMD.Cmd) - _, result, err := prompt.Run() + prompt := promptui.Prompt{ + Label: "Run validator service", + IsConfirm: true, + Default: "Y", + } + _, err := prompt.Run() if err != nil { - return fmt.Errorf("prompt failed %s", err) + log.Info(configs.Exiting) + os.Exit(0) } - switch result { - case optRun: - if err = runAndShowContainers([]string{validator}); err != nil { - return err + if err = runAndShowContainers([]string{validator}); err != nil { + return err + } + + return nil +} + +func handleJWTSecret() error { + log.Info(configs.GeneratingJWTSecret) + + rawScript, err := templates.Scripts.ReadFile(filepath.Join("scripts", "jwt_secret.sh")) + if err != nil { + return fmt.Errorf(configs.GenerateJWTSecretError, err) + } + + tmp, err := 
template.New("script").Parse(string(rawScript)) + if err != nil { + return fmt.Errorf(configs.GenerateJWTSecretError, err) + } + + script := commands.BashScript{ + Tmp: tmp, + GetOutput: false, + Data: struct{}{}, + } + + if _, err = commands.Runner.RunBash(script); err != nil { + return fmt.Errorf(configs.GenerateJWTSecretError, err) + } + + // Get PWD + pwd, err := os.Getwd() + if err != nil { + return fmt.Errorf(configs.GetPWDError, err) + } + jwtPath = filepath.Join(pwd, "jwtsecret") + + log.Info(configs.JWTSecretGenerated) + return nil +} + +func feeRecipientPrompt() error { + // notest + validate := func(input string) error { + if input != "" && !utils.IsAddress(input) { + return errors.New(configs.InvalidFeeRecipientError) } - default: - log.Info(configs.Exiting) - os.Exit(0) + return nil + } + + prompt := promptui.Prompt{ + Label: "Please enter the Fee Recipient address. You can leave it blank and press enter (not recommended)", + Validate: validate, } + result, err := prompt.Run() + + if err != nil { + return fmt.Errorf(configs.PromptFailedError, err) + } + + feeRecipient = result + return nil +} + +func preRunTeku() error { + log.Info(configs.PreparingTekuDatadir) + for _, s := range *services { + if s == "all" || s == consensus { + // Prepare consensus datadir + path := filepath.Join(generationPath, configs.ConsensusDefaultDataDir) + if err := os.MkdirAll(path, 0777); err != nil { + return fmt.Errorf(configs.TekuDatadirError, consensus, err) + } + } + if s == "all" || s == validator { + // Prepare validator datadir + path := filepath.Join(generationPath, configs.ValidatorDefaultDataDir) + if err := os.MkdirAll(path, 0777); err != nil { + return fmt.Errorf(configs.TekuDatadirError, validator, err) + } + } + } return nil } diff --git a/cli/down.go b/cli/down.go index 081a3fb24..e933f76c6 100644 --- a/cli/down.go +++ b/cli/down.go @@ -30,7 +30,7 @@ import ( var downCmd = &cobra.Command{ Use: "down [flags]", Short: "Shutdown 1click running containers", - Long: `Shutdown 1click running containers using docker-compose CLI. Shortcut for 'docker-compose -f
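To round off the `cli_utils.go` changes shown above: `getContainerIP` asks `docker compose ps --quiet` for the service's container ID, runs `docker inspect` on it, and `parseNetwork` decodes the resulting JSON array to pick the container's address on `1click_network`. A self-contained sketch of that decoding, using a trimmed version of the `inspectOut` fixture from `cli_test.go` and literal error strings in place of the `configs` constants:

```go
// Self-contained sketch of the parseNetwork logic from cli_utils.go: decode
// `docker inspect` output (a JSON array) and pull the IP on "1click_network".
// Error messages are literals standing in for the configs.* constants.
package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"log"
)

type networks struct {
	IPAddress string
}
type networkSettings struct {
	Networks map[string]networks
}
type container struct {
	NetworkSettings networkSettings
}

func parseNetwork(js string) (string, error) {
	var c []container
	if err := json.Unmarshal([]byte(js), &c); err != nil {
		return "", err
	}
	if len(c) == 0 {
		return "", errors.New("no output from docker inspect")
	}
	if ip := c[0].NetworkSettings.Networks["1click_network"].IPAddress; ip != "" {
		return ip, nil
	}
	return "", errors.New("IP address not found")
}

func main() {
	js := `[{"NetworkSettings":{"Networks":{"1click_network":{"IPAddress":"192.168.128.3"}}}}]`
	ip, err := parseNetwork(js)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ip) // 192.168.128.3
}
```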