From bc8e90c68485aaf6f9284f16c404ce1b6ae44c6e Mon Sep 17 00:00:00 2001 From: Arpit Date: Tue, 21 Jan 2025 16:32:34 +0000 Subject: [PATCH 01/12] Rewriting config module to simplify the flow --- .gitignore | 2 + cmd/init_config.go | 26 +++++ cmd/root.go | 53 +++++++++ config/config.go | 33 ++++++ go.mod | 20 +++- go.sum | 47 +++++++- main.go | 275 +------------------------------------------- server/main.go | 281 +++++++++++++++++++++++++++++++++++++++++++++ 8 files changed, 458 insertions(+), 279 deletions(-) create mode 100644 cmd/init_config.go create mode 100644 cmd/root.go create mode 100644 server/main.go diff --git a/.gitignore b/.gitignore index 70eb07810..aa02c8470 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,5 @@ pnpm-debug.log* # macOS-specific files .DS_Store +dicedb.yaml +dicedb.conf diff --git a/cmd/init_config.go b/cmd/init_config.go new file mode 100644 index 000000000..17e180adc --- /dev/null +++ b/cmd/init_config.go @@ -0,0 +1,26 @@ +// Copyright (c) 2022-present, DiceDB contributors +// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. + +package cmd + +import ( + "fmt" + + "github.com/dicedb/dice/config" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +var initConfigCmd = &cobra.Command{ + Use: "init-config", + Short: "creates a config file at dicedb.yaml with default values", + Run: func(cmd *cobra.Command, args []string) { + config.Init(cmd.Flags()) + viper.WriteConfigAs("dicedb.yaml") + fmt.Println("config created at dicedb.yaml") + }, +} + +func init() { + rootCmd.AddCommand(initConfigCmd) +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 000000000..f532869c3 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,53 @@ +// Copyright (c) 2022-present, DiceDB contributors +// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. 
+ +package cmd + +import ( + "fmt" + "os" + "reflect" + "strconv" + + "github.com/dicedb/dice/config" + "github.com/dicedb/dice/server" + "github.com/spf13/cobra" +) + +func init() { + c := config.DiceDBConfig{} + _type := reflect.TypeOf(c) + for i := 0; i < _type.NumField(); i++ { + field := _type.Field(i) + yamlTag := field.Tag.Get("mapstructure") + descriptionTag := field.Tag.Get("description") + defaultTag := field.Tag.Get("default") + + switch field.Type.Kind() { + case reflect.String: + rootCmd.PersistentFlags().String(yamlTag, defaultTag, descriptionTag) + case reflect.Int: + val, _ := strconv.Atoi(defaultTag) + rootCmd.PersistentFlags().Int(yamlTag, val, descriptionTag) + case reflect.Bool: + val, _ := strconv.ParseBool(defaultTag) + rootCmd.PersistentFlags().Bool(yamlTag, val, descriptionTag) + } + } +} + +var rootCmd = &cobra.Command{ + Use: "dicedb", + Short: "an in-memory database;", + Run: func(cmd *cobra.Command, args []string) { + config.Init(cmd.Flags()) + server.Start() + }, +} + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/config/config.go b/config/config.go index 54d9622f4..b808808cf 100644 --- a/config/config.go +++ b/config/config.go @@ -12,6 +12,8 @@ import ( "time" "github.com/dicedb/dice/internal/server/utils" + "github.com/spf13/pflag" + "github.com/spf13/viper" ) const ( @@ -318,3 +320,34 @@ func MergeFlags(flags *Config) { } }) } + +type DiceDBConfig struct { + Host string `mapstructure:"host" description:"the host address to bind to" default:"0.0.0.0"` + Port int `mapstructure:"port" description:"the port to bind to" default:"7379"` + EnableHTTP bool `mapstructure:"enable-http" description:"enable http server" default:"false"` +} + +var GlobalDiceDBConfig *DiceDBConfig + +func Init(flags *pflag.FlagSet) { + viper.SetConfigName("dicedb") + viper.SetConfigType("yaml") + viper.AddConfigPath(".") + viper.AddConfigPath("/etc/dicedb") + + err := viper.ReadInConfig() + if _, ok := err.(viper.ConfigFileNotFoundError); !ok && err != nil { + panic(err) + } + + flags.VisitAll(func(flag *pflag.Flag) { + if flag.Name == "help" { + return + } + viper.Set(flag.Name, flag.Value.String()) + }) + + if err := viper.Unmarshal(&GlobalDiceDBConfig); err != nil { + panic(err) + } +} diff --git a/go.mod b/go.mod index dcf33b828..515bde104 100644 --- a/go.mod +++ b/go.mod @@ -11,20 +11,34 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dgryski/go-metro v0.0.0-20211217172704-adc40b04c140 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/gabriel-vasile/mimetype v1.4.3 // indirect github.com/go-playground/locales v0.14.1 // indirect github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/klauspost/cpuid/v2 v2.2.8 // indirect - github.com/kr/pretty v0.3.1 // indirect github.com/leodido/go-urn v1.4.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim 
v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + go.uber.org/atomic v1.9.0 // indirect + go.uber.org/multierr v1.9.0 // indirect golang.org/x/arch v0.11.0 // indirect golang.org/x/net v0.23.0 // indirect golang.org/x/sys v0.27.0 // indirect golang.org/x/text v0.20.0 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -47,9 +61,11 @@ require ( github.com/ohler55/ojg v1.25.0 github.com/rs/xid v1.6.0 github.com/rs/zerolog v1.33.0 + github.com/spf13/cobra v1.8.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.19.0 github.com/stretchr/testify v1.9.0 github.com/twmb/murmur3 v1.1.8 - github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 golang.org/x/crypto v0.29.0 golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f google.golang.org/protobuf v1.35.1 diff --git a/go.sum b/go.sum index 51567b272..ffc65ee77 100644 --- a/go.sum +++ b/go.sum @@ -20,7 +20,7 @@ github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQ github.com/cockroachdb/swiss v0.0.0-20240612210725-f4de07ae6964 h1:Ew0znI2JatzKy52N1iS5muUsHkf2UJuhocH7uFW7jjs= github.com/cockroachdb/swiss v0.0.0-20240612210725-f4de07ae6964/go.mod h1:yBRu/cnL4ks9bgy4vAASdjIW+/xMlFwuHKqtmh3GZQg= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -35,6 +35,10 @@ github.com/dicedb/dicedb-go v0.0.0-20241120175955-5eaa6c7e79bb h1:HVdPhxbTT7wLIN github.com/dicedb/dicedb-go v0.0.0-20241120175955-5eaa6c7e79bb/go.mod h1:DuggsMhSh810UH6hH4MXWLflPz+/ZgoFAhhsi53S9e0= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= @@ -56,6 +60,10 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -66,6 +74,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= @@ -74,11 +84,14 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mmcloughlin/geohash v0.10.0 h1:9w1HchfDfdeLc+jFEf/04D27KP7E2QmpDu52wPbJWRE= github.com/mmcloughlin/geohash v0.10.0/go.mod h1:oNZxQo5yWJh0eMQEP/8hwQuVx9Z9tjwFUqcTB1SmG0c= github.com/ohler55/ojg v1.25.0 h1:sDwc4u4zex65Uz5Nm7O1QwDKTT+YRcpeZQTy1pffRkw= github.com/ohler55/ojg v1.25.0/go.mod h1:gQhDVpQLqrmnd2eqGAvJtn+NfKoYJbe/A4Sj3/Vro4o= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= +github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -90,21 +103,45 @@ github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod 
h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= +github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= -github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2 h1:zzrxE1FKn5ryBNl9eKOeqQ58Y/Qpo3Q9QNxKHX5uzzQ= -github.com/xwb1989/sqlparser v0.0.0-20180606152119-120387863bf2/go.mod h1:hzfGeIUDq/j97IG+FhNqkowIyEcD88LrW6fyU3K3WqY= +go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= +go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= golang.org/x/arch v0.11.0 h1:KXV8WWKCXm6tRpLirl2szsO5j/oOODwZf4hATmGVNs4= golang.org/x/arch v0.11.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ= @@ -126,6 +163,8 @@ google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojt gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/main.go b/main.go index b320bdc39..52864ce6a 100644 --- a/main.go +++ b/main.go @@ -3,279 +3,8 @@ package main -import ( - "context" - "errors" - "fmt" - "log/slog" - "net/http" - "os" - "os/signal" - "runtime" - "runtime/pprof" - "runtime/trace" - "sync" - "syscall" - "time" - - "github.com/dicedb/dice/internal/server/httpws" - - "github.com/dicedb/dice/internal/cli" - "github.com/dicedb/dice/internal/commandhandler" - "github.com/dicedb/dice/internal/logger" - "github.com/dicedb/dice/internal/server/abstractserver" - "github.com/dicedb/dice/internal/wal" - "github.com/dicedb/dice/internal/watchmanager" - - "github.com/dicedb/dice/config" - diceerrors "github.com/dicedb/dice/internal/errors" - "github.com/dicedb/dice/internal/iothread" - "github.com/dicedb/dice/internal/observability" - "github.com/dicedb/dice/internal/server/resp" - "github.com/dicedb/dice/internal/shard" - dstore "github.com/dicedb/dice/internal/store" -) - -const ( - WALEngineAOF = "aof" -) +import "github.com/dicedb/dice/cmd" func main() { - iid := observability.GetOrCreateInstanceID() - config.DiceConfig.InstanceID = iid - - // This is counter intuitive, but it's the first thing that should be done - // because this function parses the flags and prepares the config, - cli.Execute() - - slog.SetDefault(logger.New()) - go observability.Ping() - - ctx, cancel := context.WithCancel(context.Background()) - - // Handle SIGTERM and SIGINT - sigs := make(chan os.Signal, 1) - signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) - - var ( - cmdWatchChan chan dstore.CmdWatchEvent - serverErrCh = make(chan error, 2) - cmdWatchSubscriptionChan = make(chan watchmanager.WatchSubscription) - wl wal.AbstractWAL - ) - - wl, _ = wal.NewNullWAL() - if config.DiceConfig.Persistence.Enabled { - if config.DiceConfig.Persistence.WALEngine == WALEngineAOF { - _wl, err := wal.NewAOFWAL(config.DiceConfig.WAL.LogDir) - if err != nil { - slog.Warn("could not create WAL with", slog.String("wal-engine", config.DiceConfig.Persistence.WALEngine), slog.Any("error", err)) - sigs <- syscall.SIGKILL - return - } - wl = _wl - } else { - slog.Error("unsupported WAL engine", slog.String("engine", config.DiceConfig.Persistence.WALEngine)) - sigs <- syscall.SIGKILL - return - } - - if err := wl.Init(time.Now()); err != nil { - slog.Error("could not initialize WAL", slog.Any("error", err)) - } else { - go wal.InitBG(wl) - } - - slog.Debug("WAL initialization complete") - - if config.DiceConfig.Persistence.RestoreFromWAL { - slog.Info("restoring database from WAL") - wal.ReplayWAL(wl) - slog.Info("database restored from WAL") - } - } - - if config.DiceConfig.Performance.EnableWatch { - bufSize := config.DiceConfig.Performance.WatchChanBufSize - cmdWatchChan = make(chan dstore.CmdWatchEvent, bufSize) - } - - // Get the number of available CPU cores on 
the machine using runtime.NumCPU(). - // This determines the total number of logical processors that can be utilized - // for parallel execution. Setting the maximum number of CPUs to the available - // core count ensures the application can make full use of all available hardware. - var numShards int - numShards = runtime.NumCPU() - if config.DiceConfig.Performance.NumShards > 0 { - numShards = config.DiceConfig.Performance.NumShards - } - - // The runtime.GOMAXPROCS(numShards) call limits the number of operating system - // threads that can execute Go code simultaneously to the number of CPU cores. - // This enables Go to run more efficiently, maximizing CPU utilization and - // improving concurrency performance across multiple goroutines. - runtime.GOMAXPROCS(runtime.NumCPU()) - - // Initialize the ShardManager - shardManager := shard.NewShardManager(uint8(numShards), cmdWatchChan, serverErrCh) - - wg := sync.WaitGroup{} - - wg.Add(1) - go func() { - defer wg.Done() - shardManager.Run(ctx) - }() - - var serverWg sync.WaitGroup - - if config.DiceConfig.Performance.EnableProfiling { - stopProfiling, err := startProfiling() - if err != nil { - slog.Error("Profiling could not be started", slog.Any("error", err)) - sigs <- syscall.SIGKILL - } - defer stopProfiling() - } - ioThreadManager := iothread.NewManager(config.DiceConfig.Performance.MaxClients) - cmdHandlerManager := commandhandler.NewRegistry(config.DiceConfig.Performance.MaxClients, shardManager) - - respServer := resp.NewServer(shardManager, ioThreadManager, cmdHandlerManager, cmdWatchSubscriptionChan, cmdWatchChan, serverErrCh, wl) - serverWg.Add(1) - go runServer(ctx, &serverWg, respServer, serverErrCh) - - if config.DiceConfig.HTTP.Enabled { - httpServer := httpws.NewHTTPServer(shardManager, wl) - serverWg.Add(1) - go runServer(ctx, &serverWg, httpServer, serverErrCh) - } - - if config.DiceConfig.WebSocket.Enabled { - websocketServer := httpws.NewWebSocketServer(shardManager, config.DiceConfig.WebSocket.Port, wl) - serverWg.Add(1) - go runServer(ctx, &serverWg, websocketServer, serverErrCh) - } - - wg.Add(1) - go func() { - defer wg.Done() - <-sigs - cancel() - }() - - go func() { - serverWg.Wait() - close(serverErrCh) // Close the channel when both servers are done - }() - - for err := range serverErrCh { - if err != nil && errors.Is(err, diceerrors.ErrAborted) { - // if either the AsyncServer/RESPServer or the HTTPServer received an abort command, - // cancel the context, helping gracefully exiting all servers - cancel() - } - } - - close(sigs) - - if config.DiceConfig.Persistence.Enabled { - wal.ShutdownBG() - } - - cancel() - - wg.Wait() -} - -func runServer(ctx context.Context, wg *sync.WaitGroup, srv abstractserver.AbstractServer, errCh chan<- error) { - defer wg.Done() - if err := srv.Run(ctx); err != nil { - switch { - case errors.Is(err, context.Canceled): - slog.Debug(fmt.Sprintf("%T was canceled", srv)) - case errors.Is(err, diceerrors.ErrAborted): - slog.Debug(fmt.Sprintf("%T received abort command", srv)) - case errors.Is(err, http.ErrServerClosed): - slog.Debug(fmt.Sprintf("%T received abort command", srv)) - default: - slog.Error(fmt.Sprintf("%T error", srv), slog.Any("error", err)) - } - errCh <- err - } else { - slog.Debug("bye.") - } -} -func startProfiling() (func(), error) { - // Start CPU profiling - cpuFile, err := os.Create("cpu.prof") - if err != nil { - return nil, fmt.Errorf("could not create cpu.prof: %w", err) - } - - if err = pprof.StartCPUProfile(cpuFile); err != nil { - cpuFile.Close() - return 
nil, fmt.Errorf("could not start CPU profile: %w", err) - } - - // Start memory profiling - memFile, err := os.Create("mem.prof") - if err != nil { - pprof.StopCPUProfile() - cpuFile.Close() - return nil, fmt.Errorf("could not create mem.prof: %w", err) - } - - // Start block profiling - runtime.SetBlockProfileRate(1) - - // Start execution trace - traceFile, err := os.Create("trace.out") - if err != nil { - runtime.SetBlockProfileRate(0) - memFile.Close() - pprof.StopCPUProfile() - cpuFile.Close() - return nil, fmt.Errorf("could not create trace.out: %w", err) - } - - if err := trace.Start(traceFile); err != nil { - traceFile.Close() - runtime.SetBlockProfileRate(0) - memFile.Close() - pprof.StopCPUProfile() - cpuFile.Close() - return nil, fmt.Errorf("could not start trace: %w", err) - } - - // Return a cleanup function - return func() { - // Stop the CPU profiling and close cpuFile - pprof.StopCPUProfile() - cpuFile.Close() - - // Write heap profile - runtime.GC() - if err := pprof.WriteHeapProfile(memFile); err != nil { - slog.Warn("could not write memory profile", slog.Any("error", err)) - } - - memFile.Close() - - // Write block profile - blockFile, err := os.Create("block.prof") - if err != nil { - slog.Warn("could not create block profile", slog.Any("error", err)) - } else { - if err := pprof.Lookup("block").WriteTo(blockFile, 0); err != nil { - slog.Warn("could not write block profile", slog.Any("error", err)) - } - blockFile.Close() - } - - runtime.SetBlockProfileRate(0) - - // Stop trace and close traceFile - trace.Stop() - traceFile.Close() - }, nil + cmd.Execute() } diff --git a/server/main.go b/server/main.go new file mode 100644 index 000000000..971060f04 --- /dev/null +++ b/server/main.go @@ -0,0 +1,281 @@ +// Copyright (c) 2022-present, DiceDB contributors +// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. 
+ +package server + +import ( + "context" + "errors" + "fmt" + "log/slog" + "net/http" + "os" + "os/signal" + "runtime" + "runtime/pprof" + "runtime/trace" + "sync" + "syscall" + "time" + + "github.com/dicedb/dice/internal/server/httpws" + + "github.com/dicedb/dice/internal/cli" + "github.com/dicedb/dice/internal/commandhandler" + "github.com/dicedb/dice/internal/logger" + "github.com/dicedb/dice/internal/server/abstractserver" + "github.com/dicedb/dice/internal/wal" + "github.com/dicedb/dice/internal/watchmanager" + + "github.com/dicedb/dice/config" + diceerrors "github.com/dicedb/dice/internal/errors" + "github.com/dicedb/dice/internal/iothread" + "github.com/dicedb/dice/internal/observability" + "github.com/dicedb/dice/internal/server/resp" + "github.com/dicedb/dice/internal/shard" + dstore "github.com/dicedb/dice/internal/store" +) + +const ( + WALEngineAOF = "aof" +) + +func Start() { + iid := observability.GetOrCreateInstanceID() + config.DiceConfig.InstanceID = iid + + // This is counter intuitive, but it's the first thing that should be done + // because this function parses the flags and prepares the config, + cli.Execute() + + slog.SetDefault(logger.New()) + go observability.Ping() + + ctx, cancel := context.WithCancel(context.Background()) + + // Handle SIGTERM and SIGINT + sigs := make(chan os.Signal, 1) + signal.Notify(sigs, syscall.SIGTERM, syscall.SIGINT) + + var ( + cmdWatchChan chan dstore.CmdWatchEvent + serverErrCh = make(chan error, 2) + cmdWatchSubscriptionChan = make(chan watchmanager.WatchSubscription) + wl wal.AbstractWAL + ) + + wl, _ = wal.NewNullWAL() + if config.DiceConfig.Persistence.Enabled { + if config.DiceConfig.Persistence.WALEngine == WALEngineAOF { + _wl, err := wal.NewAOFWAL(config.DiceConfig.WAL.LogDir) + if err != nil { + slog.Warn("could not create WAL with", slog.String("wal-engine", config.DiceConfig.Persistence.WALEngine), slog.Any("error", err)) + sigs <- syscall.SIGKILL + return + } + wl = _wl + } else { + slog.Error("unsupported WAL engine", slog.String("engine", config.DiceConfig.Persistence.WALEngine)) + sigs <- syscall.SIGKILL + return + } + + if err := wl.Init(time.Now()); err != nil { + slog.Error("could not initialize WAL", slog.Any("error", err)) + } else { + go wal.InitBG(wl) + } + + slog.Debug("WAL initialization complete") + + if config.DiceConfig.Persistence.RestoreFromWAL { + slog.Info("restoring database from WAL") + wal.ReplayWAL(wl) + slog.Info("database restored from WAL") + } + } + + if config.DiceConfig.Performance.EnableWatch { + bufSize := config.DiceConfig.Performance.WatchChanBufSize + cmdWatchChan = make(chan dstore.CmdWatchEvent, bufSize) + } + + // Get the number of available CPU cores on the machine using runtime.NumCPU(). + // This determines the total number of logical processors that can be utilized + // for parallel execution. Setting the maximum number of CPUs to the available + // core count ensures the application can make full use of all available hardware. + var numShards int + numShards = runtime.NumCPU() + if config.DiceConfig.Performance.NumShards > 0 { + numShards = config.DiceConfig.Performance.NumShards + } + + // The runtime.GOMAXPROCS(numShards) call limits the number of operating system + // threads that can execute Go code simultaneously to the number of CPU cores. + // This enables Go to run more efficiently, maximizing CPU utilization and + // improving concurrency performance across multiple goroutines. 
+ runtime.GOMAXPROCS(runtime.NumCPU()) + + // Initialize the ShardManager + shardManager := shard.NewShardManager(uint8(numShards), cmdWatchChan, serverErrCh) + + wg := sync.WaitGroup{} + + wg.Add(1) + go func() { + defer wg.Done() + shardManager.Run(ctx) + }() + + var serverWg sync.WaitGroup + + if config.DiceConfig.Performance.EnableProfiling { + stopProfiling, err := startProfiling() + if err != nil { + slog.Error("Profiling could not be started", slog.Any("error", err)) + sigs <- syscall.SIGKILL + } + defer stopProfiling() + } + ioThreadManager := iothread.NewManager(config.DiceConfig.Performance.MaxClients) + cmdHandlerManager := commandhandler.NewRegistry(config.DiceConfig.Performance.MaxClients, shardManager) + + respServer := resp.NewServer(shardManager, ioThreadManager, cmdHandlerManager, cmdWatchSubscriptionChan, cmdWatchChan, serverErrCh, wl) + serverWg.Add(1) + go runServer(ctx, &serverWg, respServer, serverErrCh) + + if config.DiceConfig.HTTP.Enabled { + httpServer := httpws.NewHTTPServer(shardManager, wl) + serverWg.Add(1) + go runServer(ctx, &serverWg, httpServer, serverErrCh) + } + + if config.DiceConfig.WebSocket.Enabled { + websocketServer := httpws.NewWebSocketServer(shardManager, config.DiceConfig.WebSocket.Port, wl) + serverWg.Add(1) + go runServer(ctx, &serverWg, websocketServer, serverErrCh) + } + + wg.Add(1) + go func() { + defer wg.Done() + <-sigs + cancel() + }() + + go func() { + serverWg.Wait() + close(serverErrCh) // Close the channel when both servers are done + }() + + for err := range serverErrCh { + if err != nil && errors.Is(err, diceerrors.ErrAborted) { + // if either the AsyncServer/RESPServer or the HTTPServer received an abort command, + // cancel the context, helping gracefully exiting all servers + cancel() + } + } + + close(sigs) + + if config.DiceConfig.Persistence.Enabled { + wal.ShutdownBG() + } + + cancel() + + wg.Wait() +} + +func runServer(ctx context.Context, wg *sync.WaitGroup, srv abstractserver.AbstractServer, errCh chan<- error) { + defer wg.Done() + if err := srv.Run(ctx); err != nil { + switch { + case errors.Is(err, context.Canceled): + slog.Debug(fmt.Sprintf("%T was canceled", srv)) + case errors.Is(err, diceerrors.ErrAborted): + slog.Debug(fmt.Sprintf("%T received abort command", srv)) + case errors.Is(err, http.ErrServerClosed): + slog.Debug(fmt.Sprintf("%T received abort command", srv)) + default: + slog.Error(fmt.Sprintf("%T error", srv), slog.Any("error", err)) + } + errCh <- err + } else { + slog.Debug("bye.") + } +} +func startProfiling() (func(), error) { + // Start CPU profiling + cpuFile, err := os.Create("cpu.prof") + if err != nil { + return nil, fmt.Errorf("could not create cpu.prof: %w", err) + } + + if err = pprof.StartCPUProfile(cpuFile); err != nil { + cpuFile.Close() + return nil, fmt.Errorf("could not start CPU profile: %w", err) + } + + // Start memory profiling + memFile, err := os.Create("mem.prof") + if err != nil { + pprof.StopCPUProfile() + cpuFile.Close() + return nil, fmt.Errorf("could not create mem.prof: %w", err) + } + + // Start block profiling + runtime.SetBlockProfileRate(1) + + // Start execution trace + traceFile, err := os.Create("trace.out") + if err != nil { + runtime.SetBlockProfileRate(0) + memFile.Close() + pprof.StopCPUProfile() + cpuFile.Close() + return nil, fmt.Errorf("could not create trace.out: %w", err) + } + + if err := trace.Start(traceFile); err != nil { + traceFile.Close() + runtime.SetBlockProfileRate(0) + memFile.Close() + pprof.StopCPUProfile() + cpuFile.Close() + return nil, 
fmt.Errorf("could not start trace: %w", err) + } + + // Return a cleanup function + return func() { + // Stop the CPU profiling and close cpuFile + pprof.StopCPUProfile() + cpuFile.Close() + + // Write heap profile + runtime.GC() + if err := pprof.WriteHeapProfile(memFile); err != nil { + slog.Warn("could not write memory profile", slog.Any("error", err)) + } + + memFile.Close() + + // Write block profile + blockFile, err := os.Create("block.prof") + if err != nil { + slog.Warn("could not create block profile", slog.Any("error", err)) + } else { + if err := pprof.Lookup("block").WriteTo(blockFile, 0); err != nil { + slog.Warn("could not write block profile", slog.Any("error", err)) + } + blockFile.Close() + } + + runtime.SetBlockProfileRate(0) + + // Stop trace and close traceFile + trace.Stop() + traceFile.Close() + }, nil +} From 7fed270cb12530eb0ca7db7393751e023bf15cbf Mon Sep 17 00:00:00 2001 From: Arpit Date: Tue, 21 Jan 2025 17:56:10 +0000 Subject: [PATCH 02/12] Getting rid of config template and moving auth to top-level config --- cmd/init_config.go | 2 +- config/config.go | 97 ++--------------------- internal/auth/session.go | 4 +- internal/cli/cli.go | 96 ++-------------------- internal/commandhandler/commandhandler.go | 14 +++- internal/eval/eval.go | 4 +- server/main.go | 10 +++ 7 files changed, 36 insertions(+), 191 deletions(-) diff --git a/cmd/init_config.go b/cmd/init_config.go index 17e180adc..4a433c5f0 100644 --- a/cmd/init_config.go +++ b/cmd/init_config.go @@ -16,7 +16,7 @@ var initConfigCmd = &cobra.Command{ Short: "creates a config file at dicedb.yaml with default values", Run: func(cmd *cobra.Command, args []string) { config.Init(cmd.Flags()) - viper.WriteConfigAs("dicedb.yaml") + _ = viper.WriteConfigAs("dicedb.yaml") fmt.Println("config created at dicedb.yaml") }, } diff --git a/config/config.go b/config/config.go index b808808cf..5f517f2d0 100644 --- a/config/config.go +++ b/config/config.go @@ -29,82 +29,6 @@ const ( DefaultKeysLimit int = 200000000 DefaultEvictionRatio float64 = 0.1 - - defaultConfigTemplate = `# Configuration file for Dicedb - -# Version -version = "0.1.0" - -# Async Server Configuration -async_server.addr = "0.0.0.0" -async_server.port = 7379 -async_server.keepalive = 300 -async_server.timeout = 300 -async_server.max_conn = 0 - -# HTTP Configuration -http.enabled = false -http.port = 8082 - -# WebSocket Configuration -websocket.enabled = false -websocket.port = 8379 -websocket.max_write_response_retries = 3 -websocket.write_response_timeout = 10s - -# Performance Configuration -performance.watch_chan_buf_size = 20000 -performance.shard_cron_frequency = 1s -performance.multiplexer_poll_timeout = 100ms -performance.max_clients = 20000 -performance.store_map_init_size = 1024000 -performance.adhoc_req_chan_buf_size = 20 -performance.enable_profiling = false -performance.enable_watch = false -performance.num_shards = -1 - -# Memory Configuration -memory.max_memory = 0 -memory.eviction_policy = "allkeys-lfu" -memory.eviction_ratio = 0.9 -memory.keys_limit = 200000000 -memory.lfu_log_factor = 10 - -# Persistence Configuration -persistence.enabled = false -persistence.aof_file = "./dice-master.aof" -persistence.persistence_enabled = true -persistence.write_aof_on_cleanup = false -persistence.wal-dir = "./" -persistence.restore-wal = false -persistence.wal-engine = "aof" - -# Logging Configuration -logging.log_level = "info" -logging.log_dir = "/tmp/dicedb" - -# Authentication Configuration -auth.username = "dice" -auth.password = "" - -# Network 
Configuration -network.io_buffer_length = 512 -network.io_buffer_length_max = 51200 - -# WAL Configuration -LogDir = "tmp/dicedb-wal" -Enabled = "true" -WalMode = "buffered" -WriteMode = "default" -BufferSizeMB = 1 -RotationMode = "segemnt-size" -MaxSegmentSizeMB = 16 -MaxSegmentRotationTime = 60s -BufferSyncInterval = 200ms -RetentionMode = "num-segments" -MaxSegmentCount = 10 -MaxSegmentRetentionDuration = 600s -RecoveryMode = "strict"` ) var ( @@ -115,7 +39,6 @@ var ( type Config struct { Version string `config:"version" default:"0.1.0"` InstanceID string `config:"instance_id"` - Auth auth `config:"auth"` RespServer respServer `config:"async_server"` HTTP http `config:"http"` WebSocket websocket `config:"websocket"` @@ -127,11 +50,6 @@ type Config struct { WAL WALConfig `config:"WAL"` } -type auth struct { - UserName string `config:"username" default:"dice"` - Password string `config:"password"` -} - type respServer struct { Addr string `config:"addr" default:"0.0.0.0" validate:"ipv4"` Port int `config:"port" default:"7379" validate:"number,gte=0,lte=65535"` @@ -261,10 +179,6 @@ func writeConfigFile(configFilePath string) error { } defer file.Close() - if _, err := file.WriteString(defaultConfigTemplate); err != nil { - return err - } - return nil } @@ -311,8 +225,6 @@ func MergeFlags(flags *Config) { DiceConfig.Persistence.RestoreFromWAL = flags.Persistence.RestoreFromWAL case "wal-engine": DiceConfig.Persistence.WALEngine = flags.Persistence.WALEngine - case "require-pass": - DiceConfig.Auth.Password = flags.Auth.Password case "keys-limit": DiceConfig.Memory.KeysLimit = flags.Memory.KeysLimit case "eviction-ratio": @@ -322,9 +234,12 @@ func MergeFlags(flags *Config) { } type DiceDBConfig struct { - Host string `mapstructure:"host" description:"the host address to bind to" default:"0.0.0.0"` - Port int `mapstructure:"port" description:"the port to bind to" default:"7379"` - EnableHTTP bool `mapstructure:"enable-http" description:"enable http server" default:"false"` + Host string `mapstructure:"host" default:"0.0.0.0" description:"the host address to bind to"` + Port int `mapstructure:"port" default:"7379" description:"the port to bind to"` + EnableHTTP bool `mapstructure:"enable-http" default:"false" description:"enable http server"` + + Username string `mapstructure:"username" default:"dicedb" description:"the username to use for authentication"` + Password string `mapstructure:"password" default:"" description:"the password to use for authentication"` } var GlobalDiceDBConfig *DiceDBConfig diff --git a/internal/auth/session.go b/internal/auth/session.go index a49051381..491a8f807 100644 --- a/internal/auth/session.go +++ b/internal/auth/session.go @@ -108,7 +108,7 @@ func NewSession() (session *Session) { } func (session *Session) IsActive() (isActive bool) { - if config.DiceConfig.Auth.Password == utils.EmptyStr && session.Status != SessionStatusActive { + if config.GlobalDiceDBConfig.Password == utils.EmptyStr && session.Status != SessionStatusActive { session.Activate(session.User) } isActive = session.Status == SessionStatusActive @@ -133,7 +133,7 @@ func (session *Session) Validate(username, password string) error { if user, err = UserStore.Get(username); err != nil { return err } - if username == config.DiceConfig.Auth.UserName && len(user.Passwords) == 0 { + if username == config.GlobalDiceDBConfig.Username && len(user.Passwords) == 0 { session.Activate(user) return nil } diff --git a/internal/cli/cli.go b/internal/cli/cli.go index a48fb527a..1e5e627c6 100644 --- 
a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -11,10 +11,8 @@ import ( "os" "path/filepath" "runtime" - "strings" "github.com/dicedb/dice/config" - "github.com/dicedb/dice/internal/server/utils" "github.com/fatih/color" ) @@ -72,6 +70,11 @@ func render() { func Execute() { flagsConfig := config.Config{} + var tempStr string + + flag.StringVar(&tempStr, "username", "dicedb", "deleted") + flag.StringVar(&tempStr, "password", "dicedb", "deleted") + flag.StringVar(&flagsConfig.RespServer.Addr, "host", "0.0.0.0", "host for the DiceDB server") flag.IntVar(&flagsConfig.RespServer.Port, "port", 7379, "port for the DiceDB server") @@ -94,7 +97,6 @@ func Execute() { flag.BoolVar(&flagsConfig.Persistence.RestoreFromWAL, "restore-wal", false, "restore the database from the WAL files") flag.StringVar(&flagsConfig.Persistence.WALEngine, "wal-engine", "null", "wal engine to use, values: sqlite, aof") - flag.StringVar(&flagsConfig.Auth.Password, "requirepass", utils.EmptyStr, "enable authentication for the default user") flag.StringVar(&config.CustomConfigFilePath, "o", config.CustomConfigFilePath, "dir path to create the flagsConfig file") flag.StringVar(&config.CustomConfigDirPath, "c", config.CustomConfigDirPath, "file path of the config file") @@ -140,94 +142,6 @@ func Execute() { } flag.Parse() - - if len(os.Args) > 2 { - switch os.Args[1] { - case "-v", "--version": - fmt.Println("dicedb version", config.DiceDBVersion) - os.Exit(0) - - case "-": - parser := config.NewConfigParser() - if err := parser.ParseFromStdin(); err != nil { - log.Fatal(err) - } - if err := parser.Loadconfig(config.DiceConfig); err != nil { - log.Fatal(err) - } - fmt.Println(config.DiceConfig.Version) - case "-o", "--output": - if len(os.Args) < 3 { - log.Fatal("Output file path not provided") - } else { - dirPath := os.Args[2] - if dirPath == "" { - log.Fatal("Output file path not provided") - } - - info, err := os.Stat(dirPath) - switch { - case os.IsNotExist(err): - log.Fatal("Output file path does not exist") - case err != nil: - log.Fatalf("Error checking output file path: %v", err) - case !info.IsDir(): - log.Fatal("Output file path is not a directory") - } - - filePath := filepath.Join(dirPath, config.DefaultConfigName) - if _, err := os.Stat(filePath); err == nil { - slog.Warn("Config file already exists at the specified path", slog.String("path", filePath), slog.String("action", "skipping file creation")) - return - } - if err := config.CreateConfigFile(filePath); err != nil { - log.Fatal(err) - } - - config.MergeFlags(&flagsConfig) - render() - } - case "-c", "--config": - if len(os.Args) >= 3 { - filePath := os.Args[2] - if filePath == "" { - log.Fatal("Error: Config file path not provided") - } - - info, err := os.Stat(filePath) - switch { - case os.IsNotExist(err): - log.Fatalf("Config file does not exist: %s", filePath) - case err != nil: - log.Fatalf("Unable to check config file: %v", err) - } - - if info.IsDir() { - log.Fatalf("Config file path points to a directory: %s", filePath) - } - - if !strings.HasSuffix(filePath, ".conf") { - log.Fatalf("Config file must have a .conf extension: %s", filePath) - } - - parser := config.NewConfigParser() - if err := parser.ParseFromFile(filePath); err != nil { - log.Fatal(err) - } - if err := parser.Loadconfig(config.DiceConfig); err != nil { - log.Fatal(err) - } - - config.MergeFlags(&flagsConfig) - render() - } else { - log.Fatal("Config file path not provided") - } - default: - defaultConfig(&flagsConfig) - } - } - defaultConfig(&flagsConfig) } diff --git 
a/internal/commandhandler/commandhandler.go b/internal/commandhandler/commandhandler.go index 569c9b8a1..3c7821848 100644 --- a/internal/commandhandler/commandhandler.go +++ b/internal/commandhandler/commandhandler.go @@ -504,8 +504,14 @@ func (h *BaseCommandHandler) sendResponseToIOThread(resp interface{}, err error) h.ioThreadWriteChan <- resp } -func (h *BaseCommandHandler) isAuthenticated(diceDBCmd *cmd.DiceDBCmd) error { - if diceDBCmd.Cmd != auth.Cmd && !h.Session.IsActive() { +func (h *BaseCommandHandler) isAuthenticated(c *cmd.DiceDBCmd) error { + // TODO: Revisit the flow and check the need of explicitly whitelisting PING and CLIENT commands here. + // We might not need this special case handling for other commands. + if c.Cmd == "PING" || c.Cmd == "CLIENT" { + return nil + } + + if c.Cmd != auth.Cmd && !h.Session.IsActive() { return errors.New("NOAUTH Authentication required") } @@ -530,11 +536,11 @@ func (h *BaseCommandHandler) RespAuth(args []string) interface{} { return diceerrors.ErrWrongArgumentCount("AUTH") } - if config.DiceConfig.Auth.Password == "" { + if config.GlobalDiceDBConfig.Password == "" { return diceerrors.ErrAuth } - username := config.DiceConfig.Auth.UserName + username := config.GlobalDiceDBConfig.Username var password string if len(args) == 1 { diff --git a/internal/eval/eval.go b/internal/eval/eval.go index 5a77cee0f..e2e9898bc 100644 --- a/internal/eval/eval.go +++ b/internal/eval/eval.go @@ -117,11 +117,11 @@ func evalECHO(args []string, store *dstore.Store) []byte { func EvalAUTH(args []string, c *comm.Client) []byte { var err error - if config.DiceConfig.Auth.Password == "" { + if config.GlobalDiceDBConfig.Password == "" { return diceerrors.NewErrWithMessage("AUTH called without any password configured for the default user. Are you sure your configuration is correct?") } - username := config.DiceConfig.Auth.UserName + username := config.GlobalDiceDBConfig.Username var password string if len(args) == 1 { diff --git a/server/main.go b/server/main.go index 971060f04..1bcb336a6 100644 --- a/server/main.go +++ b/server/main.go @@ -18,6 +18,7 @@ import ( "syscall" "time" + "github.com/dicedb/dice/internal/auth" "github.com/dicedb/dice/internal/server/httpws" "github.com/dicedb/dice/internal/cli" @@ -44,6 +45,13 @@ func Start() { iid := observability.GetOrCreateInstanceID() config.DiceConfig.InstanceID = iid + // TODO: Handle the addition of the default user + // and new users in a much better way. Doing this using + // and empty password check is not a good solution. 
+ if config.GlobalDiceDBConfig.Password != "" { + _, _ = auth.UserStore.Add(config.GlobalDiceDBConfig.Username) + } + // This is counter intuitive, but it's the first thing that should be done // because this function parses the flags and prepares the config, cli.Execute() @@ -71,12 +79,14 @@ func Start() { if err != nil { slog.Warn("could not create WAL with", slog.String("wal-engine", config.DiceConfig.Persistence.WALEngine), slog.Any("error", err)) sigs <- syscall.SIGKILL + cancel() return } wl = _wl } else { slog.Error("unsupported WAL engine", slog.String("engine", config.DiceConfig.Persistence.WALEngine)) sigs <- syscall.SIGKILL + cancel() return } From 0136fe580020aca8ab77189e566dd3b9fd5b44b0 Mon Sep 17 00:00:00 2001 From: Arpit Date: Tue, 21 Jan 2025 18:13:45 +0000 Subject: [PATCH 03/12] Moving network config to top-level --- config/config.go | 6 ------ config/constants.go | 5 +++++ integration_tests/commands/http/setup.go | 1 - integration_tests/commands/resp/setup.go | 1 - integration_tests/commands/websocket/setup.go | 1 - integration_tests/config/parser_test.go | 5 ----- internal/clientio/io.go | 2 +- internal/clientio/io_test.go | 4 ++-- 8 files changed, 8 insertions(+), 17 deletions(-) create mode 100644 config/constants.go diff --git a/config/config.go b/config/config.go index 5f517f2d0..2791c94e5 100644 --- a/config/config.go +++ b/config/config.go @@ -46,7 +46,6 @@ type Config struct { Memory memory `config:"memory"` Persistence persistence `config:"persistence"` Logging logging `config:"logging"` - Network network `config:"network"` WAL WALConfig `config:"WAL"` } @@ -132,11 +131,6 @@ type logging struct { LogDir string `config:"log_dir" default:"/tmp/dicedb" validate:"dirpath"` } -type network struct { - IOBufferLengthMAX int `config:"io_buffer_length_max" default:"51200" validate:"min=0,max=1048576"` // max is 1MB' - IOBufferLength int `config:"io_buffer_length" default:"512" validate:"min=0"` -} - // DiceConfig is the global configuration object for dice var DiceConfig = &Config{} diff --git a/config/constants.go b/config/constants.go new file mode 100644 index 000000000..2c88d3f4c --- /dev/null +++ b/config/constants.go @@ -0,0 +1,5 @@ +package config + +const ( + IOBufferLength int = 512 +) diff --git a/integration_tests/commands/http/setup.go b/integration_tests/commands/http/setup.go index 39b1c0972..e8d5c24a8 100644 --- a/integration_tests/commands/http/setup.go +++ b/integration_tests/commands/http/setup.go @@ -110,7 +110,6 @@ func (e *HTTPCommandExecutor) Name() string { } func RunHTTPServer(ctx context.Context, wg *sync.WaitGroup, opt TestServerOptions) { - config.DiceConfig.Network.IOBufferLength = 16 config.DiceConfig.Persistence.WriteAOFOnCleanup = false globalErrChannel := make(chan error) diff --git a/integration_tests/commands/resp/setup.go b/integration_tests/commands/resp/setup.go index 560f35b58..ea4fa74b5 100644 --- a/integration_tests/commands/resp/setup.go +++ b/integration_tests/commands/resp/setup.go @@ -184,7 +184,6 @@ func fireCommandAndGetRESPParser(conn net.Conn, cmd string) *clientio.RESPParser } func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) { - config.DiceConfig.Network.IOBufferLength = 16 config.DiceConfig.Persistence.WriteAOFOnCleanup = false // #1261: Added here to prevent resp integration tests from failing on lower-spec machines diff --git a/integration_tests/commands/websocket/setup.go b/integration_tests/commands/websocket/setup.go index 7560f578f..7c0bff597 100644 --- a/integration_tests/commands/websocket/setup.go +++ 
b/integration_tests/commands/websocket/setup.go @@ -110,7 +110,6 @@ func (e *WebsocketCommandExecutor) Name() string { } func RunWebsocketServer(ctx context.Context, wg *sync.WaitGroup, opt TestServerOptions) { - config.DiceConfig.Network.IOBufferLength = 16 config.DiceConfig.Persistence.WriteAOFOnCleanup = false // Initialize WebsocketServer diff --git a/integration_tests/config/parser_test.go b/integration_tests/config/parser_test.go index 4e30eccdb..d5a74c7cf 100644 --- a/integration_tests/config/parser_test.go +++ b/integration_tests/config/parser_test.go @@ -85,11 +85,6 @@ type logging struct { LogLevel string `config:"log_level" default:"info" validate:"oneof=debug info warn error"` } -type network struct { - IOBufferLengthMAX int `config:"io_buffer_length_max" default:"51200" validate:"min=0,max=1048576"` // max is 1MB' - IOBufferLength int `config:"io_buffer_length" default:"512" validate:"min=0"` -} - func TestNewConfigParser(t *testing.T) { parser := config.NewConfigParser() if parser == nil { diff --git a/internal/clientio/io.go b/internal/clientio/io.go index 510e60c67..6ee7a6462 100644 --- a/internal/clientio/io.go +++ b/internal/clientio/io.go @@ -37,7 +37,7 @@ func NewRESPParserWithBytes(c io.ReadWriter, initBytes []byte) *RESPParser { // we want. // note: the size 512 is arbitrarily chosen, and we can put // a decent thought into deciding the optimal value (in case it affects the perf) - tbuf: make([]byte, config.DiceConfig.Network.IOBufferLength), + tbuf: make([]byte, config.IOBufferLength), } } diff --git a/internal/clientio/io_test.go b/internal/clientio/io_test.go index a5333b6c1..73b5b788d 100644 --- a/internal/clientio/io_test.go +++ b/internal/clientio/io_test.go @@ -121,7 +121,7 @@ func TestDecodeOneEmptyMessage(t *testing.T) { } func TestDecodeOneHighVolumeData(t *testing.T) { - largeString := bytes.Repeat([]byte("a"), 10*config.DiceConfig.Network.IOBufferLength) + largeString := bytes.Repeat([]byte("a"), 10*config.IOBufferLength) mockRW := &MockReadWriter{ ReadChunks: [][]byte{ []byte("$" + strconv.Itoa(len(largeString)) + "\r\n"), @@ -178,7 +178,7 @@ func TestDecodeOnePartialMessages(t *testing.T) { } func TestDecodeOneVeryLargeMessage(t *testing.T) { - largeString := bytes.Repeat([]byte("a"), 10*config.DiceConfig.Network.IOBufferLength) + largeString := bytes.Repeat([]byte("a"), 10*config.IOBufferLength) mockRW := &MockReadWriter{ ReadChunks: [][]byte{ []byte("$" + strconv.Itoa(len(largeString)) + "\r\n"), From 9256b8766f6d9f7c6fdf777184b08955c6aeb3eb Mon Sep 17 00:00:00 2001 From: Arpit Date: Tue, 21 Jan 2025 18:39:54 +0000 Subject: [PATCH 04/12] Moving LogLevel to top level --- config/config.go | 7 +++---- internal/cli/cli.go | 2 +- internal/logger/logger.go | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/config/config.go b/config/config.go index 2791c94e5..8c1ceba9c 100644 --- a/config/config.go +++ b/config/config.go @@ -127,8 +127,7 @@ type WALConfig struct { } type logging struct { - LogLevel string `config:"log_level" default:"info" validate:"oneof=debug info warn error"` - LogDir string `config:"log_dir" default:"/tmp/dicedb" validate:"dirpath"` + LogDir string `config:"log_dir" default:"/tmp/dicedb" validate:"dirpath"` } // DiceConfig is the global configuration object for dice @@ -209,8 +208,6 @@ func MergeFlags(flags *Config) { DiceConfig.Performance.EnableWatch = flags.Performance.EnableWatch case "enable-profiling": DiceConfig.Performance.EnableProfiling = flags.Performance.EnableProfiling - case "log-level": - 
DiceConfig.Logging.LogLevel = flags.Logging.LogLevel case "log-dir": DiceConfig.Logging.LogDir = flags.Logging.LogDir case "enable-persistence": @@ -234,6 +231,8 @@ type DiceDBConfig struct { Username string `mapstructure:"username" default:"dicedb" description:"the username to use for authentication"` Password string `mapstructure:"password" default:"" description:"the password to use for authentication"` + + LogLevel string `mapstructure:"log-level" default:"info" description:"the log level"` } var GlobalDiceDBConfig *DiceDBConfig diff --git a/internal/cli/cli.go b/internal/cli/cli.go index 1e5e627c6..c60ba078c 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -90,7 +90,7 @@ func Execute() { flag.BoolVar(&flagsConfig.Performance.EnableWatch, "enable-watch", false, "enable support for .WATCH commands and real-time reactivity") flag.BoolVar(&flagsConfig.Performance.EnableProfiling, "enable-profiling", false, "enable profiling and capture critical metrics and traces in .prof files") - flag.StringVar(&flagsConfig.Logging.LogLevel, "log-level", "info", "log level, values: info, debug") + flag.StringVar(&tempStr, "log-level", "info", "log level, values: info, debug") flag.StringVar(&config.DiceConfig.Logging.LogDir, "log-dir", "/tmp/dicedb", "log directory path") flag.BoolVar(&flagsConfig.Persistence.Enabled, "enable-persistence", false, "enable write-ahead logging") diff --git a/internal/logger/logger.go b/internal/logger/logger.go index 14c11fab9..6ecc8e7bf 100644 --- a/internal/logger/logger.go +++ b/internal/logger/logger.go @@ -13,7 +13,7 @@ import ( ) func getSLogLevel() slog.Level { - switch config.DiceConfig.Logging.LogLevel { + switch config.GlobalDiceDBConfig.LogLevel { case "debug": return slog.LevelDebug case "info": From 07482e215ffb248ead5e7be8d21f96d9852e36d8 Mon Sep 17 00:00:00 2001 From: Arpit Date: Tue, 21 Jan 2025 18:44:35 +0000 Subject: [PATCH 05/12] Rmeoving logging sub-configuration --- config/config.go | 7 ------- internal/cli/cli.go | 2 +- 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/config/config.go b/config/config.go index 8c1ceba9c..592004452 100644 --- a/config/config.go +++ b/config/config.go @@ -45,7 +45,6 @@ type Config struct { Performance performance `config:"performance"` Memory memory `config:"memory"` Persistence persistence `config:"persistence"` - Logging logging `config:"logging"` WAL WALConfig `config:"WAL"` } @@ -126,10 +125,6 @@ type WALConfig struct { RecoveryMode string `config:"recovery_mode" default:"strict" validate:"oneof=strict truncate ignore"` } -type logging struct { - LogDir string `config:"log_dir" default:"/tmp/dicedb" validate:"dirpath"` -} - // DiceConfig is the global configuration object for dice var DiceConfig = &Config{} @@ -208,8 +203,6 @@ func MergeFlags(flags *Config) { DiceConfig.Performance.EnableWatch = flags.Performance.EnableWatch case "enable-profiling": DiceConfig.Performance.EnableProfiling = flags.Performance.EnableProfiling - case "log-dir": - DiceConfig.Logging.LogDir = flags.Logging.LogDir case "enable-persistence": DiceConfig.Persistence.Enabled = flags.Persistence.Enabled case "restore-from-wal": diff --git a/internal/cli/cli.go b/internal/cli/cli.go index c60ba078c..d676e360d 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -91,7 +91,7 @@ func Execute() { flag.BoolVar(&flagsConfig.Performance.EnableProfiling, "enable-profiling", false, "enable profiling and capture critical metrics and traces in .prof files") flag.StringVar(&tempStr, "log-level", "info", "log level, values: 
info, debug") - flag.StringVar(&config.DiceConfig.Logging.LogDir, "log-dir", "/tmp/dicedb", "log directory path") + flag.StringVar(&tempStr, "log-dir", "/tmp/dicedb", "log directory path") flag.BoolVar(&flagsConfig.Persistence.Enabled, "enable-persistence", false, "enable write-ahead logging") flag.BoolVar(&flagsConfig.Persistence.RestoreFromWAL, "restore-wal", false, "restore the database from the WAL files") From 089b1cbb37f65a33d05dee90c93927e1433deb9c Mon Sep 17 00:00:00 2001 From: Arpit Date: Wed, 22 Jan 2025 10:49:22 +0000 Subject: [PATCH 06/12] Getting rid of persistence config block and simplifying opts --- config/config.go | 20 ++----- integration_tests/commands/http/setup.go | 2 - integration_tests/commands/resp/setup.go | 2 - integration_tests/commands/websocket/setup.go | 2 - integration_tests/config/parser_test.go | 4 -- internal/cli/cli.go | 10 ++-- internal/shard/shard_thread.go | 2 +- internal/store/aof.go | 53 ------------------- server/main.go | 16 +++--- 9 files changed, 14 insertions(+), 97 deletions(-) diff --git a/config/config.go b/config/config.go index 592004452..9ee59cda4 100644 --- a/config/config.go +++ b/config/config.go @@ -44,7 +44,6 @@ type Config struct { WebSocket websocket `config:"websocket"` Performance performance `config:"performance"` Memory memory `config:"memory"` - Persistence persistence `config:"persistence"` WAL WALConfig `config:"WAL"` } @@ -88,19 +87,9 @@ type memory struct { LFULogFactor int `config:"lfu_log_factor" default:"10" validate:"min=0"` } -type persistence struct { - Enabled bool `config:"enabled" default:"false"` - AOFFile string `config:"aof_file" default:"./dice-master.aof" validate:"filepath"` - WriteAOFOnCleanup bool `config:"write_aof_on_cleanup" default:"false"` - RestoreFromWAL bool `config:"restore-wal" default:"false"` - WALEngine string `config:"wal-engine" default:"aof" validate:"oneof=sqlite aof"` -} - type WALConfig struct { // Directory where WAL log files will be stored LogDir string `config:"log_dir" default:"tmp/dicedb-wal"` - // Whether WAL is enabled - Enabled bool `config:"enabled" default:"true"` // WAL buffering mode: 'buffered' (writes buffered in memory) or 'unbuffered' (immediate disk writes) WalMode string `config:"wal_mode" default:"buffered" validate:"oneof=buffered unbuffered"` // Write mode: 'default' (OS handles syncing) or 'fsync' (explicit fsync after writes) @@ -203,12 +192,6 @@ func MergeFlags(flags *Config) { DiceConfig.Performance.EnableWatch = flags.Performance.EnableWatch case "enable-profiling": DiceConfig.Performance.EnableProfiling = flags.Performance.EnableProfiling - case "enable-persistence": - DiceConfig.Persistence.Enabled = flags.Persistence.Enabled - case "restore-from-wal": - DiceConfig.Persistence.RestoreFromWAL = flags.Persistence.RestoreFromWAL - case "wal-engine": - DiceConfig.Persistence.WALEngine = flags.Persistence.WALEngine case "keys-limit": DiceConfig.Memory.KeysLimit = flags.Memory.KeysLimit case "eviction-ratio": @@ -226,6 +209,9 @@ type DiceDBConfig struct { Password string `mapstructure:"password" default:"" description:"the password to use for authentication"` LogLevel string `mapstructure:"log-level" default:"info" description:"the log level"` + + EnableWAL bool `mapstructure:"enable-wal" default:"true" description:"enable write-ahead logging"` + WALEngine string `mapstructure:"wal-engine" default:"aof" description:"wal engine to use, values: sqlite, aof"` } var GlobalDiceDBConfig *DiceDBConfig diff --git a/integration_tests/commands/http/setup.go 
b/integration_tests/commands/http/setup.go index e8d5c24a8..d79e18598 100644 --- a/integration_tests/commands/http/setup.go +++ b/integration_tests/commands/http/setup.go @@ -110,8 +110,6 @@ func (e *HTTPCommandExecutor) Name() string { } func RunHTTPServer(ctx context.Context, wg *sync.WaitGroup, opt TestServerOptions) { - config.DiceConfig.Persistence.WriteAOFOnCleanup = false - globalErrChannel := make(chan error) shardManager := shard.NewShardManager(1, nil, globalErrChannel) diff --git a/integration_tests/commands/resp/setup.go b/integration_tests/commands/resp/setup.go index ea4fa74b5..060da0437 100644 --- a/integration_tests/commands/resp/setup.go +++ b/integration_tests/commands/resp/setup.go @@ -184,8 +184,6 @@ func fireCommandAndGetRESPParser(conn net.Conn, cmd string) *clientio.RESPParser } func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) { - config.DiceConfig.Persistence.WriteAOFOnCleanup = false - // #1261: Added here to prevent resp integration tests from failing on lower-spec machines config.DiceConfig.Memory.KeysLimit = 2000 if opt.Port != 0 { diff --git a/integration_tests/commands/websocket/setup.go b/integration_tests/commands/websocket/setup.go index 7c0bff597..4d55b15ff 100644 --- a/integration_tests/commands/websocket/setup.go +++ b/integration_tests/commands/websocket/setup.go @@ -110,8 +110,6 @@ func (e *WebsocketCommandExecutor) Name() string { } func RunWebsocketServer(ctx context.Context, wg *sync.WaitGroup, opt TestServerOptions) { - config.DiceConfig.Persistence.WriteAOFOnCleanup = false - // Initialize WebsocketServer globalErrChannel := make(chan error) shardManager := shard.NewShardManager(1, nil, globalErrChannel) diff --git a/integration_tests/config/parser_test.go b/integration_tests/config/parser_test.go index d5a74c7cf..1dd58b2cf 100644 --- a/integration_tests/config/parser_test.go +++ b/integration_tests/config/parser_test.go @@ -73,12 +73,8 @@ type memory struct { } type persistence struct { - AOFFile string `config:"aof_file" default:"./dice-master.aof" validate:"filepath"` PersistenceEnabled bool `config:"persistence_enabled" default:"true"` - WriteAOFOnCleanup bool `config:"write_aof_on_cleanup" default:"false"` WALDir string `config:"wal-dir" default:"./" validate:"dirpath"` - RestoreFromWAL bool `config:"restore-wal" default:"false"` - WALEngine string `config:"wal-engine" default:"aof" validate:"oneof=sqlite aof"` } type logging struct { diff --git a/internal/cli/cli.go b/internal/cli/cli.go index d676e360d..b3d4fbb4b 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -49,9 +49,6 @@ func printConfiguration() { // Add whether the watch feature is enabled slog.Info("running with", slog.Bool("profiling", config.DiceConfig.Performance.EnableProfiling)) - - // Add whether the persistence feature is enabled - slog.Info("running with", slog.Bool("persistence", config.DiceConfig.Persistence.Enabled)) } // printConfigTable prints key-value pairs in a vertical table format. 
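The Execute() hunk below gives the persistence flags the same treatment the log flags received in the previous patches: the flags stay registered so existing invocations still parse, but their values land in throwaway variables, and persistence is instead driven by the two flat keys (enable-wal, wal-engine) added to DiceDBConfig in this patch. A minimal, hedged sketch of the consuming side (key names come from the mapstructure tags; the yaml layout and the log line are assumptions, not excerpts):

// Hedged sketch: the flat WAL keys replace the removed Persistence block.
// In dicedb.yaml (assumed layout):
//
//   enable-wal: true
//   wal-engine: aof
//
// and in Go, consumers branch on the unmarshalled global:
if config.GlobalDiceDBConfig.EnableWAL {
    slog.Info("running with write-ahead logging",
        slog.String("wal-engine", config.GlobalDiceDBConfig.WALEngine))
}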
@@ -71,6 +68,7 @@ func render() { func Execute() { flagsConfig := config.Config{} var tempStr string + var tempBool bool flag.StringVar(&tempStr, "username", "dicedb", "deleted") flag.StringVar(&tempStr, "password", "dicedb", "deleted") @@ -93,9 +91,9 @@ func Execute() { flag.StringVar(&tempStr, "log-level", "info", "log level, values: info, debug") flag.StringVar(&tempStr, "log-dir", "/tmp/dicedb", "log directory path") - flag.BoolVar(&flagsConfig.Persistence.Enabled, "enable-persistence", false, "enable write-ahead logging") - flag.BoolVar(&flagsConfig.Persistence.RestoreFromWAL, "restore-wal", false, "restore the database from the WAL files") - flag.StringVar(&flagsConfig.Persistence.WALEngine, "wal-engine", "null", "wal engine to use, values: sqlite, aof") + flag.BoolVar(&tempBool, "enable-persistence", false, "enable write-ahead logging") + flag.BoolVar(&tempBool, "restore-wal", false, "restore the database from the WAL files") + flag.StringVar(&tempStr, "wal-engine", "null", "wal engine to use, values: sqlite, aof") flag.StringVar(&config.CustomConfigFilePath, "o", config.CustomConfigFilePath, "dir path to create the flagsConfig file") flag.StringVar(&config.CustomConfigDirPath, "c", config.CustomConfigDirPath, "file path of the config file") diff --git a/internal/shard/shard_thread.go b/internal/shard/shard_thread.go index 6e514cc9c..b928758b5 100644 --- a/internal/shard/shard_thread.go +++ b/internal/shard/shard_thread.go @@ -142,7 +142,7 @@ func (shard *ShardThread) processRequest(op *ops.StoreOp) { // cleanup handles cleanup logic when the shard stops. func (shard *ShardThread) cleanup() { close(shard.ReqChan) - if !config.DiceConfig.Persistence.Enabled || !config.DiceConfig.Persistence.WriteAOFOnCleanup { + if !config.GlobalDiceDBConfig.EnableWAL { return } } diff --git a/internal/store/aof.go b/internal/store/aof.go index 875825bf2..d2d5e9c28 100644 --- a/internal/store/aof.go +++ b/internal/store/aof.go @@ -5,17 +5,9 @@ package store import ( "bufio" - "bytes" - "fmt" "io/fs" - "log" "os" - "strings" "sync" - - "github.com/dicedb/dice/internal/object" - - "github.com/dicedb/dice/config" ) type AOF struct { @@ -84,48 +76,3 @@ func (a *AOF) Load() ([]string, error) { return operations, nil } - -func encodeString(v string) []byte { - return []byte(fmt.Sprintf("$%d\r\n%s\r\n", len(v), v)) -} - -func encode(strs []string) []byte { - var b []byte - buf := bytes.NewBuffer(b) - for _, b := range strs { - buf.Write(encodeString(b)) - } - return []byte(fmt.Sprintf("*%d\r\n%s", len(strs), buf.Bytes())) -} - -// TODO: Support Expiration -// TODO: Support non-kv data structures -// TODO: Support sync write -func dumpKey(aof *AOF, key string, obj *object.Obj) (err error) { - cmd := fmt.Sprintf("SET %s %s", key, obj.Value) - tokens := strings.Split(cmd, " ") - return aof.Write(string(encode(tokens))) -} - -// DumpAllAOF dumps all keys in the store to the AOF file -func DumpAllAOF(store *Store) error { - var ( - aof *AOF - err error - ) - if aof, err = NewAOF(config.DiceConfig.Persistence.AOFFile); err != nil { - return err - } - defer aof.Close() - - log.Println("rewriting AOF file at", config.DiceConfig.Persistence.AOFFile) - - store.store.All(func(k string, obj *object.Obj) bool { - err = dumpKey(aof, k, obj) - // continue if no error - return err == nil - }) - - log.Println("AOF file rewrite complete") - return err -} diff --git a/server/main.go b/server/main.go index 1bcb336a6..db4a88b1c 100644 --- a/server/main.go +++ b/server/main.go @@ -37,10 +37,6 @@ import ( dstore 
"github.com/dicedb/dice/internal/store" ) -const ( - WALEngineAOF = "aof" -) - func Start() { iid := observability.GetOrCreateInstanceID() config.DiceConfig.InstanceID = iid @@ -73,18 +69,18 @@ func Start() { ) wl, _ = wal.NewNullWAL() - if config.DiceConfig.Persistence.Enabled { - if config.DiceConfig.Persistence.WALEngine == WALEngineAOF { + if config.GlobalDiceDBConfig.EnableWAL { + if config.GlobalDiceDBConfig.WALEngine == "aof" { _wl, err := wal.NewAOFWAL(config.DiceConfig.WAL.LogDir) if err != nil { - slog.Warn("could not create WAL with", slog.String("wal-engine", config.DiceConfig.Persistence.WALEngine), slog.Any("error", err)) + slog.Warn("could not create WAL with", slog.String("wal-engine", config.GlobalDiceDBConfig.WALEngine), slog.Any("error", err)) sigs <- syscall.SIGKILL cancel() return } wl = _wl } else { - slog.Error("unsupported WAL engine", slog.String("engine", config.DiceConfig.Persistence.WALEngine)) + slog.Error("unsupported WAL engine", slog.String("engine", config.GlobalDiceDBConfig.WALEngine)) sigs <- syscall.SIGKILL cancel() return @@ -98,7 +94,7 @@ func Start() { slog.Debug("WAL initialization complete") - if config.DiceConfig.Persistence.RestoreFromWAL { + if config.GlobalDiceDBConfig.EnableWAL { slog.Info("restoring database from WAL") wal.ReplayWAL(wl) slog.Info("database restored from WAL") @@ -188,7 +184,7 @@ func Start() { close(sigs) - if config.DiceConfig.Persistence.Enabled { + if config.GlobalDiceDBConfig.EnableWAL { wal.ShutdownBG() } From 9009e993567417179159995ab18353ab3fcbd0e1 Mon Sep 17 00:00:00 2001 From: Arpit Date: Wed, 22 Jan 2025 11:07:45 +0000 Subject: [PATCH 07/12] Removing memory config and renaming default eviction as primitive --- config/config.go | 16 ---------------- config/constants.go | 4 +++- integration_tests/commands/resp/setup.go | 1 - integration_tests/config/parser_test.go | 5 ----- internal/cli/cli.go | 6 ++++-- internal/shard/shard_manager.go | 4 ++-- internal/store/batchevictionlru.go | 23 +++++++++++------------ internal/store/batchevictionlru_test.go | 15 +++++++-------- internal/store/store.go | 7 +------ 9 files changed, 28 insertions(+), 53 deletions(-) diff --git a/config/config.go b/config/config.go index 9ee59cda4..b1e96e9df 100644 --- a/config/config.go +++ b/config/config.go @@ -26,9 +26,6 @@ const ( EvictAllKeysLRU = "allkeys-lru" EvictAllKeysLFU = "allkeys-lfu" EvictBatchKeysLRU = "batch_keys_lru" - - DefaultKeysLimit int = 200000000 - DefaultEvictionRatio float64 = 0.1 ) var ( @@ -43,7 +40,6 @@ type Config struct { HTTP http `config:"http"` WebSocket websocket `config:"websocket"` Performance performance `config:"performance"` - Memory memory `config:"memory"` WAL WALConfig `config:"WAL"` } @@ -79,14 +75,6 @@ type performance struct { NumShards int `config:"num_shards" default:"-1" validate:"oneof=-1|min=1,lte=128"` } -type memory struct { - MaxMemory int64 `config:"max_memory" default:"0" validate:"min=0"` - EvictionPolicy string `config:"eviction_policy" default:"allkeys-lfu" validate:"oneof=simple-first allkeys-random allkeys-lru allkeys-lfu"` - EvictionRatio float64 `config:"eviction_ratio" default:"0.9" validate:"min=0,lte=1"` - KeysLimit int `config:"keys_limit" default:"200000000" validate:"min=10"` - LFULogFactor int `config:"lfu_log_factor" default:"10" validate:"min=0"` -} - type WALConfig struct { // Directory where WAL log files will be stored LogDir string `config:"log_dir" default:"tmp/dicedb-wal"` @@ -192,10 +180,6 @@ func MergeFlags(flags *Config) { DiceConfig.Performance.EnableWatch = 
flags.Performance.EnableWatch case "enable-profiling": DiceConfig.Performance.EnableProfiling = flags.Performance.EnableProfiling - case "keys-limit": - DiceConfig.Memory.KeysLimit = flags.Memory.KeysLimit - case "eviction-ratio": - DiceConfig.Memory.EvictionRatio = flags.Memory.EvictionRatio } }) } diff --git a/config/constants.go b/config/constants.go index 2c88d3f4c..cd5589a8e 100644 --- a/config/constants.go +++ b/config/constants.go @@ -1,5 +1,7 @@ package config const ( - IOBufferLength int = 512 + IOBufferLength int = 512 + EvictionRatio float64 = 0.9 + DefaultKeysLimit int = 200000000 ) diff --git a/integration_tests/commands/resp/setup.go b/integration_tests/commands/resp/setup.go index 060da0437..238c16c3b 100644 --- a/integration_tests/commands/resp/setup.go +++ b/integration_tests/commands/resp/setup.go @@ -185,7 +185,6 @@ func fireCommandAndGetRESPParser(conn net.Conn, cmd string) *clientio.RESPParser func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) { // #1261: Added here to prevent resp integration tests from failing on lower-spec machines - config.DiceConfig.Memory.KeysLimit = 2000 if opt.Port != 0 { config.DiceConfig.RespServer.Port = opt.Port } else { diff --git a/integration_tests/config/parser_test.go b/integration_tests/config/parser_test.go index 1dd58b2cf..4fbe53d2b 100644 --- a/integration_tests/config/parser_test.go +++ b/integration_tests/config/parser_test.go @@ -65,11 +65,6 @@ type performance struct { } type memory struct { - MaxMemory int64 `config:"max_memory" default:"0"` - EvictionPolicy string `config:"eviction_policy" default:"allkeys-lfu" validate:"oneof=simple-first allkeys-random allkeys-lru allkeys-lfu"` - EvictionRatio float64 `config:"eviction_ratio" default:"0.9" validate:"min=0,lte=1"` - KeysLimit int `config:"keys_limit" default:"200000000" validate:"min=0"` - LFULogFactor int `config:"lfu_log_factor" default:"10" validate:"min=0"` } type persistence struct { diff --git a/internal/cli/cli.go b/internal/cli/cli.go index b3d4fbb4b..923cbbce3 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -69,6 +69,8 @@ func Execute() { flagsConfig := config.Config{} var tempStr string var tempBool bool + var tempFloat float64 + var tempInt int flag.StringVar(&tempStr, "username", "dicedb", "deleted") flag.StringVar(&tempStr, "password", "dicedb", "deleted") @@ -98,10 +100,10 @@ func Execute() { flag.StringVar(&config.CustomConfigFilePath, "o", config.CustomConfigFilePath, "dir path to create the flagsConfig file") flag.StringVar(&config.CustomConfigDirPath, "c", config.CustomConfigDirPath, "file path of the config file") - flag.IntVar(&flagsConfig.Memory.KeysLimit, "keys-limit", config.DefaultKeysLimit, "keys limit for the DiceDB server. "+ + flag.IntVar(&tempInt, "keys-limit", config.DefaultKeysLimit, "keys limit for the DiceDB server. "+ "This flag controls the number of keys each shard holds at startup. 
You can multiply this number with the "+ "total number of shard threads to estimate how much memory will be required at system start up.") - flag.Float64Var(&flagsConfig.Memory.EvictionRatio, "eviction-ratio", 0.9, "ratio of keys to evict when the "+ + flag.Float64Var(&tempFloat, "eviction-ratio", 0.9, "ratio of keys to evict when the "+ "keys limit is reached") flag.Usage = func() { diff --git a/internal/shard/shard_manager.go b/internal/shard/shard_manager.go index b2201959e..6c4bd7407 100644 --- a/internal/shard/shard_manager.go +++ b/internal/shard/shard_manager.go @@ -35,9 +35,9 @@ func NewShardManager(shardCount uint8, cmdWatchChan chan dstore.CmdWatchEvent, g shardReqMap := make(map[ShardID]chan *ops.StoreOp) shardErrorChan := make(chan *ShardError) - maxKeysPerShard := config.DiceConfig.Memory.KeysLimit / int(shardCount) + maxKeysPerShard := config.DefaultKeysLimit / int(shardCount) for i := uint8(0); i < shardCount; i++ { - evictionStrategy := dstore.NewBatchEvictionLRU(maxKeysPerShard, config.DiceConfig.Memory.EvictionRatio) + evictionStrategy := dstore.NewPrimitiveEvictionStrategy(maxKeysPerShard) // Shards are numbered from 0 to shardCount-1 shard := NewShardThread(i, globalErrorChan, shardErrorChan, cmdWatchChan, evictionStrategy) shards[i] = shard diff --git a/internal/store/batchevictionlru.go b/internal/store/batchevictionlru.go index 339379053..ab6f039f9 100644 --- a/internal/store/batchevictionlru.go +++ b/internal/store/batchevictionlru.go @@ -7,6 +7,7 @@ import ( "container/heap" "math" + "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/object" ) @@ -43,21 +44,19 @@ func (h *evictionItemHeap) pop() evictionItem { return heap.Pop(h).(evictionItem) } -// BatchEvictionLRU implements batch eviction of least recently used keys -type BatchEvictionLRU struct { +// PrimitiveEvictionStrategy implements batch eviction of least recently used keys +type PrimitiveEvictionStrategy struct { BaseEvictionStrategy - maxKeys int - evictionRatio float64 + maxKeys int } -func NewBatchEvictionLRU(maxKeys int, evictionRatio float64) *BatchEvictionLRU { - return &BatchEvictionLRU{ - maxKeys: maxKeys, - evictionRatio: evictionRatio, +func NewPrimitiveEvictionStrategy(maxKeys int) *PrimitiveEvictionStrategy { + return &PrimitiveEvictionStrategy{ + maxKeys: maxKeys, } } -func (e *BatchEvictionLRU) ShouldEvict(store *Store) int { +func (e *PrimitiveEvictionStrategy) ShouldEvict(store *Store) int { currentKeyCount := store.GetKeyCount() // Check if eviction is necessary only till the number of keys remains less than maxKeys @@ -66,7 +65,7 @@ func (e *BatchEvictionLRU) ShouldEvict(store *Store) int { } // Calculate target key count after eviction - targetKeyCount := int(math.Ceil(float64(e.maxKeys) * (1 - e.evictionRatio))) + targetKeyCount := int(math.Ceil(float64(e.maxKeys) * (1 - config.EvictionRatio))) // Calculate the number of keys to evict to reach the target key count toEvict := currentKeyCount - targetKeyCount @@ -78,7 +77,7 @@ func (e *BatchEvictionLRU) ShouldEvict(store *Store) int { } // EvictVictims deletes keys with the lowest LastAccessedAt values from the store. 
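// (A hedged worked example for ShouldEvict above, not part of the patch: with
// maxKeys = 10 and the fixed config.EvictionRatio of 0.9, the post-eviction
// target is int(math.Ceil(10 * (1 - 0.9))) = 1 key, so a shard that reaches
// its 10-key limit evicts 10 - 1 = 9 keys in a single batch.)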
-func (e *BatchEvictionLRU) EvictVictims(store *Store, toEvict int) { +func (e *PrimitiveEvictionStrategy) EvictVictims(store *Store, toEvict int) { if toEvict <= 0 { return } @@ -111,6 +110,6 @@ func (e *BatchEvictionLRU) EvictVictims(store *Store, toEvict int) { e.stats.recordEviction(int64(toEvict)) } -func (e *BatchEvictionLRU) OnAccess(key string, obj *object.Obj, accessType AccessType) { +func (e *PrimitiveEvictionStrategy) OnAccess(key string, obj *object.Obj, accessType AccessType) { // Nothing to do for LRU batch eviction } diff --git a/internal/store/batchevictionlru_test.go b/internal/store/batchevictionlru_test.go index 9276f2554..b463d4272 100644 --- a/internal/store/batchevictionlru_test.go +++ b/internal/store/batchevictionlru_test.go @@ -14,7 +14,7 @@ import ( ) func TestEvictVictims_BelowMaxKeys(t *testing.T) { - eviction := NewBatchEvictionLRU(5, 0.2) + eviction := NewPrimitiveEvictionStrategy(5, 0.2) s := NewStore(nil, eviction) // Add 3 keys (below maxKeys of 5) @@ -36,8 +36,7 @@ func TestEvictVictims_BelowMaxKeys(t *testing.T) { func TestEvictVictims_ExceedsMaxKeys(t *testing.T) { maxKeys := 5 - evictionRatio := 0.4 - eviction := NewBatchEvictionLRU(maxKeys, evictionRatio) + eviction := NewPrimitiveEvictionStrategy(maxKeys) s := NewStore(nil, eviction) // Add 10 keys, exceeding maxKeys of 5 @@ -59,7 +58,7 @@ func TestEvictVictims_EvictsLRU(t *testing.T) { mockTime := &utils.MockClock{CurrTime: time.Now()} utils.CurrentTime = mockTime - eviction := NewBatchEvictionLRU(10, 0.4) + eviction := NewPrimitiveEvictionStrategy(10, 0.4) s := NewStore(nil, eviction) // Add keys with varying LastAccessedAt @@ -89,7 +88,7 @@ func TestEvictVictims_IdenticalLastAccessedAt(t *testing.T) { currentTime := time.Now() mockTime := &utils.MockClock{CurrTime: currentTime} utils.CurrentTime = mockTime - eviction := NewBatchEvictionLRU(10, 0.5) + eviction := NewPrimitiveEvictionStrategy(10, 0.5) s := NewStore(nil, eviction) // Add 10 keys with identical LastAccessedAt @@ -105,7 +104,7 @@ func TestEvictVictims_IdenticalLastAccessedAt(t *testing.T) { } func TestEvictVictims_EvictsAtLeastOne(t *testing.T) { - eviction := NewBatchEvictionLRU(10, 0.000) // 0% eviction rate + eviction := NewPrimitiveEvictionStrategy(10, 0.000) // 0% eviction rate s := NewStore(nil, eviction) // Add 10 keys (equals maxKeys) @@ -120,7 +119,7 @@ func TestEvictVictims_EvictsAtLeastOne(t *testing.T) { } func TestEvictVictims_EmptyStore(t *testing.T) { // Handles Empty Store Gracefully - eviction := NewBatchEvictionLRU(5, 0.2) + eviction := NewPrimitiveEvictionStrategy(5) s := NewStore(nil, eviction) toEvict := eviction.ShouldEvict(s) @@ -134,7 +133,7 @@ func TestEvictVictims_LastAccessedAtUpdated(t *testing.T) { currentTime := time.Now() mockTime := &utils.MockClock{CurrTime: currentTime} utils.CurrentTime = mockTime - eviction := NewBatchEvictionLRU(10, 0.4) + eviction := NewPrimitiveEvictionStrategy(10, 0.4) s := NewStore(nil, eviction) // Add keys with initial LastAccessedAt diff --git a/internal/store/store.go b/internal/store/store.go index 224a897a1..5fc659d94 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -6,8 +6,6 @@ package store import ( "path" - "github.com/dicedb/dice/config" - "github.com/dicedb/dice/internal/common" "github.com/dicedb/dice/internal/object" "github.com/dicedb/dice/internal/server/utils" @@ -34,10 +32,7 @@ func NewExpireMap() common.ITable[*object.Obj, uint64] { } func NewDefaultEviction() EvictionStrategy { - return &BatchEvictionLRU{ - maxKeys: 
config.DefaultKeysLimit, - evictionRatio: config.DefaultEvictionRatio, - } + return &PrimitiveEvictionStrategy{} } // QueryWatchEvent represents a change in a watched key. From 2932f9690a012bffc56d86be346f93a916085afd Mon Sep 17 00:00:00 2001 From: Arpit Date: Wed, 22 Jan 2025 11:32:51 +0000 Subject: [PATCH 08/12] Removing performance config and moving key elements to top-level --- config/config.go | 35 +++++++---------------- config/constants.go | 12 ++++++-- config/validator.go | 5 ++-- integration_tests/commands/resp/setup.go | 9 +++--- integration_tests/config/parser_test.go | 9 ------ integration_tests/server/max_conn_test.go | 8 +++--- internal/cli/cli.go | 17 ++++------- internal/commandhandler/commandhandler.go | 2 +- internal/commandhandler/registry.go | 5 ++-- internal/iomultiplexer/epoll_linux.go | 10 ++++--- internal/iomultiplexer/kqueue_darwin.go | 10 ++++--- internal/iothread/manager.go | 11 ++++--- internal/shard/shard_thread.go | 2 +- server/main.go | 14 ++++----- 14 files changed, 64 insertions(+), 85 deletions(-) diff --git a/config/config.go b/config/config.go index b1e96e9df..bf70c388e 100644 --- a/config/config.go +++ b/config/config.go @@ -34,13 +34,12 @@ var ( ) type Config struct { - Version string `config:"version" default:"0.1.0"` - InstanceID string `config:"instance_id"` - RespServer respServer `config:"async_server"` - HTTP http `config:"http"` - WebSocket websocket `config:"websocket"` - Performance performance `config:"performance"` - WAL WALConfig `config:"WAL"` + Version string `config:"version" default:"0.1.0"` + InstanceID string `config:"instance_id"` + RespServer respServer `config:"async_server"` + HTTP http `config:"http"` + WebSocket websocket `config:"websocket"` + WAL WALConfig `config:"WAL"` } type respServer struct { @@ -63,18 +62,6 @@ type websocket struct { WriteResponseTimeout time.Duration `config:"write_response_timeout" default:"10s"` } -type performance struct { - WatchChanBufSize int `config:"watch_chan_buf_size" default:"20000"` - ShardCronFrequency time.Duration `config:"shard_cron_frequency" default:"1s"` - MultiplexerPollTimeout time.Duration `config:"multiplexer_poll_timeout" default:"100ms"` - MaxClients uint32 `config:"max_clients" default:"20000" validate:"min=0"` - StoreMapInitSize int `config:"store_map_init_size" default:"1024000"` - AdhocReqChanBufSize int `config:"adhoc_req_chan_buf_size" default:"20"` - EnableProfiling bool `config:"profiling" default:"false"` - EnableWatch bool `config:"enable_watch" default:"false"` - NumShards int `config:"num_shards" default:"-1" validate:"oneof=-1|min=1,lte=128"` -} - type WALConfig struct { // Directory where WAL log files will be stored LogDir string `config:"log_dir" default:"tmp/dicedb-wal"` @@ -174,12 +161,6 @@ func MergeFlags(flags *Config) { DiceConfig.WebSocket.Enabled = flags.WebSocket.Enabled case "websocket-port": DiceConfig.WebSocket.Port = flags.WebSocket.Port - case "num-shards": - DiceConfig.Performance.NumShards = flags.Performance.NumShards - case "enable-watch": - DiceConfig.Performance.EnableWatch = flags.Performance.EnableWatch - case "enable-profiling": - DiceConfig.Performance.EnableProfiling = flags.Performance.EnableProfiling } }) } @@ -196,6 +177,10 @@ type DiceDBConfig struct { EnableWAL bool `mapstructure:"enable-wal" default:"true" description:"enable write-ahead logging"` WALEngine string `mapstructure:"wal-engine" default:"aof" description:"wal engine to use, values: sqlite, aof"` + + EnableWatch bool `mapstructure:"enable-watch" default:"false" 
description:"enable support for .WATCH commands and real-time reactivity"` + MaxClients int `mapstructure:"max-clients" default:"20000" description:"the maximum number of clients to accept"` + NumShards int `mapstructure:"num-shards" default:"-1" description:"number of shards to create. defaults to number of cores"` } var GlobalDiceDBConfig *DiceDBConfig diff --git a/config/constants.go b/config/constants.go index cd5589a8e..38de37d7e 100644 --- a/config/constants.go +++ b/config/constants.go @@ -1,7 +1,13 @@ package config +import "time" + const ( - IOBufferLength int = 512 - EvictionRatio float64 = 0.9 - DefaultKeysLimit int = 200000000 + IOBufferLength int = 512 + EvictionRatio float64 = 0.9 + DefaultKeysLimit int = 200000000 + WatchChanBufSize int = 20000 + ShardCronFrequency time.Duration = 1 * time.Second + AdhocReqChanBufSize int = 20 + EnableProfile bool = false ) diff --git a/config/validator.go b/config/validator.go index 34f0b46a6..740c850fe 100644 --- a/config/validator.go +++ b/config/validator.go @@ -44,9 +44,8 @@ func validateConfig(config *Config) error { } func validateShardCount(sl validator.StructLevel) { - config := sl.Current().Interface().(Config) - if config.Performance.NumShards <= 0 && config.Performance.NumShards != -1 { - sl.ReportError(config.Performance.NumShards, "NumShards", "NumShards", "invalidValue", "must be -1 or greater than 0") + if GlobalDiceDBConfig.NumShards <= 0 && GlobalDiceDBConfig.NumShards != -1 { + sl.ReportError(GlobalDiceDBConfig.NumShards, "NumShards", "NumShards", "invalidValue", "must be -1 or greater than 0") } } diff --git a/integration_tests/commands/resp/setup.go b/integration_tests/commands/resp/setup.go index 238c16c3b..1a3ec1bb5 100644 --- a/integration_tests/commands/resp/setup.go +++ b/integration_tests/commands/resp/setup.go @@ -33,8 +33,7 @@ import ( ) type TestServerOptions struct { - Port int - MaxClients int32 + Port int } func init() { @@ -191,12 +190,12 @@ func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) { config.DiceConfig.RespServer.Port = 9739 } - cmdWatchChan := make(chan dstore.CmdWatchEvent, config.DiceConfig.Performance.WatchChanBufSize) + cmdWatchChan := make(chan dstore.CmdWatchEvent, config.WatchChanBufSize) cmdWatchSubscriptionChan := make(chan watchmanager.WatchSubscription) gec := make(chan error) shardManager := shard.NewShardManager(1, cmdWatchChan, gec) - ioThreadManager := iothread.NewManager(config.DiceConfig.Performance.MaxClients) - cmdHandlerManager := commandhandler.NewRegistry(config.DiceConfig.Performance.MaxClients, shardManager) + ioThreadManager := iothread.NewManager() + cmdHandlerManager := commandhandler.NewRegistry(shardManager) // Initialize the RESP Server wl, _ := wal.NewNullWAL() diff --git a/integration_tests/config/parser_test.go b/integration_tests/config/parser_test.go index 4fbe53d2b..af0b35c0d 100644 --- a/integration_tests/config/parser_test.go +++ b/integration_tests/config/parser_test.go @@ -53,15 +53,6 @@ type websocket struct { } type performance struct { - WatchChanBufSize int `config:"watch_chan_buf_size" default:"20000"` - ShardCronFrequency time.Duration `config:"shard_cron_frequency" default:"1s"` - MultiplexerPollTimeout time.Duration `config:"multiplexer_poll_timeout" default:"100ms"` - MaxClients int32 `config:"max_clients" default:"20000" validate:"min=0"` - StoreMapInitSize int `config:"store_map_init_size" default:"1024000"` - AdhocReqChanBufSize int `config:"adhoc_req_chan_buf_size" default:"20"` - EnableProfiling bool `config:"profiling" 
default:"false"` - EnableWatch bool `config:"enable_watch" default:"false"` - NumShards int `config:"num_shards" default:"-1" validate:"oneof=-1|min=1,lte=128"` } type memory struct { diff --git a/integration_tests/server/max_conn_test.go b/integration_tests/server/max_conn_test.go index f868985e1..71e874777 100644 --- a/integration_tests/server/max_conn_test.go +++ b/integration_tests/server/max_conn_test.go @@ -26,15 +26,15 @@ func getConnection(port int) (net.Conn, error) { func TestMaxConnection(t *testing.T) { var wg sync.WaitGroup + var maxClients uint32 = 50 var maxConnTestOptions = commands.TestServerOptions{ - Port: 8741, - MaxClients: 50, + Port: 8741, } commands.RunTestServer(&wg, maxConnTestOptions) time.Sleep(2 * time.Second) - var maxConnLimit = maxConnTestOptions.MaxClients + 2 + var maxConnLimit = maxClients + 2 connections := make([]net.Conn, maxConnLimit) defer func() { // Ensure all connections are closed at the end of the test @@ -45,7 +45,7 @@ func TestMaxConnection(t *testing.T) { } }() - for i := int32(0); i < maxConnLimit; i++ { + for i := uint32(0); i < maxConnLimit; i++ { conn, err := getConnection(maxConnTestOptions.Port) if err == nil { connections[i] = conn diff --git a/internal/cli/cli.go b/internal/cli/cli.go index 923cbbce3..b0c86fead 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -39,16 +39,10 @@ func printConfiguration() { // Conditionally add the number of shards to be used for DiceDB numShards := runtime.NumCPU() - if config.DiceConfig.Performance.NumShards > 0 { - numShards = config.DiceConfig.Performance.NumShards + if config.GlobalDiceDBConfig.NumShards > 0 { + numShards = config.GlobalDiceDBConfig.NumShards } slog.Info("running with", slog.Int("shards", numShards)) - - // Add whether the watch feature is enabled - slog.Info("running with", slog.Bool("watch", config.DiceConfig.Performance.EnableWatch)) - - // Add whether the watch feature is enabled - slog.Info("running with", slog.Bool("profiling", config.DiceConfig.Performance.EnableProfiling)) } // printConfigTable prints key-value pairs in a vertical table format. @@ -85,10 +79,9 @@ func Execute() { flag.IntVar(&flagsConfig.WebSocket.Port, "websocket-port", 8379, "port for accepting requets over WebSocket") flag.BoolVar(&flagsConfig.WebSocket.Enabled, "enable-websocket", false, "enable DiceDB to listen, accept, and process WebSocket") - flag.IntVar(&flagsConfig.Performance.NumShards, "num-shards", -1, "number shards to create. defaults to number of cores") + flag.IntVar(&tempInt, "num-shards", -1, "number shards to create. 
defaults to number of cores") - flag.BoolVar(&flagsConfig.Performance.EnableWatch, "enable-watch", false, "enable support for .WATCH commands and real-time reactivity") - flag.BoolVar(&flagsConfig.Performance.EnableProfiling, "enable-profiling", false, "enable profiling and capture critical metrics and traces in .prof files") + flag.BoolVar(&tempBool, "enable-watch", false, "enable support for .WATCH commands and real-time reactivity") flag.StringVar(&tempStr, "log-level", "info", "log level, values: info, debug") flag.StringVar(&tempStr, "log-dir", "/tmp/dicedb", "log directory path") @@ -96,6 +89,8 @@ func Execute() { flag.BoolVar(&tempBool, "enable-persistence", false, "enable write-ahead logging") flag.BoolVar(&tempBool, "restore-wal", false, "restore the database from the WAL files") flag.StringVar(&tempStr, "wal-engine", "null", "wal engine to use, values: sqlite, aof") + flag.BoolVar(&tempBool, "enable-wal", false, "enable wal") + flag.IntVar(&tempInt, "max-clients", 200000, "max clients") flag.StringVar(&config.CustomConfigFilePath, "o", config.CustomConfigFilePath, "dir path to create the flagsConfig file") flag.StringVar(&config.CustomConfigDirPath, "c", config.CustomConfigDirPath, "file path of the config file") diff --git a/internal/commandhandler/commandhandler.go b/internal/commandhandler/commandhandler.go index 3c7821848..0475831ad 100644 --- a/internal/commandhandler/commandhandler.go +++ b/internal/commandhandler/commandhandler.go @@ -65,7 +65,7 @@ func NewCommandHandler(id string, responseChan, preprocessingChan chan *ops.Stor id: id, parser: parser, shardManager: shardManager, - adhocReqChan: make(chan *cmd.DiceDBCmd, config.DiceConfig.Performance.AdhocReqChanBufSize), + adhocReqChan: make(chan *cmd.DiceDBCmd, config.AdhocReqChanBufSize), Session: auth.NewSession(), globalErrorChan: gec, ioThreadReadChan: ioThreadReadChan, diff --git a/internal/commandhandler/registry.go b/internal/commandhandler/registry.go index 318e03edf..d03443c5d 100644 --- a/internal/commandhandler/registry.go +++ b/internal/commandhandler/registry.go @@ -8,6 +8,7 @@ import ( "sync" "sync/atomic" + "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/shard" ) @@ -26,9 +27,9 @@ var ( ErrCmdHandlerResponseChanNil = errors.New("command handler response channel is nil") ) -func NewRegistry(maxClients uint32, sm *shard.ShardManager) *Registry { +func NewRegistry(sm *shard.ShardManager) *Registry { return &Registry{ - maxCmdHandlers: maxClients, + maxCmdHandlers: uint32(config.GlobalDiceDBConfig.MaxClients), ShardManager: sm, } } diff --git a/internal/iomultiplexer/epoll_linux.go b/internal/iomultiplexer/epoll_linux.go index 69b2d4c93..60f49d986 100644 --- a/internal/iomultiplexer/epoll_linux.go +++ b/internal/iomultiplexer/epoll_linux.go @@ -7,6 +7,8 @@ import ( "fmt" "syscall" "time" + + "github.com/dicedb/dice/config" ) // Epoll implements the IOMultiplexer interface for Linux-based systems @@ -21,8 +23,8 @@ type Epoll struct { } // New creates a new Epoll instance -func New(maxClients int32) (*Epoll, error) { - if maxClients < 0 { +func New() (*Epoll, error) { + if config.GlobalDiceDBConfig.MaxClients == 0 { return nil, ErrInvalidMaxClients } @@ -33,8 +35,8 @@ func New(maxClients int32) (*Epoll, error) { return &Epoll{ fd: fd, - ePollEvents: make([]syscall.EpollEvent, maxClients), - diceEvents: make([]Event, maxClients), + ePollEvents: make([]syscall.EpollEvent, config.GlobalDiceDBConfig.MaxClients), + diceEvents: make([]Event, config.GlobalDiceDBConfig.MaxClients), }, nil } diff --git 
a/internal/iomultiplexer/kqueue_darwin.go b/internal/iomultiplexer/kqueue_darwin.go index de357f2b1..56189d924 100644 --- a/internal/iomultiplexer/kqueue_darwin.go +++ b/internal/iomultiplexer/kqueue_darwin.go @@ -7,6 +7,8 @@ import ( "fmt" "syscall" "time" + + "github.com/dicedb/dice/config" ) // KQueue implements the IOMultiplexer interface for Darwin-based systems @@ -21,8 +23,8 @@ type KQueue struct { } // New creates a new KQueue instance -func New(maxClients int32) (*KQueue, error) { - if maxClients < 0 { +func New() (*KQueue, error) { + if config.GlobalDiceDBConfig.MaxClients < 0 { return nil, ErrInvalidMaxClients } @@ -33,8 +35,8 @@ func New(maxClients int32) (*KQueue, error) { return &KQueue{ fd: fd, - kQEvents: make([]syscall.Kevent_t, maxClients), - diceEvents: make([]Event, maxClients), + kQEvents: make([]syscall.Kevent_t, config.GlobalDiceDBConfig.MaxClients), + diceEvents: make([]Event, config.GlobalDiceDBConfig.MaxClients), }, nil } diff --git a/internal/iothread/manager.go b/internal/iothread/manager.go index 3bbf19cbb..752cbd5dd 100644 --- a/internal/iothread/manager.go +++ b/internal/iothread/manager.go @@ -7,12 +7,13 @@ import ( "errors" "sync" "sync/atomic" + + "github.com/dicedb/dice/config" ) type Manager struct { connectedClients sync.Map numIOThreads atomic.Uint32 - maxClients uint32 mu sync.Mutex } @@ -21,17 +22,15 @@ var ( ErrIOThreadNotFound = errors.New("io-thread not found") ) -func NewManager(maxClients uint32) *Manager { - return &Manager{ - maxClients: maxClients, - } +func NewManager() *Manager { + return &Manager{} } func (m *Manager) RegisterIOThread(ioThread IOThread) error { m.mu.Lock() defer m.mu.Unlock() - if m.IOThreadCount() >= m.maxClients { + if m.IOThreadCount() >= uint32(config.GlobalDiceDBConfig.MaxClients) { return ErrMaxClientsReached } diff --git a/internal/shard/shard_thread.go b/internal/shard/shard_thread.go index b928758b5..54fef62eb 100644 --- a/internal/shard/shard_thread.go +++ b/internal/shard/shard_thread.go @@ -56,7 +56,7 @@ func NewShardThread(id ShardID, gec chan error, sec chan *ShardError, globalErrorChan: gec, shardErrorChan: sec, lastCronExecTime: utils.GetCurrentTime(), - cronFrequency: config.DiceConfig.Performance.ShardCronFrequency, + cronFrequency: config.ShardCronFrequency, } } diff --git a/server/main.go b/server/main.go index db4a88b1c..367bbaab4 100644 --- a/server/main.go +++ b/server/main.go @@ -101,8 +101,8 @@ func Start() { } } - if config.DiceConfig.Performance.EnableWatch { - bufSize := config.DiceConfig.Performance.WatchChanBufSize + if config.GlobalDiceDBConfig.EnableWatch { + bufSize := config.WatchChanBufSize cmdWatchChan = make(chan dstore.CmdWatchEvent, bufSize) } @@ -112,8 +112,8 @@ func Start() { // core count ensures the application can make full use of all available hardware. 
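// (A hedged numeric example, not part of the original comment: leaving
// num-shards at its default of -1 on a machine where runtime.NumCPU() is 8
// resolves numShards to 8, and NewShardManager, changed in the previous patch,
// then caps each shard at config.DefaultKeysLimit / 8 = 25,000,000 keys before
// the primitive eviction strategy begins evicting.)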
var numShards int numShards = runtime.NumCPU() - if config.DiceConfig.Performance.NumShards > 0 { - numShards = config.DiceConfig.Performance.NumShards + if config.GlobalDiceDBConfig.NumShards > 0 { + numShards = config.GlobalDiceDBConfig.NumShards } // The runtime.GOMAXPROCS(numShards) call limits the number of operating system @@ -135,7 +135,7 @@ func Start() { var serverWg sync.WaitGroup - if config.DiceConfig.Performance.EnableProfiling { + if config.EnableProfile { stopProfiling, err := startProfiling() if err != nil { slog.Error("Profiling could not be started", slog.Any("error", err)) @@ -143,8 +143,8 @@ func Start() { } defer stopProfiling() } - ioThreadManager := iothread.NewManager(config.DiceConfig.Performance.MaxClients) - cmdHandlerManager := commandhandler.NewRegistry(config.DiceConfig.Performance.MaxClients, shardManager) + ioThreadManager := iothread.NewManager() + cmdHandlerManager := commandhandler.NewRegistry(shardManager) respServer := resp.NewServer(shardManager, ioThreadManager, cmdHandlerManager, cmdWatchSubscriptionChan, cmdWatchChan, serverErrCh, wl) serverWg.Add(1) From 76f0fe0ce1fcd3b98b937d3b2cbd4ece8570ddd7 Mon Sep 17 00:00:00 2001 From: Arpit Date: Wed, 22 Jan 2025 15:29:06 +0000 Subject: [PATCH 09/12] Removing configuration for HTTP and WebSocket --- config/config.go | 36 ------------------- config/constants.go | 16 +++++---- integration_tests/commands/http/setup.go | 2 -- integration_tests/commands/websocket/setup.go | 1 - integration_tests/config/parser_test.go | 7 ++-- internal/cli/cli.go | 20 ++--------- internal/server/httpws/httpServer.go | 3 +- internal/server/httpws/websocketServer.go | 12 +++---- server/main.go | 6 ++-- 9 files changed, 24 insertions(+), 79 deletions(-) diff --git a/config/config.go b/config/config.go index bf70c388e..eb922cb95 100644 --- a/config/config.go +++ b/config/config.go @@ -4,7 +4,6 @@ package config import ( - "flag" "fmt" "log/slog" "os" @@ -37,8 +36,6 @@ type Config struct { Version string `config:"version" default:"0.1.0"` InstanceID string `config:"instance_id"` RespServer respServer `config:"async_server"` - HTTP http `config:"http"` - WebSocket websocket `config:"websocket"` WAL WALConfig `config:"WAL"` } @@ -50,18 +47,6 @@ type respServer struct { MaxConn int32 `config:"max_conn" default:"0"` } -type http struct { - Enabled bool `config:"enabled" default:"true"` - Port int `config:"port" default:"8082" validate:"number,gte=0,lte=65535"` -} - -type websocket struct { - Enabled bool `config:"enabled" default:"true"` - Port int `config:"port" default:"8379" validate:"number,gte=0,lte=65535"` - MaxWriteResponseRetries int `config:"max_write_response_retries" default:"3" validate:"min=0"` - WriteResponseTimeout time.Duration `config:"write_response_timeout" default:"10s"` -} - type WALConfig struct { // Directory where WAL log files will be stored LogDir string `config:"log_dir" default:"tmp/dicedb-wal"` @@ -144,27 +129,6 @@ func loadDiceConfig(configFilePath string) error { return parser.Loadconfig(DiceConfig) } -func MergeFlags(flags *Config) { - flagset := flag.CommandLine - flagset.Visit(func(f *flag.Flag) { - // updating values for flags that were explicitly set by the user - switch f.Name { - case "host": - DiceConfig.RespServer.Addr = flags.RespServer.Addr - case "port": - DiceConfig.RespServer.Port = flags.RespServer.Port - case "enable-http": - DiceConfig.HTTP.Enabled = flags.HTTP.Enabled - case "http-port": - DiceConfig.HTTP.Port = flags.HTTP.Port - case "enable-websocket": - DiceConfig.WebSocket.Enabled = 
flags.WebSocket.Enabled - case "websocket-port": - DiceConfig.WebSocket.Port = flags.WebSocket.Port - } - }) -} - type DiceDBConfig struct { Host string `mapstructure:"host" default:"0.0.0.0" description:"the host address to bind to"` Port int `mapstructure:"port" default:"7379" description:"the port to bind to"` diff --git a/config/constants.go b/config/constants.go index 38de37d7e..9bd97b6cc 100644 --- a/config/constants.go +++ b/config/constants.go @@ -3,11 +3,13 @@ package config import "time" const ( - IOBufferLength int = 512 - EvictionRatio float64 = 0.9 - DefaultKeysLimit int = 200000000 - WatchChanBufSize int = 20000 - ShardCronFrequency time.Duration = 1 * time.Second - AdhocReqChanBufSize int = 20 - EnableProfile bool = false + IOBufferLength int = 512 + EvictionRatio float64 = 0.9 + DefaultKeysLimit int = 200000000 + WatchChanBufSize int = 20000 + ShardCronFrequency time.Duration = 1 * time.Second + AdhocReqChanBufSize int = 20 + EnableProfile bool = false + WebSocketWriteResponseTimeout time.Duration = 10 * time.Second + WebSocketMaxWriteResponseRetries int = 3 ) diff --git a/integration_tests/commands/http/setup.go b/integration_tests/commands/http/setup.go index d79e18598..acee191ff 100644 --- a/integration_tests/commands/http/setup.go +++ b/integration_tests/commands/http/setup.go @@ -113,11 +113,9 @@ func RunHTTPServer(ctx context.Context, wg *sync.WaitGroup, opt TestServerOption globalErrChannel := make(chan error) shardManager := shard.NewShardManager(1, nil, globalErrChannel) - config.DiceConfig.HTTP.Port = opt.Port // Initialize the HTTPServer testServer := httpws.NewHTTPServer(shardManager, nil) // Inform the user that the server is starting - fmt.Println("Starting the test server on port", config.DiceConfig.HTTP.Port) shardManagerCtx, cancelShardManager := context.WithCancel(ctx) wg.Add(1) go func() { diff --git a/integration_tests/commands/websocket/setup.go b/integration_tests/commands/websocket/setup.go index 4d55b15ff..082c74b72 100644 --- a/integration_tests/commands/websocket/setup.go +++ b/integration_tests/commands/websocket/setup.go @@ -113,7 +113,6 @@ func RunWebsocketServer(ctx context.Context, wg *sync.WaitGroup, opt TestServerO // Initialize WebsocketServer globalErrChannel := make(chan error) shardManager := shard.NewShardManager(1, nil, globalErrChannel) - config.DiceConfig.WebSocket.Port = opt.Port testServer := httpws.NewWebSocketServer(shardManager, testPort1, nil) shardManagerCtx, cancelShardManager := context.WithCancel(ctx) diff --git a/integration_tests/config/parser_test.go b/integration_tests/config/parser_test.go index af0b35c0d..9a44caf7b 100644 --- a/integration_tests/config/parser_test.go +++ b/integration_tests/config/parser_test.go @@ -7,7 +7,6 @@ import ( "os" "path/filepath" "testing" - "time" "github.com/dicedb/dice/config" ) @@ -46,10 +45,8 @@ type http struct { } type websocket struct { - Enabled bool `config:"enabled" default:"true"` - Port int `config:"port" default:"8379" validate:"min=1024,max=65535"` - MaxWriteResponseRetries int `config:"max_write_response_retries" default:"3" validate:"min=0"` - WriteResponseTimeout time.Duration `config:"write_response_timeout" default:"10s"` + Enabled bool `config:"enabled" default:"true"` + Port int `config:"port" default:"8379" validate:"min=1024,max=65535"` } type performance struct { diff --git a/internal/cli/cli.go b/internal/cli/cli.go index b0c86fead..f82197200 100644 --- a/internal/cli/cli.go +++ b/internal/cli/cli.go @@ -25,15 +25,6 @@ func printConfiguration() { // Add the port 
number on which DiceDB is running slog.Info("running with", slog.Int("port", config.DiceConfig.RespServer.Port)) - // HTTP and WebSocket server configuration - if config.DiceConfig.HTTP.Enabled { - slog.Info("running with", slog.Int("http-port", config.DiceConfig.HTTP.Port)) - } - - if config.DiceConfig.WebSocket.Enabled { - slog.Info("running with", slog.Int("websocket-port", config.DiceConfig.WebSocket.Port)) - } - // Add the number of CPU cores available on the machine slog.Info("running with", slog.Int("cores", runtime.NumCPU())) @@ -73,11 +64,7 @@ func Execute() { flag.IntVar(&flagsConfig.RespServer.Port, "port", 7379, "port for the DiceDB server") - flag.IntVar(&flagsConfig.HTTP.Port, "http-port", 8082, "port for accepting requets over HTTP") - flag.BoolVar(&flagsConfig.HTTP.Enabled, "enable-http", false, "enable DiceDB to listen, accept, and process HTTP") - - flag.IntVar(&flagsConfig.WebSocket.Port, "websocket-port", 8379, "port for accepting requets over WebSocket") - flag.BoolVar(&flagsConfig.WebSocket.Enabled, "enable-websocket", false, "enable DiceDB to listen, accept, and process WebSocket") + flag.BoolVar(&tempBool, "enable-websocket", false, "enable DiceDB to listen, accept, and process WebSocket") flag.IntVar(&tempInt, "num-shards", -1, "number shards to create. defaults to number of cores") @@ -137,14 +124,13 @@ func Execute() { } flag.Parse() - defaultConfig(&flagsConfig) + defaultConfig() } -func defaultConfig(flags *config.Config) { +func defaultConfig() { if err := config.CreateConfigFile(filepath.Join(config.DefaultConfigDir, config.DefaultConfigName)); err != nil { log.Fatal(err) } - config.MergeFlags(flags) render() } diff --git a/internal/server/httpws/httpServer.go b/internal/server/httpws/httpServer.go index 6361d4b47..273a52059 100644 --- a/internal/server/httpws/httpServer.go +++ b/internal/server/httpws/httpServer.go @@ -21,7 +21,6 @@ import ( "github.com/dicedb/dice/internal/server/abstractserver" "github.com/dicedb/dice/internal/wal" - "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/clientio" "github.com/dicedb/dice/internal/cmd" "github.com/dicedb/dice/internal/comm" @@ -70,7 +69,7 @@ func NewHTTPServer(shardManager *shard.ShardManager, wl wal.AbstractWAL) *HTTPSe mux := http.NewServeMux() caseInsensitiveMux := &CaseInsensitiveMux{mux: mux} srv := &http.Server{ - Addr: fmt.Sprintf(":%d", config.DiceConfig.HTTP.Port), + Addr: fmt.Sprintf(":%d", 7381), Handler: caseInsensitiveMux, ReadHeaderTimeout: 5 * time.Second, } diff --git a/internal/server/httpws/websocketServer.go b/internal/server/httpws/websocketServer.go index 1f62716fe..110f65f7c 100644 --- a/internal/server/httpws/websocketServer.go +++ b/internal/server/httpws/websocketServer.go @@ -135,7 +135,7 @@ func (s *WebsocketServer) WebsocketHandler(w http.ResponseWriter, r *http.Reques conn.Close() }() - maxRetries := config.DiceConfig.WebSocket.MaxWriteResponseRetries + maxRetries := config.WebSocketMaxWriteResponseRetries for { // read incoming message _, msg, err := conn.ReadMessage() @@ -224,7 +224,7 @@ func (s *WebsocketServer) processQwatchUpdates(clientIdentifierID uint32, conn * func (s *WebsocketServer) processQwatchResponse(conn *websocket.Conn, response interface{}) error { var result interface{} var err error - maxRetries := config.DiceConfig.WebSocket.MaxWriteResponseRetries + maxRetries := config.WebSocketMaxWriteResponseRetries // check response type switch resp := response.(type) { @@ -270,7 +270,7 @@ func (s *WebsocketServer) processQwatchResponse(conn *websocket.Conn, 
response i // success // Write response with retries for transient errors - if err := WriteResponseWithRetries(conn, respBytes, config.DiceConfig.WebSocket.MaxWriteResponseRetries); err != nil { + if err := WriteResponseWithRetries(conn, respBytes, config.WebSocketMaxWriteResponseRetries); err != nil { slog.Debug(fmt.Sprintf("Error writing message: %v", err)) return fmt.Errorf("error writing response: %v", err) } @@ -280,7 +280,7 @@ func (s *WebsocketServer) processQwatchResponse(conn *websocket.Conn, response i func (s *WebsocketServer) processResponse(conn *websocket.Conn, diceDBCmd *cmd.DiceDBCmd, response *ops.StoreResponse) error { var err error - maxRetries := config.DiceConfig.WebSocket.MaxWriteResponseRetries + maxRetries := config.WebSocketMaxWriteResponseRetries var responseValue interface{} // Check if the command is migrated, if it is we use EvalResponse values @@ -319,7 +319,7 @@ func (s *WebsocketServer) processResponse(conn *websocket.Conn, diceDBCmd *cmd.D // success // Write response with retries for transient errors - if err := WriteResponseWithRetries(conn, respBytes, config.DiceConfig.WebSocket.MaxWriteResponseRetries); err != nil { + if err := WriteResponseWithRetries(conn, respBytes, config.WebSocketMaxWriteResponseRetries); err != nil { slog.Debug(fmt.Sprintf("Error writing message: %v", err)) return fmt.Errorf("error writing response: %v", err) } @@ -330,7 +330,7 @@ func (s *WebsocketServer) processResponse(conn *websocket.Conn, diceDBCmd *cmd.D func WriteResponseWithRetries(conn *websocket.Conn, text []byte, maxRetries int) error { for attempts := 0; attempts < maxRetries; attempts++ { // Set a write deadline - if err := conn.SetWriteDeadline(time.Now().Add(config.DiceConfig.WebSocket.WriteResponseTimeout)); err != nil { + if err := conn.SetWriteDeadline(time.Now().Add(config.WebSocketWriteResponseTimeout)); err != nil { slog.Error(fmt.Sprintf("Error setting write deadline: %v", err)) return err } diff --git a/server/main.go b/server/main.go index 367bbaab4..786e6cbd0 100644 --- a/server/main.go +++ b/server/main.go @@ -150,14 +150,14 @@ func Start() { serverWg.Add(1) go runServer(ctx, &serverWg, respServer, serverErrCh) - if config.DiceConfig.HTTP.Enabled { + if false { httpServer := httpws.NewHTTPServer(shardManager, wl) serverWg.Add(1) go runServer(ctx, &serverWg, httpServer, serverErrCh) } - if config.DiceConfig.WebSocket.Enabled { - websocketServer := httpws.NewWebSocketServer(shardManager, config.DiceConfig.WebSocket.Port, wl) + if false { + websocketServer := httpws.NewWebSocketServer(shardManager, 7380, wl) serverWg.Add(1) go runServer(ctx, &serverWg, websocketServer, serverErrCh) } From 7c104e40ae693c58fb8da5d3e9fbfd7483c3c11d Mon Sep 17 00:00:00 2001 From: Arpit Date: Wed, 22 Jan 2025 16:53:28 +0000 Subject: [PATCH 10/12] WAL configuration moved top-level --- config/config.go | 142 ++------- config/constants.go | 3 + config/parser.go | 246 --------------- config/validator.go | 133 -------- integration_tests/commands/http/setup.go | 8 - .../commands/resp/abort/server_abort_test.go | 16 +- integration_tests/commands/resp/hello_test.go | 2 +- integration_tests/commands/resp/setup.go | 18 +- integration_tests/commands/websocket/setup.go | 9 - integration_tests/config/config_test.go | 102 ------- integration_tests/config/parser_test.go | 286 ------------------ integration_tests/server/max_conn_test.go | 65 ---- integration_tests/server/server_abort_test.go | 17 +- integration_tests/server/setup.go | 17 -- internal/auth/session_test.go | 8 +- 
 internal/cli/cli.go | 136 ---------
 .../clientio/iohandler/netconn/netconn.go | 13 +-
 internal/clientio/resp_test.go | 9 -
 internal/commandhandler/cmd_custom.go | 2 +-
 internal/eval/eval.go | 2 +-
 internal/eval/eval_test.go | 2 +-
 internal/observability/ping.go | 66 ----
 internal/server/resp/server.go | 4 +-
 internal/wal/wal_aof.go | 22 +-
 server/main.go | 38 ++-
 25 files changed, 95 insertions(+), 1271 deletions(-)
 delete mode 100644 config/parser.go
 delete mode 100644 config/validator.go
 delete mode 100644 integration_tests/config/config_test.go
 delete mode 100644 integration_tests/config/parser_test.go
 delete mode 100644 integration_tests/server/max_conn_test.go
 delete mode 100644 integration_tests/server/setup.go
 delete mode 100644 internal/cli/cli.go
 delete mode 100644 internal/observability/ping.go

diff --git a/config/config.go b/config/config.go
index eb922cb95..2d6a1fe07 100644
--- a/config/config.go
+++ b/config/config.go
@@ -4,147 +4,41 @@
 package config

 import (
-	"fmt"
-	"log/slog"
-	"os"
-	"path/filepath"
-	"time"
-
-	"github.com/dicedb/dice/internal/server/utils"
 	"github.com/spf13/pflag"
 	"github.com/spf13/viper"
 )

 const (
-	DiceDBVersion = "0.1.0"
-	DefaultConfigName = "dicedb.conf"
-	DefaultConfigDir = "."
-
-	EvictSimpleFirst = "simple-first"
-	EvictAllKeysRandom = "allkeys-random"
-	EvictAllKeysLRU = "allkeys-lru"
-	EvictAllKeysLFU = "allkeys-lfu"
-	EvictBatchKeysLRU = "batch_keys_lru"
-)
-
-var (
-	CustomConfigFilePath = utils.EmptyStr
-	CustomConfigDirPath = utils.EmptyStr
+	DiceDBVersion = "0.1.0"
 )

-type Config struct {
-	Version string `config:"version" default:"0.1.0"`
-	InstanceID string `config:"instance_id"`
-	RespServer respServer `config:"async_server"`
-	WAL WALConfig `config:"WAL"`
-}
-
-type respServer struct {
-	Addr string `config:"addr" default:"0.0.0.0" validate:"ipv4"`
-	Port int `config:"port" default:"7379" validate:"number,gte=0,lte=65535"`
-	KeepAlive int32 `config:"keepalive" default:"300"`
-	Timeout int32 `config:"timeout" default:"300"`
-	MaxConn int32 `config:"max_conn" default:"0"`
-}
-
-type WALConfig struct {
-	// Directory where WAL log files will be stored
-	LogDir string `config:"log_dir" default:"tmp/dicedb-wal"`
-	// WAL buffering mode: 'buffered' (writes buffered in memory) or 'unbuffered' (immediate disk writes)
-	WalMode string `config:"wal_mode" default:"buffered" validate:"oneof=buffered unbuffered"`
-	// Write mode: 'default' (OS handles syncing) or 'fsync' (explicit fsync after writes)
-	WriteMode string `config:"write_mode" default:"default" validate:"oneof=default fsync"`
-	// Size of the write buffer in megabytes
-	BufferSizeMB int `config:"buffer_size_mb" default:"1" validate:"min=1"`
-	// How WAL rotation is triggered: 'segment-size' (based on file size) or 'time' (based on duration)
-	RotationMode string `config:"rotation_mode" default:"segemnt-size" validate:"oneof=segment-size time"`
-	// Maximum size of a WAL segment file in megabytes before rotation
-	MaxSegmentSizeMB int `config:"max_segment_size_mb" default:"16" validate:"min=1"`
-	// Time interval in seconds after which WAL segment is rotated when using time-based rotation
-	MaxSegmentRotationTime time.Duration `config:"max_segment_rotation_time" default:"60s" validate:"min=1s"`
-	// Time interval in Milliseconds after which buffered WAL data is synced to disk
-	BufferSyncInterval time.Duration `config:"buffer_sync_interval" default:"200ms" validate:"min=1ms"`
-	// How old segments are removed: 'num-segments' (keep N latest), 'time' (by age), or 'checkpoint' (after checkpoint)
-	RetentionMode string `config:"retention_mode" default:"num-segments" validate:"oneof=num-segments time checkpoint"`
-	// Maximum number of WAL segment files to retain when using num-segments retention
-	MaxSegmentCount int `config:"max_segment_count" default:"10" validate:"min=1"`
-	// Time interval in Seconds till which WAL segments are retained when using time-based retention
-	MaxSegmentRetentionDuration time.Duration `config:"max_segment_retention_duration" default:"600s" validate:"min=1s"`
-	// How to handle WAL corruption on recovery: 'strict' (fail), 'truncate' (truncate at corruption), 'ignore' (skip corrupted)
-	RecoveryMode string `config:"recovery_mode" default:"strict" validate:"oneof=strict truncate ignore"`
-}
-
-// DiceConfig is the global configuration object for dice
-var DiceConfig = &Config{}
-
-func CreateConfigFile(configFilePath string) error {
-	// Check if the config file already exists
-	if _, err := os.Stat(configFilePath); err == nil {
-		if err := loadDiceConfig(configFilePath); err != nil {
-			return fmt.Errorf("failed to load existing configuration: %w", err)
-		}
-		return nil
-	}
-
-	// Attempt to write a new config file
-	if err := writeConfigFile(configFilePath); err != nil {
-		slog.Warn("Failed to create config file, starting with defaults.", slog.Any("error", err))
-		return nil // Continuing with defaults; may reconsider behavior.
-	}
-
-	// Load the new configuration
-	if err := loadDiceConfig(configFilePath); err != nil {
-		return fmt.Errorf("failed to load newly created configuration: %w", err)
-	}
-
-	slog.Info("Config file successfully created.", slog.String("path", configFilePath))
-	return nil
-}
-
-// writeConfigFile writes the default configuration to the specified file path
-func writeConfigFile(configFilePath string) error {
-	// Check if the directory exists or not
-	dir := filepath.Dir(configFilePath)
-	if _, err := os.Stat(dir); err != nil {
-		return err
-	}
-
-	slog.Info("creating default config file at", slog.Any("path", configFilePath))
-	file, err := os.Create(configFilePath)
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-
-	return nil
-}
-
-func loadDiceConfig(configFilePath string) error {
-	parser := NewConfigParser()
-	if err := parser.ParseFromFile(configFilePath); err != nil {
-		slog.Warn("Failed to parse config file", slog.String("error", err.Error()), slog.String("message", "Loading default configurations"))
-		return parser.ParseDefaults(DiceConfig)
-	}
-
-	return parser.Loadconfig(DiceConfig)
-}
-
 type DiceDBConfig struct {
-	Host string `mapstructure:"host" default:"0.0.0.0" description:"the host address to bind to"`
-	Port int `mapstructure:"port" default:"7379" description:"the port to bind to"`
-	EnableHTTP bool `mapstructure:"enable-http" default:"false" description:"enable http server"`
+	Host string `mapstructure:"host" default:"0.0.0.0" description:"the host address to bind to"`
+	Port int `mapstructure:"port" default:"7379" description:"the port to bind to"`
 	Username string `mapstructure:"username" default:"dicedb" description:"the username to use for authentication"`
 	Password string `mapstructure:"password" default:"" description:"the password to use for authentication"`
 	LogLevel string `mapstructure:"log-level" default:"info" description:"the log level"`
-	EnableWAL bool `mapstructure:"enable-wal" default:"true" description:"enable write-ahead logging"`
-	WALEngine string `mapstructure:"wal-engine" default:"aof" description:"wal engine to use, values: sqlite, aof"`
-	EnableWatch bool `mapstructure:"enable-watch" default:"false" description:"enable support for .WATCH commands and real-time reactivity"`
 	MaxClients int `mapstructure:"max-clients" default:"20000" description:"the maximum number of clients to accept"`
 	NumShards int `mapstructure:"num-shards" default:"-1" description:"number of shards to create. defaults to number of cores"`
+
+	EnableWAL bool `mapstructure:"enable-wal" default:"true" description:"enable write-ahead logging"`
+	WALEngine string `mapstructure:"wal-engine" default:"aof" description:"wal engine to use, values: sqlite, aof"`
+	WALDir string `mapstructure:"wal-dir" default:"/var/log/dicedb" description:"the directory to store WAL segments"`
+	WALMode string `mapstructure:"wal-mode" default:"buffered" description:"wal mode to use, values: buffered, unbuffered"`
+	WALWriteMode string `mapstructure:"wal-write-mode" default:"default" description:"wal file write mode to use, values: default, fsync"`
+	WALBufferSizeMB int `mapstructure:"wal-buffer-size-mb" default:"1" description:"the size of the wal write buffer in megabytes"`
+	WALRotationMode string `mapstructure:"wal-rotation-mode" default:"segment-size" description:"wal rotation mode to use, values: segment-size, time"`
+	WALMaxSegmentSizeMB int `mapstructure:"wal-max-segment-size-mb" default:"16" description:"the maximum size of a wal segment file in megabytes before rotation"`
+	WALMaxSegmentRotationTimeSec int `mapstructure:"wal-max-segment-rotation-time-sec" default:"60" description:"the time interval (in seconds) after which a wal segment is rotated"`
+	WALBufferSyncIntervalMillis int `mapstructure:"wal-buffer-sync-interval-ms" default:"200" description:"the interval (in milliseconds) at which the wal write buffer is synced to disk"`
+	WALRetentionMode string `mapstructure:"wal-retention-mode" default:"num-segments" description:"the new horizon for wal segment post cleanup. values: num-segments, time, checkpoint"`
+	WALMaxSegmentCount int `mapstructure:"wal-max-segment-count" default:"10" description:"the maximum number of segments to retain, if the retention mode is 'num-segments'"`
+	WALMaxSegmentRetentionDurationSec int `mapstructure:"wal-max-segment-retention-duration-sec" default:"600" description:"the maximum duration (in seconds) for wal segments retention"`
+	WALRecoveryMode string `mapstructure:"wal-recovery-mode" default:"strict" description:"wal recovery mode in case of a corruption, values: strict, truncate, ignore"`
 }

 var GlobalDiceDBConfig *DiceDBConfig
diff --git a/config/constants.go b/config/constants.go
index 9bd97b6cc..4ee9013bf 100644
--- a/config/constants.go
+++ b/config/constants.go
@@ -12,4 +12,7 @@ const (
 	EnableProfile bool = false
 	WebSocketWriteResponseTimeout time.Duration = 10 * time.Second
 	WebSocketMaxWriteResponseRetries int = 3
+
+	KeepAlive int32 = 300
+	Timeout int32 = 300
 )
diff --git a/config/parser.go b/config/parser.go
deleted file mode 100644
index be1c46617..000000000
--- a/config/parser.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright (c) 2022-present, DiceDB contributors
-// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information.
- -package config - -import ( - "bufio" - "fmt" - "log/slog" - "os" - "reflect" - "strconv" - "strings" - "time" -) - -// ConfigParser handles the parsing of configuration files -type ConfigParser struct { - // store holds the raw key-value pairs from the config file - store map[string]string -} - -// NewConfigParser creates a new instance of ConfigParser -func NewConfigParser() *ConfigParser { - return &ConfigParser{ - store: make(map[string]string), - } -} - -// ParseFromFile reads the configuration data from a file -func (p *ConfigParser) ParseFromFile(filename string) error { - file, err := os.Open(filename) - if err != nil { - return fmt.Errorf("error opening config file: %w", err) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - return processConfigData(scanner, p) -} - -// ParseFromStdin reads the configuration data from stdin -func (p *ConfigParser) ParseFromStdin() error { - scanner := bufio.NewScanner(os.Stdin) - return processConfigData(scanner, p) -} - -// ParseDefaults populates a struct with default values based on struct tag `default` -func (p *ConfigParser) ParseDefaults(cfg interface{}) error { - val := reflect.ValueOf(cfg) - if val.Kind() != reflect.Ptr || val.IsNil() { - return fmt.Errorf("config must be a non-nil pointer to a struct") - } - - val = val.Elem() - if val.Kind() != reflect.Struct { - return fmt.Errorf("config must be a pointer to a struct") - } - - return p.unmarshalStruct(val, "") -} - -// Loadconfig populates a struct with configuration values based on struct tags -func (p *ConfigParser) Loadconfig(cfg interface{}) error { - val := reflect.ValueOf(cfg) - if val.Kind() != reflect.Ptr || val.IsNil() { - return fmt.Errorf("config must be a non-nil pointer to a struct") - } - - val = val.Elem() - if val.Kind() != reflect.Struct { - return fmt.Errorf("config must be a pointer to a struct") - } - - if err := p.unmarshalStruct(val, ""); err != nil { - return fmt.Errorf("failed to unmarshal config: %w", err) - } - - if err := validateConfig(DiceConfig); err != nil { - return fmt.Errorf("failed to validate config: %w", err) - } - - return nil -} - -// processConfigData reads the configuration data line by line and stores it in the ConfigParser -func processConfigData(scanner *bufio.Scanner, p *ConfigParser) error { - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if line == "" || strings.HasPrefix(line, "#") { - continue - } - - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - slog.Warn("invalid config line", slog.String("line", line)) - continue - } - - key := strings.TrimSpace(parts[0]) - value := strings.Trim(strings.TrimSpace(parts[1]), "\"") - p.store[key] = value - } - - return scanner.Err() -} - -// unmarshalStruct handles the recursive struct parsing. 
-func (p *ConfigParser) unmarshalStruct(val reflect.Value, prefix string) error { - typ := val.Type() - - for i := 0; i < val.NumField(); i++ { - field := val.Field(i) - fieldType := typ.Field(i) - - // Skip unexported fields just like how encoding/json does - if !field.CanSet() { - continue - } - - // Get config key or field name - key := fieldType.Tag.Get("config") - - // Use field name as key if not specified in tag - if key == "" { - key = strings.ToLower(fieldType.Name) - } - - // Skip fields with "-" tag - if key == "-" { - continue - } - - // Apply nested struct's tag as prefix - fullKey := key - if prefix != "" { - fullKey = fmt.Sprintf("%s.%s", prefix, key) - } - - // Recursively process nested structs with their prefix - if field.Kind() == reflect.Struct { - if err := p.unmarshalStruct(field, fullKey); err != nil { - return err - } - continue - } - - // Fetch and set value for non-struct fields - value, exists := p.store[fullKey] - if !exists { - // Use default value from tag if available - if defaultValue := fieldType.Tag.Get("default"); defaultValue != "" { - value = defaultValue - } else { - continue - } - } - - if err := setField(field, value); err != nil { - return fmt.Errorf("error setting field %s: %w", fullKey, err) - } - } - - return nil -} - -// setField sets the appropriate field value based on its type -func setField(field reflect.Value, value string) error { - switch field.Kind() { - case reflect.String: - field.SetString(value) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - if field.Type() == reflect.TypeOf(time.Duration(0)) { - // Handle time.Duration type - duration, err := parseDuration(value) - if err != nil { - return fmt.Errorf("failed to parse duration: %w", err) - } - field.Set(reflect.ValueOf(duration)) - } else { - // Handle other integer types - val, err := strconv.ParseInt(value, 10, 64) - if err != nil { - return fmt.Errorf("failed to parse integer: %w", err) - } - field.SetInt(val) - } - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - val, err := strconv.ParseUint(value, 10, 64) - if err != nil { - return fmt.Errorf("failed to parse unsigned integer: %w", err) - } - field.SetUint(val) - - case reflect.Float32, reflect.Float64: - val, err := strconv.ParseFloat(value, 64) - if err != nil { - return fmt.Errorf("failed to parse float: %w", err) - } - field.SetFloat(val) - - case reflect.Bool: - val, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("failed to parse boolean: %w", err) - } - field.SetBool(val) - - case reflect.Slice: - // Handle slices of basic types - elemType := field.Type().Elem() - values := strings.Split(value, ",") - slice := reflect.MakeSlice(field.Type(), len(values), len(values)) - for i, v := range values { - elem := slice.Index(i) - elemVal := reflect.New(elemType).Elem() - if err := setField(elemVal, strings.TrimSpace(v)); err != nil { - return fmt.Errorf("failed to parse slice element at index %d: %w", i, err) - } - elem.Set(elemVal) - } - field.Set(slice) - - default: - return fmt.Errorf("unsupported type: %s", field.Type()) - } - - return nil -} - -// parseDuration handles parsing of time.Duration with proper validation. 
-func parseDuration(value string) (time.Duration, error) { - if value == "" { - return 0, fmt.Errorf("duration string is empty") - } - duration, err := time.ParseDuration(value) - if err != nil { - return 0, fmt.Errorf("invalid duration format: %s", value) - } - if duration <= 0 { - return 0, fmt.Errorf("duration must be positive, got: %s", value) - } - return duration, nil -} diff --git a/config/validator.go b/config/validator.go deleted file mode 100644 index 740c850fe..000000000 --- a/config/validator.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. - -package config - -import ( - "fmt" - "log" - "reflect" - "strings" - - "github.com/go-playground/validator/v10" -) - -func validateConfig(config *Config) error { - validate := validator.New() - validate.RegisterStructValidation(validateShardCount, Config{}) - validate.RegisterStructValidation(validateWALConfig, Config{}) - - if err := validate.Struct(config); err != nil { - validationErrors, ok := err.(validator.ValidationErrors) - if !ok { - return fmt.Errorf("unexpected validation error type: %v", err) - } - - processedFields := make(map[string]struct{}) - - for _, validationErr := range validationErrors { - fieldName := strings.TrimPrefix(validationErr.Namespace(), "Config.") - - if _, ok := processedFields[fieldName]; ok { - continue - } - processedFields[fieldName] = struct{}{} - - log.Printf("Field %s failed validation: %s", fieldName, validationErr.Tag()) - - if err := applyDefaultValuesFromTags(config, fieldName); err != nil { - return fmt.Errorf("error setting default for %s: %v", fieldName, err) - } - } - } - return nil -} - -func validateShardCount(sl validator.StructLevel) { - if GlobalDiceDBConfig.NumShards <= 0 && GlobalDiceDBConfig.NumShards != -1 { - sl.ReportError(GlobalDiceDBConfig.NumShards, "NumShards", "NumShards", "invalidValue", "must be -1 or greater than 0") - } -} - -func applyDefaultValuesFromTags(config *Config, fieldName string) error { - configType := reflect.TypeOf(config).Elem() - configValue := reflect.ValueOf(config).Elem() - - // Split the field name if it refers to a nested struct - parts := strings.Split(fieldName, ".") - var field reflect.StructField - var fieldValue reflect.Value - var found bool - - // Traverse the struct to find the nested field - for i, part := range parts { - // If it's the first field, just look in the top-level struct - if i == 0 { - field, found = configType.FieldByName(part) - if !found { - log.Printf("Warning: %s field not found", part) - return fmt.Errorf("field %s not found in config struct", part) - } - fieldValue = configValue.FieldByName(part) - } else { - // Otherwise, the struct is nested, so navigate into it - if fieldValue.Kind() == reflect.Struct { - field, found = fieldValue.Type().FieldByName(part) - if !found { - log.Printf("Warning: %s field not found in %s", part, fieldValue.Type()) - return fmt.Errorf("field %s not found in struct %s", part, fieldValue.Type()) - } - fieldValue = fieldValue.FieldByName(part) - } else { - log.Printf("Warning: %s is not a struct", fieldName) - return fmt.Errorf("%s is not a struct", fieldName) - } - } - } - - defaultValue := field.Tag.Get("default") - if defaultValue == "" { - log.Printf("Warning: %s field has no default value to set, leaving empty string", fieldName) - return nil - } - - if err := setField(fieldValue, defaultValue); err != nil { - return 
fmt.Errorf("error setting default value for %s: %v", fieldName, err) - } - - log.Printf("Setting default value for %s to: %s", fieldName, defaultValue) - return nil -} - -func validateWALConfig(sl validator.StructLevel) { - config := sl.Current().Interface().(Config) - - // LogDir validation - if config.WAL.LogDir == "" { - sl.ReportError(config.WAL.LogDir, "LogDir", "LogDir", "required", "cannot be empty") - } - - // MaxSegmentSize validation - if config.WAL.MaxSegmentSizeMB <= 0 { - sl.ReportError(config.WAL.MaxSegmentSizeMB, "MaxSegmentSize", "MaxSegmentSize", "gt", "must be greater than 0") - } - - // MaxSegmentCount validation - if config.WAL.MaxSegmentCount <= 0 { - sl.ReportError(config.WAL.MaxSegmentCount, "MaxSegmentCount", "MaxSegmentCount", "gt", "must be greater than 0") - } - - // BufferSize validation - if config.WAL.BufferSizeMB <= 0 { - sl.ReportError(config.WAL.BufferSizeMB, "BufferSize", "BufferSize", "gt", "must be greater than 0") - } - - // WALMode and WriteMode compatibility checks - if config.WAL.WalMode == "buffered" && config.WAL.WriteMode == "fsync" { - sl.ReportError(config.WAL.WalMode, "WALMode", "WALMode", "incompatible", "walMode 'buffered' cannot be used with writeMode 'fsync'") - } - - if config.WAL.WalMode == "unbuffered" && config.WAL.WriteMode == "default" { - sl.ReportError(config.WAL.WalMode, "WALMode", "WALMode", "incompatible", "walMode 'unbuffered' cannot have writeMode as 'default'") - } -} diff --git a/integration_tests/commands/http/setup.go b/integration_tests/commands/http/setup.go index acee191ff..52b082f55 100644 --- a/integration_tests/commands/http/setup.go +++ b/integration_tests/commands/http/setup.go @@ -17,7 +17,6 @@ import ( "github.com/dicedb/dice/internal/server/httpws" - "github.com/dicedb/dice/config" derrors "github.com/dicedb/dice/internal/errors" "github.com/dicedb/dice/internal/shard" ) @@ -36,13 +35,6 @@ type HTTPCommandExecutor struct { baseURL string } -func init() { - parser := config.NewConfigParser() - if err := parser.ParseDefaults(config.DiceConfig); err != nil { - log.Fatalf("failed to load configuration: %v", err) - } -} - func NewHTTPCommandExecutor() *HTTPCommandExecutor { return &HTTPCommandExecutor{ baseURL: "http://localhost:8083", diff --git a/integration_tests/commands/resp/abort/server_abort_test.go b/integration_tests/commands/resp/abort/server_abort_test.go index 69fbb6f62..75da2aeb3 100644 --- a/integration_tests/commands/resp/abort/server_abort_test.go +++ b/integration_tests/commands/resp/abort/server_abort_test.go @@ -22,8 +22,8 @@ var testServerOptions = resp.TestServerOptions{ } func init() { - config.DiceConfig.RespServer.Port = testServerOptions.Port - log.Print("Setting port to ", config.DiceConfig.RespServer.Port) + config.GlobalDiceDBConfig.Port = testServerOptions.Port + log.Print("Setting port to ", config.GlobalDiceDBConfig.Port) } func TestAbortCommand(t *testing.T) { @@ -38,7 +38,7 @@ func TestAbortCommand(t *testing.T) { // Test 1: Ensure the server is running t.Run("ServerIsRunning", func(t *testing.T) { - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Failed to connect to server: %v", err) } @@ -47,7 +47,7 @@ func TestAbortCommand(t *testing.T) { //Test 2: Send ABORT command and check if the server shuts down t.Run("AbortCommandShutdown", func(t *testing.T) { - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", 
config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Failed to connect to server: %v", err) } @@ -63,7 +63,7 @@ func TestAbortCommand(t *testing.T) { time.Sleep(1 * time.Second) // Try to connect again, it should fail - _, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + _, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err == nil { t.Fatal("Server did not shut down as expected") } @@ -72,7 +72,7 @@ func TestAbortCommand(t *testing.T) { // Test 3: Ensure the server port is released t.Run("PortIsReleased", func(t *testing.T) { // Try to bind to the same port - listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Port should be available after server shutdown: %v", err) } @@ -93,7 +93,7 @@ func TestServerRestartAfterAbort(t *testing.T) { time.Sleep(1 * time.Second) - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Server should be running after restart: %v", err) } @@ -124,7 +124,7 @@ func TestServerRestartAfterAbort(t *testing.T) { time.Sleep(2 * time.Second) // Check if the server is running - conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Server should be running after restart: %v", err) } diff --git a/integration_tests/commands/resp/hello_test.go b/integration_tests/commands/resp/hello_test.go index 662619d90..8bbc8aca1 100644 --- a/integration_tests/commands/resp/hello_test.go +++ b/integration_tests/commands/resp/hello_test.go @@ -17,7 +17,7 @@ func TestHello(t *testing.T) { expected := []interface{}{ "proto", int64(2), - "id", fmt.Sprintf("%s:%d", config.DiceConfig.RespServer.Addr, config.DiceConfig.RespServer.Port), + "id", fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port), "mode", "standalone", "role", "master", "modules", []interface{}{}, diff --git a/integration_tests/commands/resp/setup.go b/integration_tests/commands/resp/setup.go index 1a3ec1bb5..cb9531d93 100644 --- a/integration_tests/commands/resp/setup.go +++ b/integration_tests/commands/resp/setup.go @@ -8,7 +8,6 @@ import ( "errors" "fmt" "io" - "log" "log/slog" "net" "os" @@ -36,16 +35,9 @@ type TestServerOptions struct { Port int } -func init() { - parser := config.NewConfigParser() - if err := parser.ParseDefaults(config.DiceConfig); err != nil { - log.Fatalf("failed to load configuration: %v", err) - } -} - //nolint:unused func getLocalConnection() net.Conn { - conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", config.GlobalDiceDBConfig.Port)) if err != nil { panic(err) } @@ -101,7 +93,7 @@ func deleteTestKeys(keysToDelete []string, store *dstore.Store) { //nolint:unused func getLocalSdk() *dicedb.Client { return dicedb.NewClient(&dicedb.Options{ - Addr: fmt.Sprintf(":%d", config.DiceConfig.RespServer.Port), + Addr: fmt.Sprintf(":%d", config.GlobalDiceDBConfig.Port), DialTimeout: 10 * time.Second, ReadTimeout: 30 * 
time.Second, @@ -185,9 +177,9 @@ func fireCommandAndGetRESPParser(conn net.Conn, cmd string) *clientio.RESPParser func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) { // #1261: Added here to prevent resp integration tests from failing on lower-spec machines if opt.Port != 0 { - config.DiceConfig.RespServer.Port = opt.Port + config.GlobalDiceDBConfig.Port = opt.Port } else { - config.DiceConfig.RespServer.Port = 9739 + config.GlobalDiceDBConfig.Port = 9739 } cmdWatchChan := make(chan dstore.CmdWatchEvent, config.WatchChanBufSize) @@ -202,7 +194,7 @@ func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) { testServer := resp.NewServer(shardManager, ioThreadManager, cmdHandlerManager, cmdWatchSubscriptionChan, cmdWatchChan, gec, wl) ctx, cancel := context.WithCancel(context.Background()) - fmt.Println("Starting the test server on port", config.DiceConfig.RespServer.Port) + fmt.Println("Starting the test server on port", config.GlobalDiceDBConfig.Port) shardManagerCtx, cancelShardManager := context.WithCancel(ctx) wg.Add(1) diff --git a/integration_tests/commands/websocket/setup.go b/integration_tests/commands/websocket/setup.go index 082c74b72..a9653c9a9 100644 --- a/integration_tests/commands/websocket/setup.go +++ b/integration_tests/commands/websocket/setup.go @@ -8,7 +8,6 @@ import ( "encoding/json" "errors" "fmt" - "log" "log/slog" "net/http" "sync" @@ -16,7 +15,6 @@ import ( "github.com/dicedb/dice/internal/server/httpws" - "github.com/dicedb/dice/config" derrors "github.com/dicedb/dice/internal/errors" "github.com/dicedb/dice/internal/shard" "github.com/gorilla/websocket" @@ -43,13 +41,6 @@ type WebsocketCommandExecutor struct { upgrader websocket.Upgrader } -func init() { - parser := config.NewConfigParser() - if err := parser.ParseDefaults(config.DiceConfig); err != nil { - log.Fatalf("failed to load configuration: %v", err) - } -} - func NewWebsocketCommandExecutor() *WebsocketCommandExecutor { return &WebsocketCommandExecutor{ baseURL: URL, diff --git a/integration_tests/config/config_test.go b/integration_tests/config/config_test.go deleted file mode 100644 index e787ccc68..000000000 --- a/integration_tests/config/config_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. 
- -package config_test - -import ( - "os" - "path/filepath" - "testing" - - "github.com/dicedb/dice/config" -) - -const configFileName = "dicedb.conf" - -// TestCreateConfigFile_FileExists tests the scenario when config file already exists -func TestCreateConfigFile_FileExists(t *testing.T) { - tempDir := t.TempDir() - configPath := filepath.Join(tempDir, configFileName) - - if err := os.WriteFile(configPath, []byte("test config"), 0644); err != nil { - t.Fatalf("Failed to create test config file: %v", err) - } - - config.CreateConfigFile(configPath) - - content, err := os.ReadFile(configPath) - if err != nil { - t.Fatalf("Failed to read config file: %v", err) - } - - if string(content) != "test config" { - t.Error("Config file content was modified when it should have been preserved") - } -} - -// TestCreateConfigFile_NewFile tests creating a new config file -func TestCreateConfigFile_NewFile(t *testing.T) { - tempDir := t.TempDir() - configPath := filepath.Join(tempDir, configFileName) - config.CreateConfigFile(configPath) - - if _, err := os.Stat(configPath); os.IsNotExist(err) { - t.Error("Config file was not created") - } - - content, err := os.ReadFile(configPath) - if err != nil { - t.Fatalf("Failed to read created config file: %v", err) - } - - if len(content) == 0 { - t.Error("Created config file is empty") - } -} - -// TestCreateConfigFile_InvalidPath tests creation with an invalid file path -func TestCreateConfigFile_InvalidPath(t *testing.T) { - configPath := "/nonexistent/directory/dicedb.conf" - config.CreateConfigFile(configPath) - - if _, err := os.Stat(configPath); !os.IsNotExist(err) { - t.Error("Config file should not have been created at invalid path") - } -} - -// TestCreateConfigFile_NoPermission tests creation without write permissions -func TestCreateConfigFile_NoPermission(t *testing.T) { - if os.Getuid() == 0 { - t.Skip("Skipping test when running as root") - } - - tempDir := t.TempDir() - err := os.Chmod(tempDir, 0555) // read + execute only - if err != nil { - t.Fatalf("Failed to change directory permissions: %v", err) - } - defer os.Chmod(tempDir, 0755) // restore permissions - - configPath := filepath.Join(tempDir, configFileName) - config.CreateConfigFile(configPath) - - if _, err := os.Stat(configPath); !os.IsNotExist(err) { - t.Error("Config file should not have been created without permissions") - } -} - -// TestCreateConfigFile_ExistingDirectory tests creation in existing directory -func TestCreateConfigFile_ExistingDirectory(t *testing.T) { - tempDir := t.TempDir() - configDir := filepath.Join(tempDir, "config") - if err := os.MkdirAll(configDir, 0755); err != nil { - t.Fatalf("Failed to create config directory: %v", err) - } - - configPath := filepath.Join(configDir, configFileName) - config.CreateConfigFile(configPath) - - if _, err := os.Stat(configPath); os.IsNotExist(err) { - t.Error("Config file was not created in existing directory") - } -} diff --git a/integration_tests/config/parser_test.go b/integration_tests/config/parser_test.go deleted file mode 100644 index 9a44caf7b..000000000 --- a/integration_tests/config/parser_test.go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. 
- -package config_test - -import ( - "os" - "path/filepath" - "testing" - - "github.com/dicedb/dice/config" -) - -// TestConfig is a test struct that mimics your actual config structure -type TestConfig struct { - Version string `config:"version" default:"0.1.0"` - InstanceID string `config:"instance_id"` - Auth auth `config:"auth"` - AsyncServer asyncServer `config:"async_server"` - HTTP http `config:"http"` - WebSocket websocket `config:"websocket"` - Performance performance `config:"performance"` - Memory memory `config:"memory"` - Persistence persistence `config:"persistence"` - Logging logging `config:"logging"` - Network network `config:"network"` -} - -type auth struct { - UserName string `config:"username" default:"dice"` - Password string `config:"password"` -} - -type asyncServer struct { - Addr string `config:"addr" default:"0.0.0.0"` - Port int `config:"port" default:"7379" validate:"min=1024,max=65535"` - KeepAlive int32 `config:"keepalive" default:"300"` - Timeout int32 `config:"timeout" default:"300"` - MaxConn int32 `config:"max_conn" default:"0"` -} - -type http struct { - Enabled bool `config:"enabled" default:"true"` - Port int `config:"port" default:"8082" validate:"min=1024,max=65535"` -} - -type websocket struct { - Enabled bool `config:"enabled" default:"true"` - Port int `config:"port" default:"8379" validate:"min=1024,max=65535"` -} - -type performance struct { -} - -type memory struct { -} - -type persistence struct { - PersistenceEnabled bool `config:"persistence_enabled" default:"true"` - WALDir string `config:"wal-dir" default:"./" validate:"dirpath"` -} - -type logging struct { - LogLevel string `config:"log_level" default:"info" validate:"oneof=debug info warn error"` -} - -func TestNewConfigParser(t *testing.T) { - parser := config.NewConfigParser() - if parser == nil { - t.Fatal("NewConfigParser returned nil") - } -} - -func TestParseFromFile(t *testing.T) { - tests := []struct { - name string - content string - wantErr bool - setupErr bool - }{ - { - name: "valid config", - content: `host=testhost -port=9090 -log_level=debug`, - wantErr: false, - }, - { - name: "empty file", - content: "", - wantErr: false, - }, - { - name: "malformed config", - content: `host=testhost -invalid-line -port=9090`, - wantErr: false, - }, - { - name: "non-existent file", - setupErr: true, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parser := config.NewConfigParser() - - // Create temporary config file - tempDir := t.TempDir() - filename := filepath.Join(tempDir, "dicedb.conf") - - if !tt.setupErr { - err := os.WriteFile(filename, []byte(tt.content), 0644) - if err != nil { - t.Fatalf("Failed to create test file: %v", err) - } - } - - err := parser.ParseFromFile(filename) - if (err != nil) != tt.wantErr { - t.Errorf("ParseFromFile() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestParseFromStdin(t *testing.T) { - tests := []struct { - name string - input string - wantErr bool - }{ - { - name: "valid input", - input: `host=testhost -port=9090 -log_level=debug`, - wantErr: false, - }, - { - name: "empty input", - input: "", - wantErr: false, - }, - { - name: "malformed input", - input: `host=testhost -invalid-line -port=9090`, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parser := config.NewConfigParser() - - // Store original stdin - oldStdin := os.Stdin - defer func() { os.Stdin = oldStdin }() - - // Create a pipe and pass the test input - r, w, err := 
os.Pipe() - if err != nil { - t.Fatalf("Failed to create pipe: %v", err) - } - os.Stdin = r - - go func() { - defer w.Close() - w.Write([]byte(tt.input)) - }() - - err = parser.ParseFromStdin() - if (err != nil) != tt.wantErr { - t.Errorf("ParseFromStdin() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} - -func TestParseDefaults(t *testing.T) { - tests := []struct { - name string - cfg interface{} - wantErr bool - }{ - { - name: "valid struct", - cfg: &TestConfig{}, - wantErr: false, - }, - { - name: "nil pointer", - cfg: nil, - wantErr: true, - }, - { - name: "non-pointer", - cfg: TestConfig{}, - wantErr: true, - }, - { - name: "pointer to non-struct", - cfg: new(string), - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parser := config.NewConfigParser() - err := parser.ParseDefaults(tt.cfg) - - if (err != nil) != tt.wantErr { - t.Errorf("ParseDefaults() error = %v, wantErr %v", err, tt.wantErr) - } - - if !tt.wantErr && tt.cfg != nil { - cfg := tt.cfg.(*TestConfig) - if cfg.AsyncServer.Addr != "0.0.0.0" || cfg.AsyncServer.Port != 7379 || cfg.Logging.LogLevel != "info" { - t.Error("Default values were not properly set") - } - } - }) - } -} - -// TestLoadconfig tests the Loadconfig method -func TestLoadconfig(t *testing.T) { - tests := []struct { - name string - cfg interface{} - content string - wantErr bool - }{ - { - name: "nil pointer", - cfg: nil, - content: "", - wantErr: true, - }, - { - name: "non-pointer", - cfg: TestConfig{}, - content: "", - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - parser := config.NewConfigParser() - - // Create and populate config file if content is provided - if tt.content != "" { - tempDir := t.TempDir() - filename := filepath.Join(tempDir, "dicedb.conf") - err := os.WriteFile(filename, []byte(tt.content), 0644) - if err != nil { - t.Fatalf("Failed to create test file: %v", err) - } - - err = parser.ParseFromFile(filename) - if err != nil { - t.Fatalf("Failed to parse test file: %v", err) - } - } - - err := parser.Loadconfig(tt.cfg) - if (err != nil) != tt.wantErr { - t.Errorf("Loadconfig() error = %v, wantErr %v", err, tt.wantErr) - } - - if !tt.wantErr && tt.cfg != nil { - cfg := tt.cfg.(*TestConfig) - if tt.content != "" && (cfg.AsyncServer.Addr != "customhost" || cfg.AsyncServer.Port != 9090 || cfg.Logging.LogLevel != "debug") { - t.Error("Config values were not properly loaded") - } - } - }) - } -} diff --git a/integration_tests/server/max_conn_test.go b/integration_tests/server/max_conn_test.go deleted file mode 100644 index 71e874777..000000000 --- a/integration_tests/server/max_conn_test.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. 
- -package server - -import ( - "fmt" - "log/slog" - "net" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - commands "github.com/dicedb/dice/integration_tests/commands/resp" -) - -func getConnection(port int) (net.Conn, error) { - conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", port)) - if err != nil { - return nil, err - } - return conn, nil -} - -func TestMaxConnection(t *testing.T) { - var wg sync.WaitGroup - var maxClients uint32 = 50 - var maxConnTestOptions = commands.TestServerOptions{ - Port: 8741, - } - commands.RunTestServer(&wg, maxConnTestOptions) - - time.Sleep(2 * time.Second) - - var maxConnLimit = maxClients + 2 - connections := make([]net.Conn, maxConnLimit) - defer func() { - // Ensure all connections are closed at the end of the test - for _, conn := range connections { - if conn != nil { - conn.Close() - } - } - }() - - for i := uint32(0); i < maxConnLimit; i++ { - conn, err := getConnection(maxConnTestOptions.Port) - if err == nil { - connections[i] = conn - } else { - t.Fatalf("unexpected error while getting connection %d: %v", i, err) - } - } - assert.Equal(t, maxConnLimit, int32(len(connections)), "should have reached the max connection limit") - - result := commands.FireCommand(connections[0], "ABORT") - if result != "OK" { - t.Fatalf("Unexpected response to ABORT command: %v", result) - } else { - slog.Info("Closed server for max_conn_test") - } - wg.Wait() -} diff --git a/integration_tests/server/server_abort_test.go b/integration_tests/server/server_abort_test.go index d4738832f..9da6b45ba 100644 --- a/integration_tests/server/server_abort_test.go +++ b/integration_tests/server/server_abort_test.go @@ -21,11 +21,6 @@ var testServerOptions = commands.TestServerOptions{ Port: 8740, } -func init() { - parser := config.NewConfigParser() - parser.ParseDefaults(config.DiceConfig) -} - func TestAbortCommand(t *testing.T) { _, cancel := context.WithCancel(context.Background()) defer cancel() @@ -39,7 +34,7 @@ func TestAbortCommand(t *testing.T) { // Test 1: Ensure the server is running t.Run("ServerIsRunning", func(t *testing.T) { - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Failed to connect to server: %v", err) } @@ -48,7 +43,7 @@ func TestAbortCommand(t *testing.T) { //Test 2: Send ABORT command and check if the server shuts down t.Run("AbortCommandShutdown", func(t *testing.T) { - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Failed to connect to server: %v", err) } @@ -64,7 +59,7 @@ func TestAbortCommand(t *testing.T) { time.Sleep(1 * time.Second) // Try to connect again, it should fail - _, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + _, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err == nil { t.Fatal("Server did not shut down as expected") } @@ -73,7 +68,7 @@ func TestAbortCommand(t *testing.T) { // Test 3: Ensure the server port is released t.Run("PortIsReleased", func(t *testing.T) { // Try to bind to the same port - listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err 
!= nil { t.Fatalf("Port should be available after server shutdown: %v", err) } @@ -93,7 +88,7 @@ func TestServerRestartAfterAbort(t *testing.T) { time.Sleep(1 * time.Second) - conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Server should be running at start: %v", err) } @@ -125,7 +120,7 @@ func TestServerRestartAfterAbort(t *testing.T) { time.Sleep(2 * time.Second) // Check if the server is running - conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.DiceConfig.RespServer.Port)) + conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port)) if err != nil { t.Fatalf("Server should be running after restart: %v", err) } diff --git a/integration_tests/server/setup.go b/integration_tests/server/setup.go deleted file mode 100644 index e80eb3473..000000000 --- a/integration_tests/server/setup.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. - -package server - -import ( - "log" - - "github.com/dicedb/dice/config" -) - -func init() { - parser := config.NewConfigParser() - if err := parser.ParseDefaults(config.DiceConfig); err != nil { - log.Fatalf("failed to load configuration: %v", err) - } -} diff --git a/internal/auth/session_test.go b/internal/auth/session_test.go index 8309e25a4..d90a9efa4 100644 --- a/internal/auth/session_test.go +++ b/internal/auth/session_test.go @@ -86,7 +86,7 @@ func TestSessionIsActive(t *testing.T) { mockTime := &utils.MockClock{CurrTime: time.Now()} utils.CurrentTime = mockTime - config.DiceConfig.Auth.Password = "testpassword" + config.GlobalDiceDBConfig.Password = "testpassword" session := NewSession() if session.IsActive() { t.Error("New session should not be active") @@ -104,12 +104,12 @@ func TestSessionIsActive(t *testing.T) { if !session.LastAccessedAt.After(oldLastAccessed) { t.Error("IsActive() should update LastAccessedAt") } - config.DiceConfig.Auth.Password = utils.EmptyStr + config.GlobalDiceDBConfig.Password = utils.EmptyStr } func TestSessionActivate(t *testing.T) { session := NewSession() - user := &User{Username: config.DiceConfig.Auth.UserName} + user := &User{Username: config.GlobalDiceDBConfig.Username} session.Activate(user) @@ -122,7 +122,7 @@ func TestSessionActivate(t *testing.T) { } func TestSessionValidate(t *testing.T) { - username := config.DiceConfig.Auth.UserName + username := config.GlobalDiceDBConfig.Username password := "testpassword" user, _ := UserStore.Add(username) diff --git a/internal/cli/cli.go b/internal/cli/cli.go deleted file mode 100644 index f82197200..000000000 --- a/internal/cli/cli.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. - -package cli - -import ( - "flag" - "fmt" - "log" - "log/slog" - "os" - "path/filepath" - "runtime" - - "github.com/dicedb/dice/config" - "github.com/fatih/color" -) - -// configuration function used to add configuration values to the print table at the startup. -// add entry to this function to add a new row in the startup configuration table. 
-func printConfiguration() { - // Add the version of the DiceDB - slog.Info("starting DiceDB", slog.String("version", config.DiceDBVersion)) - - // Add the port number on which DiceDB is running - slog.Info("running with", slog.Int("port", config.DiceConfig.RespServer.Port)) - - // Add the number of CPU cores available on the machine - slog.Info("running with", slog.Int("cores", runtime.NumCPU())) - - // Conditionally add the number of shards to be used for DiceDB - numShards := runtime.NumCPU() - if config.GlobalDiceDBConfig.NumShards > 0 { - numShards = config.GlobalDiceDBConfig.NumShards - } - slog.Info("running with", slog.Int("shards", numShards)) -} - -// printConfigTable prints key-value pairs in a vertical table format. -func render() { - fmt.Print(` - ██████╗ ██╗ ██████╗███████╗██████╗ ██████╗ - ██╔══██╗██║██╔════╝██╔════╝██╔══██╗██╔══██╗ - ██║ ██║██║██║ █████╗ ██║ ██║██████╔╝ - ██║ ██║██║██║ ██╔══╝ ██║ ██║██╔══██╗ - ██████╔╝██║╚██████╗███████╗██████╔╝██████╔╝ - ╚═════╝ ╚═╝ ╚═════╝╚══════╝╚═════╝ ╚═════╝ - -`) - printConfiguration() -} - -func Execute() { - flagsConfig := config.Config{} - var tempStr string - var tempBool bool - var tempFloat float64 - var tempInt int - - flag.StringVar(&tempStr, "username", "dicedb", "deleted") - flag.StringVar(&tempStr, "password", "dicedb", "deleted") - - flag.StringVar(&flagsConfig.RespServer.Addr, "host", "0.0.0.0", "host for the DiceDB server") - - flag.IntVar(&flagsConfig.RespServer.Port, "port", 7379, "port for the DiceDB server") - - flag.BoolVar(&tempBool, "enable-websocket", false, "enable DiceDB to listen, accept, and process WebSocket") - - flag.IntVar(&tempInt, "num-shards", -1, "number shards to create. defaults to number of cores") - - flag.BoolVar(&tempBool, "enable-watch", false, "enable support for .WATCH commands and real-time reactivity") - - flag.StringVar(&tempStr, "log-level", "info", "log level, values: info, debug") - flag.StringVar(&tempStr, "log-dir", "/tmp/dicedb", "log directory path") - - flag.BoolVar(&tempBool, "enable-persistence", false, "enable write-ahead logging") - flag.BoolVar(&tempBool, "restore-wal", false, "restore the database from the WAL files") - flag.StringVar(&tempStr, "wal-engine", "null", "wal engine to use, values: sqlite, aof") - flag.BoolVar(&tempBool, "enable-wal", false, "enable wal") - flag.IntVar(&tempInt, "max-clients", 200000, "max clients") - - flag.StringVar(&config.CustomConfigFilePath, "o", config.CustomConfigFilePath, "dir path to create the flagsConfig file") - flag.StringVar(&config.CustomConfigDirPath, "c", config.CustomConfigDirPath, "file path of the config file") - - flag.IntVar(&tempInt, "keys-limit", config.DefaultKeysLimit, "keys limit for the DiceDB server. "+ - "This flag controls the number of keys each shard holds at startup. 
You can multiply this number with the "+ - "total number of shard threads to estimate how much memory will be required at system start up.") - flag.Float64Var(&tempFloat, "eviction-ratio", 0.9, "ratio of keys to evict when the "+ - "keys limit is reached") - - flag.Usage = func() { - color.Set(color.FgYellow) - fmt.Println("Usage: ./dicedb [options] [config-file]") - color.Unset() - - color.Set(color.FgGreen) - fmt.Println("Options:") - color.Unset() - - color.Set(color.FgCyan) - fmt.Println(" -v, --version Show the version of DiceDB") - fmt.Println(" -h, --help Show this help message") - fmt.Println(" -host Host for the DiceDB server (default: \"0.0.0.0\")") - fmt.Println(" -port Port for the DiceDB server (default: 7379)") - fmt.Println(" -http-port Port for accepting requests over HTTP (default: 8082)") - fmt.Println(" -enable-http Enable DiceDB to listen, accept, and process HTTP (default: false)") - fmt.Println(" -websocket-port Port for accepting requests over WebSocket (default: 8379)") - fmt.Println(" -enable-websocket Enable DiceDB to listen, accept, and process WebSocket (default: false)") - fmt.Println(" -num-shards Number of shards to create. Defaults to number of cores (default: -1)") - fmt.Println(" -enable-watch Enable support for .WATCH commands and real-time reactivity (default: false)") - fmt.Println(" -enable-profiling Enable profiling and capture critical metrics and traces in .prof files (default: false)") - fmt.Println(" -log-level Log level, values: info, debug (default: \"info\")") - fmt.Println(" -log-dir Log directory path (default: \"/tmp/dicedb\")") - fmt.Println(" -enable-persistence Enable write-ahead logging (default: false)") - fmt.Println(" -restore-wal Restore the database from the WAL files (default: false)") - fmt.Println(" -wal-engine WAL engine to use, values: sqlite, aof (default: \"null\")") - fmt.Println(" -requirepass Enable authentication for the default user (default: \"\")") - fmt.Println(" -o Directory path to create the config file (default: \"\")") - fmt.Println(" -c File path of the config file (default: \"\")") - fmt.Println(" -keys-limit Keys limit for the DiceDB server (default: 200000000)") - fmt.Println(" -eviction-ratio Ratio of keys to evict when the keys limit is reached (default: 0.9)") - color.Unset() - os.Exit(0) - } - - flag.Parse() - defaultConfig() -} - -func defaultConfig() { - if err := config.CreateConfigFile(filepath.Join(config.DefaultConfigDir, config.DefaultConfigName)); err != nil { - log.Fatal(err) - } - - render() -} diff --git a/internal/clientio/iohandler/netconn/netconn.go b/internal/clientio/iohandler/netconn/netconn.go index 9a5fcf878..62fd4412b 100644 --- a/internal/clientio/iohandler/netconn/netconn.go +++ b/internal/clientio/iohandler/netconn/netconn.go @@ -16,16 +16,15 @@ import ( "syscall" "time" + "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/clientio" "github.com/dicedb/dice/internal/clientio/iohandler" ) const ( - maxRequestSize = 32 * 1024 * 1024 // 32 MB - ioBufferSize = 16 * 1024 // 16 KB - idleTimeout = 30 * time.Minute - writeTimeout = 10 * time.Second - keepAlivePeriod = 30 * time.Second + maxRequestSize = 32 * 1024 * 1024 // 32 MB + ioBufferSize = 16 * 1024 // 16 KB + idleTimeout = 30 * time.Minute ) var ( @@ -94,7 +93,7 @@ func NewIOHandler(clientFD int) (*IOHandler, error) { if err := tcpConn.SetKeepAlive(true); err != nil { return nil, fmt.Errorf("failed to set keepalive: %w", err) } - if err := tcpConn.SetKeepAlivePeriod(keepAlivePeriod); err != nil { + if err := 
tcpConn.SetKeepAlivePeriod(time.Duration(config.KeepAlive) * time.Second); err != nil { return nil, fmt.Errorf("failed to set keepalive period: %w", err) } } @@ -221,7 +220,7 @@ func (h *IOHandler) Write(ctx context.Context, response interface{}) error { resp = clientio.Encode(response, true) } - deadline := time.Now().Add(writeTimeout) + deadline := time.Now().Add(time.Duration(config.Timeout) * time.Second) if err := h.conn.SetWriteDeadline(deadline); err != nil { slog.Warn("error setting write deadline", slog.Any("error", err)) } diff --git a/internal/clientio/resp_test.go b/internal/clientio/resp_test.go index c96282921..a3318c7db 100644 --- a/internal/clientio/resp_test.go +++ b/internal/clientio/resp_test.go @@ -6,23 +6,14 @@ package clientio_test import ( "bytes" "fmt" - "log" "math" "testing" - "github.com/dicedb/dice/config" "github.com/dicedb/dice/internal/clientio" "github.com/dicedb/dice/internal/server/utils" "github.com/stretchr/testify/assert" ) -func init() { - parser := config.NewConfigParser() - if err := parser.ParseDefaults(config.DiceConfig); err != nil { - log.Fatalf("failed to load configuration: %v", err) - } -} - func TestSimpleStringDecode(t *testing.T) { cases := map[string]string{ "+OK\r\n": "OK", diff --git a/internal/commandhandler/cmd_custom.go b/internal/commandhandler/cmd_custom.go index 53ac07505..77e5827b6 100644 --- a/internal/commandhandler/cmd_custom.go +++ b/internal/commandhandler/cmd_custom.go @@ -45,7 +45,7 @@ func RespHello(args []string) interface{} { } var resp []interface{} - serverID := fmt.Sprintf("%s:%d", config.DiceConfig.RespServer.Addr, config.DiceConfig.RespServer.Port) + serverID := fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port) resp = append(resp, "proto", 2, "id", serverID, diff --git a/internal/eval/eval.go b/internal/eval/eval.go index e2e9898bc..8ed0436ef 100644 --- a/internal/eval/eval.go +++ b/internal/eval/eval.go @@ -144,7 +144,7 @@ func evalHELLO(args []string, store *dstore.Store) []byte { } var resp []interface{} - serverID = fmt.Sprintf("%s:%d", config.DiceConfig.RespServer.Addr, config.DiceConfig.RespServer.Port) + serverID = fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port) resp = append(resp, "proto", 2, "id", serverID, diff --git a/internal/eval/eval_test.go b/internal/eval/eval_test.go index dfcb380a8..efa19d1e2 100644 --- a/internal/eval/eval_test.go +++ b/internal/eval/eval_test.go @@ -177,7 +177,7 @@ func testEvalECHO(t *testing.T, store *dstore.Store) { } func testEvalHELLO(t *testing.T, store *dstore.Store) { - serverID = fmt.Sprintf("%s:%d", config.DiceConfig.RespServer.Addr, config.DiceConfig.RespServer.Port) + serverID = fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port) resp := []interface{}{ "proto", 2, "id", serverID, diff --git a/internal/observability/ping.go b/internal/observability/ping.go deleted file mode 100644 index 742da841a..000000000 --- a/internal/observability/ping.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright (c) 2022-present, DiceDB contributors -// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information. 
- -package observability - -import ( - "bytes" - "context" - "encoding/json" - "log/slog" - "net/http" - "time" - - "github.com/dicedb/dice/config" -) - -type PingPayload struct { - Date string `json:"date"` - HardwareConfig HardwareConfig `json:"hardware_config"` - DBConfig DBConfig `json:"db_config"` - Version string `json:"version"` - InstanceID string `json:"instance_id"` - Err error `json:"error"` -} - -const ( - token = "p.eyJ1IjogIjhjNWQxMjdlLTczZmYtNGRjZS04Mzk5LTQyMDU0MThhYjc2OSIsI" + - "CJpZCI6ICJhZjcxNGExNC0xZWQyLTQ3ZDktOTM0MS0xMzgwNWNiOWFhNDYiLCAiaG9zdCI6ICJ1cy1lYXN0LWF3cyJ9.o9LqZqTZ9YkhbcusZOltsm95RzVQUzJLQOHV2YA7L0E" - url = "https://api.us-east.aws.tinybird.co/v0/events?name=ping2" -) - -type DBConfig struct { -} - -func Ping() { - hwConfig, err := GetHardwareMeta() - if err != nil { - return - } - - payload := &PingPayload{ - HardwareConfig: hwConfig, - InstanceID: config.DiceConfig.InstanceID, - Version: config.DiceConfig.Version, - Err: err, - Date: time.Now().UTC().Format("2006-01-02 15:04:05"), - DBConfig: DBConfig{}, - } - - b, _ := json.Marshal(payload) - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - req, _ := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(b)) - req.Header.Set("Authorization", "Bearer "+token) - req.Header.Set("Content-Type", "application/json") - - client := &http.Client{Timeout: time.Second * 5} - resp, err := client.Do(req) - if err != nil { - slog.Error("Error reporting observability metrics.", slog.Any("error", err)) - return - } - - _ = resp.Body.Close() -} diff --git a/internal/server/resp/server.go b/internal/server/resp/server.go index 378035b11..f88e1cd79 100644 --- a/internal/server/resp/server.go +++ b/internal/server/resp/server.go @@ -62,8 +62,8 @@ func NewServer(shardManager *shard.ShardManager, ioThreadManager *iothread.Manag cmdWatchSubscriptionChan chan watchmanager.WatchSubscription, cmdWatchChan chan dstore.CmdWatchEvent, globalErrChan chan error, wl wal.AbstractWAL) *Server { return &Server{ - Host: config.DiceConfig.RespServer.Addr, - Port: config.DiceConfig.RespServer.Port, + Host: config.GlobalDiceDBConfig.Host, + Port: config.GlobalDiceDBConfig.Port, connBacklogSize: DefaultConnBacklogSize, ioThreadManager: ioThreadManager, cmdHandlerManager: cmdHandlerManager, diff --git a/internal/wal/wal_aof.go b/internal/wal/wal_aof.go index 567fa0858..f03a79438 100644 --- a/internal/wal/wal_aof.go +++ b/internal/wal/wal_aof.go @@ -58,17 +58,17 @@ func NewAOFWAL(directory string) (*AOF, error) { return &AOF{ logDir: directory, - walMode: config.DiceConfig.WAL.WalMode, - bufferSyncTicker: time.NewTicker(config.DiceConfig.WAL.BufferSyncInterval), - segmentRotationTicker: time.NewTicker(config.DiceConfig.WAL.MaxSegmentRotationTime), - segmentRetentionTicker: time.NewTicker(config.DiceConfig.WAL.MaxSegmentRetentionDuration), - writeMode: config.DiceConfig.WAL.WriteMode, - maxSegmentSize: config.DiceConfig.WAL.MaxSegmentSizeMB * 1024 * 1024, - maxSegmentCount: config.DiceConfig.WAL.MaxSegmentCount, - bufferSize: config.DiceConfig.WAL.BufferSizeMB * 1024 * 1024, - retentionMode: config.DiceConfig.WAL.RetentionMode, - recoveryMode: config.DiceConfig.WAL.RecoveryMode, - rotationMode: config.DiceConfig.WAL.RotationMode, + walMode: config.GlobalDiceDBConfig.WALMode, + bufferSyncTicker: time.NewTicker(time.Duration(config.GlobalDiceDBConfig.WALBufferSyncIntervalMillis) * time.Millisecond), + segmentRotationTicker: 
time.NewTicker(time.Duration(config.GlobalDiceDBConfig.WALMaxSegmentRotationTimeSec) * time.Second),
+		segmentRetentionTicker: time.NewTicker(time.Duration(config.GlobalDiceDBConfig.WALMaxSegmentRetentionDurationSec) * time.Second),
+		writeMode: config.GlobalDiceDBConfig.WALWriteMode,
+		maxSegmentSize: config.GlobalDiceDBConfig.WALMaxSegmentSizeMB * 1024 * 1024,
+		maxSegmentCount: config.GlobalDiceDBConfig.WALMaxSegmentCount,
+		bufferSize: config.GlobalDiceDBConfig.WALBufferSizeMB * 1024 * 1024,
+		retentionMode: config.GlobalDiceDBConfig.WALRetentionMode,
+		recoveryMode: config.GlobalDiceDBConfig.WALRecoveryMode,
+		rotationMode: config.GlobalDiceDBConfig.WALRotationMode,
 		ctx: ctx,
 		cancel: cancel,
 	}, nil
diff --git a/server/main.go b/server/main.go
index 786e6cbd0..b73fc7f32 100644
--- a/server/main.go
+++ b/server/main.go
@@ -21,7 +21,6 @@ import (
 	"github.com/dicedb/dice/internal/auth"
 	"github.com/dicedb/dice/internal/server/httpws"
-	"github.com/dicedb/dice/internal/cli"
 
 	"github.com/dicedb/dice/internal/commandhandler"
 	"github.com/dicedb/dice/internal/logger"
 	"github.com/dicedb/dice/internal/server/abstractserver"
@@ -31,15 +30,39 @@ import (
 	"github.com/dicedb/dice/config"
 	diceerrors "github.com/dicedb/dice/internal/errors"
 	"github.com/dicedb/dice/internal/iothread"
-	"github.com/dicedb/dice/internal/observability"
 	"github.com/dicedb/dice/internal/server/resp"
 	"github.com/dicedb/dice/internal/shard"
 	dstore "github.com/dicedb/dice/internal/store"
 )
 
+func printConfiguration() {
+	slog.Info("starting DiceDB", slog.String("version", config.DiceDBVersion))
+	slog.Info("running with", slog.Int("port", config.GlobalDiceDBConfig.Port))
+	slog.Info("running on", slog.Int("cores", runtime.NumCPU()))
+
+	// Conditionally add the number of shards to be used for DiceDB
+	numShards := runtime.NumCPU()
+	if config.GlobalDiceDBConfig.NumShards > 0 {
+		numShards = config.GlobalDiceDBConfig.NumShards
+	}
+	slog.Info("running with", slog.Int("shards", numShards))
+}
+
+func printBanner() {
+	fmt.Print(`
+	██████╗ ██╗ ██████╗███████╗██████╗ ██████╗
+	██╔══██╗██║██╔════╝██╔════╝██╔══██╗██╔══██╗
+	██║ ██║██║██║ █████╗ ██║ ██║██████╔╝
+	██║ ██║██║██║ ██╔══╝ ██║ ██║██╔══██╗
+	██████╔╝██║╚██████╗███████╗██████╔╝██████╔╝
+	╚═════╝ ╚═╝ ╚═════╝╚══════╝╚═════╝ ╚═════╝
+
+`)
+}
+
 func Start() {
-	iid := observability.GetOrCreateInstanceID()
-	config.DiceConfig.InstanceID = iid
+	printBanner()
+	printConfiguration()
 
 	// TODO: Handle the addition of the default user
 	// and new users in a much better way. Doing this using
@@ -48,12 +71,7 @@ func Start() {
 		_, _ = auth.UserStore.Add(config.GlobalDiceDBConfig.Username)
 	}
 
-	// This is counter intuitive, but it's the first thing that should be done
-	// because this function parses the flags and prepares the config,
-	cli.Execute()
-
 	slog.SetDefault(logger.New())
-	go observability.Ping()
 
 	ctx, cancel := context.WithCancel(context.Background())
 
@@ -71,7 +89,7 @@ func Start() {
 	wl, _ = wal.NewNullWAL()
 	if config.GlobalDiceDBConfig.EnableWAL {
 		if config.GlobalDiceDBConfig.WALEngine == "aof" {
-			_wl, err := wal.NewAOFWAL(config.DiceConfig.WAL.LogDir)
+			_wl, err := wal.NewAOFWAL(config.GlobalDiceDBConfig.WALDir)
 			if err != nil {
 				slog.Warn("could not create WAL with", slog.String("wal-engine", config.GlobalDiceDBConfig.WALEngine), slog.Any("error", err))
 				sigs <- syscall.SIGKILL

From 168c9ce5a739c540a1b75a1798380a63a5d6a36c Mon Sep 17 00:00:00 2001
From: Arpit
Date: Wed, 22 Jan 2025 16:55:36 +0000
Subject: [PATCH 11/12] Configuration nomenclature changed

---
 config/config.go                               |  6 ++--
 config/constants.go                            |  3 ++
 .../commands/resp/abort/server_abort_test.go   | 16 +++++-----
 integration_tests/commands/resp/hello_test.go  |  2 +-
 integration_tests/commands/resp/setup.go       | 10 +++----
 integration_tests/server/server_abort_test.go  | 12 ++++----
 internal/auth/session.go                       |  4 +--
 internal/auth/session_test.go                  |  8 ++---
 internal/commandhandler/cmd_custom.go          |  2 +-
 internal/commandhandler/commandhandler.go      |  4 +--
 internal/commandhandler/registry.go            |  2 +-
 internal/eval/eval.go                          |  6 ++--
 internal/eval/eval_test.go                     |  2 +-
 internal/iomultiplexer/epoll_linux.go          |  6 ++--
 internal/iomultiplexer/kqueue_darwin.go        |  6 ++--
 internal/iothread/manager.go                   |  2 +-
 internal/logger/logger.go                      |  2 +-
 internal/server/resp/server.go                 |  4 +--
 internal/shard/shard_thread.go                 |  2 +-
 internal/wal/wal_aof.go                        | 22 +++++++-------
 server/main.go                                 | 30 +++++++++----------
 21 files changed, 77 insertions(+), 74 deletions(-)

diff --git a/config/config.go b/config/config.go
index 2d6a1fe07..dfed80386 100644
--- a/config/config.go
+++ b/config/config.go
@@ -12,6 +12,8 @@ const (
 	DiceDBVersion = "0.1.0"
 )
 
+var Config *DiceDBConfig
+
 type DiceDBConfig struct {
 	Host string `mapstructure:"host" default:"0.0.0.0" description:"the host address to bind to"`
 	Port int `mapstructure:"port" default:"7379" description:"the port to bind to"`
@@ -41,8 +43,6 @@ type DiceDBConfig struct {
 	WALRecoveryMode string `mapstructure:"wal-recovery-mode" default:"strict" description:"wal recovery mode in case of a corruption, values: strict, truncate, ignore"`
 }
 
-var GlobalDiceDBConfig *DiceDBConfig
-
 func Init(flags *pflag.FlagSet) {
 	viper.SetConfigName("dicedb")
 	viper.SetConfigType("yaml")
@@ -61,7 +61,7 @@ func Init(flags *pflag.FlagSet) {
 		viper.Set(flag.Name, flag.Value.String())
 	})
 
-	if err := viper.Unmarshal(&GlobalDiceDBConfig); err != nil {
+	if err := viper.Unmarshal(&Config); err != nil {
 		panic(err)
 	}
 }
diff --git a/config/constants.go b/config/constants.go
index 4ee9013bf..ad28549db 100644
--- a/config/constants.go
+++ b/config/constants.go
@@ -1,3 +1,6 @@
+// Copyright (c) 2022-present, DiceDB contributors
+// All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE file in the project root for full license information.
+
 package config
 
 import "time"
diff --git a/integration_tests/commands/resp/abort/server_abort_test.go b/integration_tests/commands/resp/abort/server_abort_test.go
index 75da2aeb3..33a7a4459 100644
--- a/integration_tests/commands/resp/abort/server_abort_test.go
+++ b/integration_tests/commands/resp/abort/server_abort_test.go
@@ -22,8 +22,8 @@ var testServerOptions = resp.TestServerOptions{
 }
 
 func init() {
-	config.GlobalDiceDBConfig.Port = testServerOptions.Port
-	log.Print("Setting port to ", config.GlobalDiceDBConfig.Port)
+	config.Config.Port = testServerOptions.Port
+	log.Print("Setting port to ", config.Config.Port)
 }
 
 func TestAbortCommand(t *testing.T) {
@@ -38,7 +38,7 @@ func TestAbortCommand(t *testing.T) {
 
 	// Test 1: Ensure the server is running
 	t.Run("ServerIsRunning", func(t *testing.T) {
-		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err != nil {
 			t.Fatalf("Failed to connect to server: %v", err)
 		}
@@ -47,7 +47,7 @@ func TestAbortCommand(t *testing.T) {
 
 	//Test 2: Send ABORT command and check if the server shuts down
 	t.Run("AbortCommandShutdown", func(t *testing.T) {
-		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err != nil {
 			t.Fatalf("Failed to connect to server: %v", err)
 		}
@@ -63,7 +63,7 @@ func TestAbortCommand(t *testing.T) {
 		time.Sleep(1 * time.Second)
 
 		// Try to connect again, it should fail
-		_, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		_, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err == nil {
 			t.Fatal("Server did not shut down as expected")
 		}
@@ -72,7 +72,7 @@ func TestAbortCommand(t *testing.T) {
 	// Test 3: Ensure the server port is released
 	t.Run("PortIsReleased", func(t *testing.T) {
 		// Try to bind to the same port
-		listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err != nil {
 			t.Fatalf("Port should be available after server shutdown: %v", err)
 		}
@@ -93,7 +93,7 @@ func TestServerRestartAfterAbort(t *testing.T) {
 
 	time.Sleep(1 * time.Second)
 
-	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 	if err != nil {
 		t.Fatalf("Server should be running after restart: %v", err)
 	}
@@ -124,7 +124,7 @@ func TestServerRestartAfterAbort(t *testing.T) {
 	time.Sleep(2 * time.Second)
 
 	// Check if the server is running
-	conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+	conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 	if err != nil {
 		t.Fatalf("Server should be running after restart: %v", err)
 	}
diff --git a/integration_tests/commands/resp/hello_test.go b/integration_tests/commands/resp/hello_test.go
index 8bbc8aca1..199541f15 100644
--- a/integration_tests/commands/resp/hello_test.go
+++ b/integration_tests/commands/resp/hello_test.go
@@ -17,7 +17,7 @@ func TestHello(t *testing.T) {
 	expected := []interface{}{
 		"proto", int64(2),
-		"id", fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port),
+		"id", fmt.Sprintf("%s:%d", config.Config.Host, config.Config.Port),
 		"mode", "standalone",
 		"role", "master",
 		"modules",
 		[]interface{}{},
diff --git a/integration_tests/commands/resp/setup.go b/integration_tests/commands/resp/setup.go
index cb9531d93..8d383189c 100644
--- a/integration_tests/commands/resp/setup.go
+++ b/integration_tests/commands/resp/setup.go
@@ -37,7 +37,7 @@ type TestServerOptions struct {
 
 //nolint:unused
 func getLocalConnection() net.Conn {
-	conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", config.GlobalDiceDBConfig.Port))
+	conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%d", config.Config.Port))
 	if err != nil {
 		panic(err)
 	}
@@ -93,7 +93,7 @@ func deleteTestKeys(keysToDelete []string, store *dstore.Store) {
 //nolint:unused
 func getLocalSdk() *dicedb.Client {
 	return dicedb.NewClient(&dicedb.Options{
-		Addr: fmt.Sprintf(":%d", config.GlobalDiceDBConfig.Port),
+		Addr: fmt.Sprintf(":%d", config.Config.Port),
 
 		DialTimeout: 10 * time.Second,
 		ReadTimeout: 30 * time.Second,
@@ -177,9 +177,9 @@ func fireCommandAndGetRESPParser(conn net.Conn, cmd string) *clientio.RESPParser
 func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) {
 	// #1261: Added here to prevent resp integration tests from failing on lower-spec machines
 	if opt.Port != 0 {
-		config.GlobalDiceDBConfig.Port = opt.Port
+		config.Config.Port = opt.Port
 	} else {
-		config.GlobalDiceDBConfig.Port = 9739
+		config.Config.Port = 9739
 	}
 
 	cmdWatchChan := make(chan dstore.CmdWatchEvent, config.WatchChanBufSize)
@@ -194,7 +194,7 @@ func RunTestServer(wg *sync.WaitGroup, opt TestServerOptions) {
 	testServer := resp.NewServer(shardManager, ioThreadManager, cmdHandlerManager, cmdWatchSubscriptionChan, cmdWatchChan, gec, wl)
 
 	ctx, cancel := context.WithCancel(context.Background())
-	fmt.Println("Starting the test server on port", config.GlobalDiceDBConfig.Port)
+	fmt.Println("Starting the test server on port", config.Config.Port)
 	shardManagerCtx, cancelShardManager := context.WithCancel(ctx)
 	wg.Add(1)
diff --git a/integration_tests/server/server_abort_test.go b/integration_tests/server/server_abort_test.go
index 9da6b45ba..457b56325 100644
--- a/integration_tests/server/server_abort_test.go
+++ b/integration_tests/server/server_abort_test.go
@@ -34,7 +34,7 @@ func TestAbortCommand(t *testing.T) {
 
 	// Test 1: Ensure the server is running
 	t.Run("ServerIsRunning", func(t *testing.T) {
-		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err != nil {
 			t.Fatalf("Failed to connect to server: %v", err)
 		}
@@ -43,7 +43,7 @@ func TestAbortCommand(t *testing.T) {
 
 	//Test 2: Send ABORT command and check if the server shuts down
 	t.Run("AbortCommandShutdown", func(t *testing.T) {
-		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err != nil {
 			t.Fatalf("Failed to connect to server: %v", err)
 		}
@@ -59,7 +59,7 @@ func TestAbortCommand(t *testing.T) {
 		time.Sleep(1 * time.Second)
 
 		// Try to connect again, it should fail
-		_, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		_, err = net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err == nil {
 			t.Fatal("Server did not shut down as expected")
 		}
@@ -68,7 +68,7 @@ func TestAbortCommand(t *testing.T) {
 	// Test 3: Ensure the server port is released
 	t.Run("PortIsReleased", func(t *testing.T) {
 		// Try to bind to the same port
-		listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+		listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 		if err != nil {
 			t.Fatalf("Port should be available after server shutdown: %v", err)
 		}
@@ -88,7 +88,7 @@ func TestServerRestartAfterAbort(t *testing.T) {
 
 	time.Sleep(1 * time.Second)
 
-	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+	conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 	if err != nil {
 		t.Fatalf("Server should be running at start: %v", err)
 	}
@@ -120,7 +120,7 @@ func TestServerRestartAfterAbort(t *testing.T) {
 	time.Sleep(2 * time.Second)
 
 	// Check if the server is running
-	conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.GlobalDiceDBConfig.Port))
+	conn2, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", config.Config.Port))
 	if err != nil {
 		t.Fatalf("Server should be running after restart: %v", err)
 	}
diff --git a/internal/auth/session.go b/internal/auth/session.go
index 491a8f807..8fa66e2dc 100644
--- a/internal/auth/session.go
+++ b/internal/auth/session.go
@@ -108,7 +108,7 @@ func NewSession() (session *Session) {
 }
 
 func (session *Session) IsActive() (isActive bool) {
-	if config.GlobalDiceDBConfig.Password == utils.EmptyStr && session.Status != SessionStatusActive {
+	if config.Config.Password == utils.EmptyStr && session.Status != SessionStatusActive {
 		session.Activate(session.User)
 	}
 	isActive = session.Status == SessionStatusActive
@@ -133,7 +133,7 @@ func (session *Session) Validate(username, password string) error {
 	if user, err = UserStore.Get(username); err != nil {
 		return err
 	}
-	if username == config.GlobalDiceDBConfig.Username && len(user.Passwords) == 0 {
+	if username == config.Config.Username && len(user.Passwords) == 0 {
 		session.Activate(user)
 		return nil
 	}
diff --git a/internal/auth/session_test.go b/internal/auth/session_test.go
index d90a9efa4..d0df46c1e 100644
--- a/internal/auth/session_test.go
+++ b/internal/auth/session_test.go
@@ -86,7 +86,7 @@ func TestSessionIsActive(t *testing.T) {
 	mockTime := &utils.MockClock{CurrTime: time.Now()}
 	utils.CurrentTime = mockTime
-	config.GlobalDiceDBConfig.Password = "testpassword"
+	config.Config.Password = "testpassword"
 	session := NewSession()
 	if session.IsActive() {
 		t.Error("New session should not be active")
@@ -104,12 +104,12 @@ func TestSessionIsActive(t *testing.T) {
 	if !session.LastAccessedAt.After(oldLastAccessed) {
 		t.Error("IsActive() should update LastAccessedAt")
 	}
-	config.GlobalDiceDBConfig.Password = utils.EmptyStr
+	config.Config.Password = utils.EmptyStr
 }
 
 func TestSessionActivate(t *testing.T) {
 	session := NewSession()
-	user := &User{Username: config.GlobalDiceDBConfig.Username}
+	user := &User{Username: config.Config.Username}
 
 	session.Activate(user)
 
@@ -122,7 +122,7 @@ func TestSessionActivate(t *testing.T) {
 }
 
 func TestSessionValidate(t *testing.T) {
-	username := config.GlobalDiceDBConfig.Username
+	username := config.Config.Username
 	password := "testpassword"
 	user, _ := UserStore.Add(username)
diff --git a/internal/commandhandler/cmd_custom.go b/internal/commandhandler/cmd_custom.go
index 77e5827b6..35e4ae7db 100644
--- a/internal/commandhandler/cmd_custom.go
+++ b/internal/commandhandler/cmd_custom.go
@@ -45,7 +45,7 @@ func RespHello(args []string) interface{} {
 	}
 
 	var resp []interface{}
-	serverID := fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port)
+	serverID := fmt.Sprintf("%s:%d", config.Config.Host, config.Config.Port)
 	resp = append(resp,
 		"proto", 2,
 		"id", serverID,
diff --git a/internal/commandhandler/commandhandler.go b/internal/commandhandler/commandhandler.go
index 0475831ad..82e1c0551 100644
--- a/internal/commandhandler/commandhandler.go
+++ b/internal/commandhandler/commandhandler.go
@@ -536,11 +536,11 @@ func (h *BaseCommandHandler) RespAuth(args []string) interface{} {
 		return diceerrors.ErrWrongArgumentCount("AUTH")
 	}
 
-	if config.GlobalDiceDBConfig.Password == "" {
+	if config.Config.Password == "" {
 		return diceerrors.ErrAuth
 	}
 
-	username := config.GlobalDiceDBConfig.Username
+	username := config.Config.Username
 	var password string
 
 	if len(args) == 1 {
diff --git a/internal/commandhandler/registry.go b/internal/commandhandler/registry.go
index d03443c5d..a38fc44bc 100644
--- a/internal/commandhandler/registry.go
+++ b/internal/commandhandler/registry.go
@@ -29,7 +29,7 @@ var (
 
 func NewRegistry(sm *shard.ShardManager) *Registry {
 	return &Registry{
-		maxCmdHandlers: uint32(config.GlobalDiceDBConfig.MaxClients),
+		maxCmdHandlers: uint32(config.Config.MaxClients),
 		ShardManager: sm,
 	}
 }
diff --git a/internal/eval/eval.go b/internal/eval/eval.go
index 8ed0436ef..45f5f91d7 100644
--- a/internal/eval/eval.go
+++ b/internal/eval/eval.go
@@ -117,11 +117,11 @@ func evalECHO(args []string, store *dstore.Store) []byte {
 func EvalAUTH(args []string, c *comm.Client) []byte {
 	var err error
 
-	if config.GlobalDiceDBConfig.Password == "" {
+	if config.Config.Password == "" {
 		return diceerrors.NewErrWithMessage("AUTH called without any password configured for the default user. Are you sure your configuration is correct?")
 	}
 
-	username := config.GlobalDiceDBConfig.Username
+	username := config.Config.Username
 	var password string
 
 	if len(args) == 1 {
@@ -144,7 +144,7 @@ func evalHELLO(args []string, store *dstore.Store) []byte {
 	}
 
 	var resp []interface{}
-	serverID = fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port)
+	serverID = fmt.Sprintf("%s:%d", config.Config.Host, config.Config.Port)
 	resp = append(resp,
 		"proto", 2,
 		"id", serverID,
diff --git a/internal/eval/eval_test.go b/internal/eval/eval_test.go
index efa19d1e2..a917841dd 100644
--- a/internal/eval/eval_test.go
+++ b/internal/eval/eval_test.go
@@ -177,7 +177,7 @@ func testEvalECHO(t *testing.T, store *dstore.Store) {
 }
 
 func testEvalHELLO(t *testing.T, store *dstore.Store) {
-	serverID = fmt.Sprintf("%s:%d", config.GlobalDiceDBConfig.Host, config.GlobalDiceDBConfig.Port)
+	serverID = fmt.Sprintf("%s:%d", config.Config.Host, config.Config.Port)
 	resp := []interface{}{
 		"proto", 2,
 		"id", serverID,
diff --git a/internal/iomultiplexer/epoll_linux.go b/internal/iomultiplexer/epoll_linux.go
index 60f49d986..8fbcc2c33 100644
--- a/internal/iomultiplexer/epoll_linux.go
+++ b/internal/iomultiplexer/epoll_linux.go
@@ -24,7 +24,7 @@ type Epoll struct {
 
 // New creates a new Epoll instance
 func New() (*Epoll, error) {
-	if config.GlobalDiceDBConfig.MaxClients == 0 {
+	if config.Config.MaxClients == 0 {
 		return nil, ErrInvalidMaxClients
 	}
 
@@ -35,8 +35,8 @@ func New() (*Epoll, error) {
 
 	return &Epoll{
 		fd: fd,
-		ePollEvents: make([]syscall.EpollEvent, config.GlobalDiceDBConfig.MaxClients),
-		diceEvents: make([]Event, config.GlobalDiceDBConfig.MaxClients),
+		ePollEvents: make([]syscall.EpollEvent, config.Config.MaxClients),
+		diceEvents: make([]Event, config.Config.MaxClients),
 	}, nil
 }
diff --git a/internal/iomultiplexer/kqueue_darwin.go b/internal/iomultiplexer/kqueue_darwin.go
index 56189d924..7a0dc1afc 100644
--- a/internal/iomultiplexer/kqueue_darwin.go
+++ b/internal/iomultiplexer/kqueue_darwin.go
@@ -24,7 +24,7 @@ type KQueue struct {
 
 // New creates a new KQueue instance
 func New() (*KQueue, error) {
-	if config.GlobalDiceDBConfig.MaxClients < 0 {
+	if config.Config.MaxClients < 0 {
 		return nil, ErrInvalidMaxClients
 	}
 
@@ -35,8 +35,8 @@ func New() (*KQueue, error) {
 
 	return &KQueue{
 		fd: fd,
-		kQEvents: make([]syscall.Kevent_t, config.GlobalDiceDBConfig.MaxClients),
-		diceEvents: make([]Event, config.GlobalDiceDBConfig.MaxClients),
+		kQEvents: make([]syscall.Kevent_t, config.Config.MaxClients),
+		diceEvents: make([]Event, config.Config.MaxClients),
 	}, nil
 }
diff --git a/internal/iothread/manager.go b/internal/iothread/manager.go
index 752cbd5dd..428ef4a2b 100644
--- a/internal/iothread/manager.go
+++ b/internal/iothread/manager.go
@@ -30,7 +30,7 @@ func (m *Manager) RegisterIOThread(ioThread IOThread) error {
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
-	if m.IOThreadCount() >= uint32(config.GlobalDiceDBConfig.MaxClients) {
+	if m.IOThreadCount() >= uint32(config.Config.MaxClients) {
 		return ErrMaxClientsReached
 	}
 
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
index 6ecc8e7bf..5897f3b94 100644
--- a/internal/logger/logger.go
+++ b/internal/logger/logger.go
@@ -13,7 +13,7 @@ import (
 )
 
 func getSLogLevel() slog.Level {
-	switch config.GlobalDiceDBConfig.LogLevel {
+	switch config.Config.LogLevel {
 	case "debug":
 		return slog.LevelDebug
 	case "info":
diff --git a/internal/server/resp/server.go b/internal/server/resp/server.go
index f88e1cd79..bd283977a 100644
--- a/internal/server/resp/server.go
+++ b/internal/server/resp/server.go
@@ -62,8 +62,8 @@ func NewServer(shardManager *shard.ShardManager, ioThreadManager *iothread.Manag
 	cmdWatchSubscriptionChan chan watchmanager.WatchSubscription,
 	cmdWatchChan chan dstore.CmdWatchEvent, globalErrChan chan error, wl wal.AbstractWAL) *Server {
 	return &Server{
-		Host: config.GlobalDiceDBConfig.Host,
-		Port: config.GlobalDiceDBConfig.Port,
+		Host: config.Config.Host,
+		Port: config.Config.Port,
 		connBacklogSize: DefaultConnBacklogSize,
 		ioThreadManager: ioThreadManager,
 		cmdHandlerManager: cmdHandlerManager,
diff --git a/internal/shard/shard_thread.go b/internal/shard/shard_thread.go
index 54fef62eb..86a5f15c2 100644
--- a/internal/shard/shard_thread.go
+++ b/internal/shard/shard_thread.go
@@ -142,7 +142,7 @@ func (shard *ShardThread) processRequest(op *ops.StoreOp) {
 // cleanup handles cleanup logic when the shard stops.
 func (shard *ShardThread) cleanup() {
 	close(shard.ReqChan)
-	if !config.GlobalDiceDBConfig.EnableWAL {
+	if !config.Config.EnableWAL {
 		return
 	}
 }
diff --git a/internal/wal/wal_aof.go b/internal/wal/wal_aof.go
index f03a79438..0b7abef9b 100644
--- a/internal/wal/wal_aof.go
+++ b/internal/wal/wal_aof.go
@@ -58,17 +58,17 @@ func NewAOFWAL(directory string) (*AOF, error) {
 
 	return &AOF{
 		logDir: directory,
-		walMode: config.GlobalDiceDBConfig.WALMode,
-		bufferSyncTicker: time.NewTicker(time.Duration(config.GlobalDiceDBConfig.WALBufferSyncIntervalMillis) * time.Millisecond),
-		segmentRotationTicker: time.NewTicker(time.Duration(config.GlobalDiceDBConfig.WALMaxSegmentRotationTimeSec) * time.Second),
-		segmentRetentionTicker: time.NewTicker(time.Duration(config.GlobalDiceDBConfig.WALMaxSegmentRetentionDurationSec) * time.Second),
-		writeMode: config.GlobalDiceDBConfig.WALWriteMode,
-		maxSegmentSize: config.GlobalDiceDBConfig.WALMaxSegmentSizeMB * 1024 * 1024,
-		maxSegmentCount: config.GlobalDiceDBConfig.WALMaxSegmentCount,
-		bufferSize: config.GlobalDiceDBConfig.WALBufferSizeMB * 1024 * 1024,
-		retentionMode: config.GlobalDiceDBConfig.WALRetentionMode,
-		recoveryMode: config.GlobalDiceDBConfig.WALRecoveryMode,
-		rotationMode: config.GlobalDiceDBConfig.WALRotationMode,
+		walMode: config.Config.WALMode,
+		bufferSyncTicker: time.NewTicker(time.Duration(config.Config.WALBufferSyncIntervalMillis) * time.Millisecond),
+		segmentRotationTicker: time.NewTicker(time.Duration(config.Config.WALMaxSegmentRotationTimeSec) * time.Second),
+		segmentRetentionTicker: time.NewTicker(time.Duration(config.Config.WALMaxSegmentRetentionDurationSec) * time.Second),
+		writeMode: config.Config.WALWriteMode,
+		maxSegmentSize: config.Config.WALMaxSegmentSizeMB * 1024 * 1024,
+		maxSegmentCount: config.Config.WALMaxSegmentCount,
+		bufferSize: config.Config.WALBufferSizeMB * 1024 * 1024,
+		retentionMode: config.Config.WALRetentionMode,
+		recoveryMode: config.Config.WALRecoveryMode,
+		rotationMode: config.Config.WALRotationMode,
 		ctx: ctx,
 		cancel: cancel,
 	}, nil
diff --git a/server/main.go b/server/main.go
index b73fc7f32..b82ea81c8 100644
--- a/server/main.go
+++ b/server/main.go
@@ -37,13 +37,13 @@ import (
 
 func printConfiguration() {
 	slog.Info("starting DiceDB", slog.String("version", config.DiceDBVersion))
-	slog.Info("running with", slog.Int("port", config.GlobalDiceDBConfig.Port))
+	slog.Info("running with", slog.Int("port", config.Config.Port))
 	slog.Info("running on", slog.Int("cores", runtime.NumCPU()))
 
 	// Conditionally add the number of shards to be used for DiceDB
 	numShards := runtime.NumCPU()
-	if config.GlobalDiceDBConfig.NumShards > 0 {
-		numShards = config.GlobalDiceDBConfig.NumShards
+	if config.Config.NumShards > 0 {
+		numShards = config.Config.NumShards
 	}
 	slog.Info("running with", slog.Int("shards", numShards))
 }
@@ -67,8 +67,8 @@ func Start() {
 	// TODO: Handle the addition of the default user
 	// and new users in a much better way. Doing this using
 	// and empty password check is not a good solution.
-	if config.GlobalDiceDBConfig.Password != "" {
-		_, _ = auth.UserStore.Add(config.GlobalDiceDBConfig.Username)
+	if config.Config.Password != "" {
+		_, _ = auth.UserStore.Add(config.Config.Username)
 	}
 
 	slog.SetDefault(logger.New())
@@ -87,18 +87,18 @@ func Start() {
 	)
 
 	wl, _ = wal.NewNullWAL()
-	if config.GlobalDiceDBConfig.EnableWAL {
-		if config.GlobalDiceDBConfig.WALEngine == "aof" {
-			_wl, err := wal.NewAOFWAL(config.GlobalDiceDBConfig.WALDir)
+	if config.Config.EnableWAL {
+		if config.Config.WALEngine == "aof" {
+			_wl, err := wal.NewAOFWAL(config.Config.WALDir)
 			if err != nil {
-				slog.Warn("could not create WAL with", slog.String("wal-engine", config.GlobalDiceDBConfig.WALEngine), slog.Any("error", err))
+				slog.Warn("could not create WAL with", slog.String("wal-engine", config.Config.WALEngine), slog.Any("error", err))
 				sigs <- syscall.SIGKILL
 				cancel()
 				return
 			}
 			wl = _wl
 		} else {
-			slog.Error("unsupported WAL engine", slog.String("engine", config.GlobalDiceDBConfig.WALEngine))
+			slog.Error("unsupported WAL engine", slog.String("engine", config.Config.WALEngine))
 			sigs <- syscall.SIGKILL
 			cancel()
 			return
@@ -112,14 +112,14 @@ func Start() {
 
 	slog.Debug("WAL initialization complete")
 
-	if config.GlobalDiceDBConfig.EnableWAL {
+	if config.Config.EnableWAL {
 		slog.Info("restoring database from WAL")
 		wal.ReplayWAL(wl)
 		slog.Info("database restored from WAL")
 	}
 	}
 
-	if config.GlobalDiceDBConfig.EnableWatch {
+	if config.Config.EnableWatch {
 		bufSize := config.WatchChanBufSize
 		cmdWatchChan = make(chan dstore.CmdWatchEvent, bufSize)
 	}
@@ -130,8 +130,8 @@ func Start() {
 	// core count ensures the application can make full use of all available hardware.
 	var numShards int
 	numShards = runtime.NumCPU()
-	if config.GlobalDiceDBConfig.NumShards > 0 {
-		numShards = config.GlobalDiceDBConfig.NumShards
+	if config.Config.NumShards > 0 {
+		numShards = config.Config.NumShards
 	}
 
 	// The runtime.GOMAXPROCS(numShards) call limits the number of operating system
@@ -202,7 +202,7 @@ func Start() {
 	close(sigs)
 
-	if config.GlobalDiceDBConfig.EnableWAL {
+	if config.Config.EnableWAL {
 		wal.ShutdownBG()
 	}

From ec0b852f6755432bd309f754d5dc2ad496a61c4e Mon Sep 17 00:00:00 2001
From: Arpit
Date: Wed, 22 Jan 2025 18:03:04 +0000
Subject: [PATCH 12/12] Server initialization hook

---
 cmd/root.go               |  1 +
 config/config.go          |  2 +-
 internal/logger/logger.go |  2 +-
 server/main.go            | 12 +++++++-----
 4 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/cmd/root.go b/cmd/root.go
index f532869c3..0a70e1fb0 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -41,6 +41,7 @@ var rootCmd = &cobra.Command{
 	Short: "an in-memory database;",
 	Run: func(cmd *cobra.Command, args []string) {
 		config.Init(cmd.Flags())
+		server.Init()
 		server.Start()
 	},
 }
diff --git a/config/config.go b/config/config.go
index dfed80386..11409af2e 100644
--- a/config/config.go
+++ b/config/config.go
@@ -27,7 +27,7 @@ type DiceDBConfig struct {
 	MaxClients int `mapstructure:"max-clients" default:"20000" description:"the maximum number of clients to accept"`
 	NumShards int `mapstructure:"num-shards" default:"-1" description:"number of shards to create. defaults to number of cores"`
 
-	EnableWAL bool `mapstructure:"enable-wal" default:"true" description:"enable write-ahead logging"`
+	EnableWAL bool `mapstructure:"enable-wal" default:"false" description:"enable write-ahead logging"`
 	WALEngine string `mapstructure:"wal-engine" default:"aof" description:"wal engine to use, values: sqlite, aof"`
 	WALDir string `mapstructure:"wal-dir" default:"/var/log/dicedb" description:"the directory to store WAL segments"`
 	WALMode string `mapstructure:"wal-mode" default:"buffered" description:"wal mode to use, values: buffered, unbuffered"`
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
index 5897f3b94..fae548188 100644
--- a/internal/logger/logger.go
+++ b/internal/logger/logger.go
@@ -27,7 +27,7 @@ func New() *slog.Logger {
 	zerolog.TimeFieldFormat = zerolog.TimeFormatUnix
 	zerologLogger := zerolog.New(zerolog.ConsoleWriter{
 		Out: os.Stderr,
-		NoColor: true,
+		NoColor: false,
 		TimeFormat: time.RFC3339,
 	}).Level(toZerologLevel(getSLogLevel())).With().Timestamp().Logger()
 	return slog.New(newZerologHandler(&zerologLogger))
diff --git a/server/main.go b/server/main.go
index b82ea81c8..edac4ada7 100644
--- a/server/main.go
+++ b/server/main.go
@@ -19,10 +19,10 @@ import (
 	"time"
 
 	"github.com/dicedb/dice/internal/auth"
+	"github.com/dicedb/dice/internal/logger"
 	"github.com/dicedb/dice/internal/server/httpws"
 
 	"github.com/dicedb/dice/internal/commandhandler"
-	"github.com/dicedb/dice/internal/logger"
 	"github.com/dicedb/dice/internal/server/abstractserver"
 	"github.com/dicedb/dice/internal/wal"
 	"github.com/dicedb/dice/internal/watchmanager"
@@ -60,6 +60,10 @@ func printBanner() {
 `)
 }
 
+func Init() {
+	slog.SetDefault(logger.New())
+}
+
 func Start() {
 	printBanner()
 	printConfiguration()
@@ -71,8 +75,6 @@ func Start() {
 		_, _ = auth.UserStore.Add(config.Config.Username)
 	}
 
-	slog.SetDefault(logger.New())
-
 	ctx, cancel := context.WithCancel(context.Background())
 
 	// Handle SIGTERM and SIGINT
@@ -113,9 +115,9 @@ func Start() {
 	slog.Debug("WAL initialization complete")
 
 	if config.Config.EnableWAL {
-		slog.Info("restoring database from WAL")
+		slog.Info("initializing wal restoration. this may take a while...")
 		wal.ReplayWAL(wl)
-		slog.Info("database restored from WAL")
+		slog.Info("in-memory state restored. process complete")
 	}
 }
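
For reference, a minimal sketch (not part of any patch above) of how the renamed config.Config global is consumed once config.Init has run. The standalone main and the empty flag set are assumptions for illustration only; in the actual series the flags are registered and passed in by cmd/root.go, and with an empty flag set the values below come solely from a dicedb.yaml file that config.Init can locate, with missing fields left at their Go zero values.

package main

import (
	"fmt"

	"github.com/dicedb/dice/config"
	"github.com/spf13/pflag"
)

func main() {
	// Empty flag set (assumption): config.Init still reads dicedb.yaml if present
	// and unmarshals the merged settings into the package-level config.Config.
	flags := pflag.NewFlagSet("dicedb", pflag.ContinueOnError)
	config.Init(flags)

	// Downstream code reads fields directly off config.Config, as the diffs above do.
	fmt.Println("port:", config.Config.Port)
	fmt.Println("wal enabled:", config.Config.EnableWAL)
}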