diff --git a/pkg/service/restore/batch.go b/pkg/service/restore/batch.go
index 50646e846..85bd5bb29 100644
--- a/pkg/service/restore/batch.go
+++ b/pkg/service/restore/batch.go
@@ -8,6 +8,7 @@ import (
     "sync"
 
     "github.com/pkg/errors"
+    "github.com/scylladb/go-set/strset"
 
     . "github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec"
 )
@@ -57,7 +58,7 @@ type batchDispatcher struct {
     hostShardCnt map[string]uint
 }
 
-func newBatchDispatcher(workload Workload, batchSize int, hostShardCnt map[string]uint, locationHosts map[Location][]string) *batchDispatcher {
+func newBatchDispatcher(workload Workload, batchSize int, hostShardCnt map[string]uint, locationHosts map[Location][]string, hostDCs map[string][]string) *batchDispatcher {
     sortWorkload(workload)
     var shards uint
     for _, sh := range hostShardCnt {
@@ -70,7 +71,7 @@ func newBatchDispatcher(workload Workload, batchSize int, hostShardCnt map[strin
         mu:                    sync.Mutex{},
         wait:                  make(chan struct{}),
         workload:              workload,
-        workloadProgress:      newWorkloadProgress(workload, locationHosts),
+        workloadProgress:      newWorkloadProgress(workload, locationHosts, hostDCs),
         batchSize:             batchSize,
         expectedShardWorkload: workload.TotalSize / int64(shards),
         hostShardCnt:          hostShardCnt,
@@ -106,7 +107,7 @@ type remoteSSTableDirProgress struct {
     RemainingSSTables []RemoteSSTable
 }
 
-func newWorkloadProgress(workload Workload, locationHosts map[Location][]string) workloadProgress {
+func newWorkloadProgress(workload Workload, locationHosts map[Location][]string, hostDCs map[string][]string) workloadProgress {
     dcBytes := make(map[string]int64)
     locationDC := make(map[string][]string)
     p := make([]remoteSSTableDirProgress, len(workload.RemoteDir))
@@ -121,7 +122,9 @@ func newWorkloadProgress(workload Workload, locationHosts map[Location][]string)
     hostDCAccess := make(map[string][]string)
     for loc, hosts := range locationHosts {
         for _, h := range hosts {
-            hostDCAccess[h] = append(hostDCAccess[h], locationDC[loc.StringWithoutDC()]...)
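+            // A host can be assigned SSTables only from DCs that are both
+            // present in this location and mapped to the host via --dc-mapping.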
See logs for more info", + rdw.Location, rdw.DC, rdw.Keyspace, rdw.Table, rdw.Size) } } for dc, bytes := range bd.workloadProgress.dcBytesToBeRestored { diff --git a/pkg/service/restore/batch_test.go b/pkg/service/restore/batch_test.go index 9f206716e..a79c58b4c 100644 --- a/pkg/service/restore/batch_test.go +++ b/pkg/service/restore/batch_test.go @@ -5,6 +5,8 @@ package restore import ( "testing" + "github.com/google/go-cmp/cmp" + "github.com/scylladb/scylla-manager/v3/pkg/service/backup/backupspec" ) @@ -114,7 +116,13 @@ func TestBatchDispatcher(t *testing.T) { "h3": 3, } - bd := newBatchDispatcher(workload, 1, hostToShard, locationHosts) + hostDCs := map[string][]string{ + "h1": {"dc1", "dc2"}, + "h2": {"dc1", "dc2"}, + "h3": {"dc3"}, + } + + bd := newBatchDispatcher(workload, 1, hostToShard, locationHosts, hostDCs) scenario := []struct { host string @@ -166,3 +174,162 @@ func TestBatchDispatcher(t *testing.T) { t.Fatalf("Expected sstables to be batched: %s", err) } } + +func TestNewWorkloadProgress(t *testing.T) { + testCases := []struct { + name string + + workload Workload + locationHosts map[backupspec.Location][]string + hostDCs map[string][]string + + expected map[string][]string + }{ + { + name: "one location with one DC", + workload: generateWorkload(t, []string{""}, map[string][]string{"": {"dc1"}}), + locationHosts: map[backupspec.Location][]string{ + {}: {"host1", "host2"}, + }, + hostDCs: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc1"}, + }, + + expected: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc1"}, + }, + }, + { + name: "one location with two DC's", + workload: generateWorkload(t, []string{""}, map[string][]string{"": {"dc1", "dc2"}}), + locationHosts: map[backupspec.Location][]string{ + {}: {"host1", "host2"}, + }, + hostDCs: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc2"}, + }, + + expected: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc2"}, + }, + }, + { + name: "one location with two DC's, more nodes", + workload: generateWorkload(t, []string{""}, map[string][]string{"": {"dc1", "dc2"}}), + locationHosts: map[backupspec.Location][]string{ + {}: {"host1", "host2", "host3", "host4"}, + }, + hostDCs: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc1"}, + "host3": {"dc2"}, + "host4": {"dc2"}, + }, + + expected: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc1"}, + "host3": {"dc2"}, + "host4": {"dc2"}, + }, + }, + { + name: "two locations with one DC each", + workload: generateWorkload(t, + []string{"location1", "location2"}, + map[string][]string{"location1": {"dc1"}, "location2": {"dc2"}}, + ), + locationHosts: map[backupspec.Location][]string{ + {Path: "location1"}: {"host1", "host2"}, + {Path: "location2"}: {"host1", "host2"}, + }, + hostDCs: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc2"}, + }, + + expected: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc2"}, + }, + }, + { + name: "two locations with one DC each, but some hosts doesn't have access", + workload: generateWorkload(t, + []string{"location1", "location2"}, + map[string][]string{"location1": {"dc1"}, "location2": {"dc2"}}, + ), + locationHosts: map[backupspec.Location][]string{ + {Path: "location1"}: {"host1", "host3", "host4"}, + {Path: "location2"}: {"host1", "host2", "host4"}, + }, + hostDCs: map[string][]string{ + "host1": {"dc1"}, + "host2": {"dc1"}, + "host3": {"dc2"}, + "host4": {"dc2"}, + }, + + expected: map[string][]string{ + "host1": {"dc1"}, + "host2": nil, + "host3": nil, + "host4": {"dc2"}, + }, + }, + { + 
name: "two locations with one DC each, but hosts maps to all dcs", + workload: generateWorkload(t, + []string{"location1", "location2"}, + map[string][]string{"location1": {"dc1"}, "location2": {"dc2"}}, + ), + locationHosts: map[backupspec.Location][]string{ + {Path: "location1"}: {"host1", "host2"}, + {Path: "location2"}: {"host1", "host2"}, + }, + hostDCs: map[string][]string{ + "host1": {"dc1", "dc2"}, + "host2": {"dc1", "dc2"}, + }, + + expected: map[string][]string{ + "host1": {"dc1", "dc2"}, + "host2": {"dc1", "dc2"}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + wp := newWorkloadProgress(tc.workload, tc.locationHosts, tc.hostDCs) + if diff := cmp.Diff(wp.hostDCAccess, tc.expected); diff != "" { + t.Fatalf("Actual != Expected: %s", diff) + } + }) + } +} + +func generateWorkload(t *testing.T, locationPaths []string, dcsInLocation map[string][]string) Workload { + t.Helper() + + var remoteDirs []RemoteDirWorkload + for _, path := range locationPaths { + dcs, ok := dcsInLocation[path] + if !ok { + t.Fatalf("each location should have corresponding entry in dcsInLocation map") + } + for _, dc := range dcs { + remoteDirs = append(remoteDirs, RemoteDirWorkload{ + ManifestInfo: &backupspec.ManifestInfo{ + DC: dc, + Location: backupspec.Location{Path: path}, + }, + }) + } + } + return Workload{RemoteDir: remoteDirs} +} diff --git a/pkg/service/restore/index.go b/pkg/service/restore/index.go index 32f91dee9..b80417d9a 100644 --- a/pkg/service/restore/index.go +++ b/pkg/service/restore/index.go @@ -76,6 +76,10 @@ func (w *tablesWorker) indexLocationWorkload(ctx context.Context, location Locat func (w *tablesWorker) createRemoteDirWorkloads(ctx context.Context, location Location) ([]RemoteDirWorkload, error) { var rawWorkload []RemoteDirWorkload err := w.forEachManifest(ctx, location, func(m ManifestInfoWithContent) error { + if slices.Contains(w.target.ignoredSourceDC, m.DC) { + w.logger.Info(ctx, "Ignoring DC", "dc", m.DC, "location", location) + return nil + } return m.ForEachIndexIterWithError(nil, func(fm FilesMeta) error { if !unitsContainTable(w.run.Units, fm.Keyspace, fm.Table) { return nil diff --git a/pkg/service/restore/model.go b/pkg/service/restore/model.go index 2dc654399..84f776f03 100644 --- a/pkg/service/restore/model.go +++ b/pkg/service/restore/model.go @@ -10,6 +10,7 @@ import ( "github.com/gocql/gocql" "github.com/pkg/errors" + "github.com/scylladb/go-set/strset" "github.com/scylladb/gocqlx/v2" "github.com/scylladb/scylla-manager/v3/pkg/scyllaclient" "github.com/scylladb/scylla-manager/v3/pkg/service/repair" @@ -33,9 +34,14 @@ type Target struct { RestoreSchema bool `json:"restore_schema,omitempty"` RestoreTables bool `json:"restore_tables,omitempty"` Continue bool `json:"continue"` + DCMappings DCMappings `json:"dc-mapping"` // Cache for host with access to remote location locationHosts map[Location][]string `json:"-"` + // Cache for host and their DC after applying DCMappings + hostDCs map[string][]string + // Cache for dcs that shouldn't be restored from location + ignoredSourceDC []string } const ( @@ -82,6 +88,16 @@ func (t Target) validateProperties(dcMap map[string][]string) error { if t.RestoreSchema && t.Keyspace != nil { return errors.New("restore schema always restores 'system_schema.*' tables only, no need to specify '--keyspace' flag") } + // Check for duplicates in Location + allLocations := strset.New() + for _, l := range t.Location { + p := l.RemotePath("") + if allLocations.Has(p) { + return 
errors.Errorf("location %s is specified multiple times", l) + } + allLocations.Add(p) + } + return nil } @@ -289,3 +305,42 @@ type HostInfo struct { Transfers int RateLimit int } + +// DCMappings represents how DCs from the backup cluster are mapped to DCs in the restore cluster. +// For details about how DCs can be mapped refer to --dc-mapping documentation. +type DCMappings []DCMapping + +// DCMapping represent single instance of datacenter mappings. See DCMappings for details. +type DCMapping struct { + Source []string `json:"source"` + IgnoreSource []string `json:"ignore_source"` + Target []string `json:"target"` + IgnoreTarget []string `json:"ignore_target"` +} + +func (mappings DCMappings) calculateMappings() (targetMap map[string][]string, ignoreSource, ignoreTarget []string) { + targetMap = map[string][]string{} + for _, mapping := range mappings { + ignoreSource = append(ignoreSource, mapping.IgnoreSource...) + ignoreTarget = append(ignoreTarget, mapping.IgnoreTarget...) + + if len(mapping.Source) == 0 || len(mapping.Target) == 0 { + continue + } + tIdx, sIdx := 0, 0 + for { + target, source := mapping.Target[tIdx], mapping.Source[sIdx] + targetMap[target] = append(targetMap[target], source) + if tIdx == len(mapping.Target)-1 && sIdx == len(mapping.Source)-1 { + break + } + if tIdx < len(mapping.Target)-1 { + tIdx++ + } + if sIdx < len(mapping.Source)-1 { + sIdx++ + } + } + } + return targetMap, ignoreSource, ignoreTarget +} diff --git a/pkg/service/restore/model_test.go b/pkg/service/restore/model_test.go new file mode 100644 index 000000000..7d304b0cd --- /dev/null +++ b/pkg/service/restore/model_test.go @@ -0,0 +1,167 @@ +// Copyright (C) 2025 ScyllaDB +package restore + +import ( + "maps" + "slices" + "testing" +) + +func TestCalculateMappings(t *testing.T) { + testCases := []struct { + name string + + mappings DCMappings + expectedTargetMap map[string][]string + expectedIgnoreSource []string + expectedIgnoreTarget []string + }{ + { + name: "dc1=>dc2", + mappings: []DCMapping{ + { + Source: []string{"dc1"}, + Target: []string{"dc2"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc2": {"dc1"}, + }, + }, + { + name: "dc1=>dc1,dc2", + mappings: []DCMapping{ + { + Source: []string{"dc1"}, + Target: []string{"dc1", "dc2"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc1": {"dc1"}, + "dc2": {"dc1"}, + }, + }, + { + name: "dc1,dc2=>dc3", + mappings: []DCMapping{ + { + Source: []string{"dc1", "dc2"}, + Target: []string{"dc3"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc3": {"dc1", "dc2"}, + }, + }, + { + name: "dc1,dc2=>dc2", + mappings: []DCMapping{ + { + Source: []string{"dc1", "dc2"}, + Target: []string{"dc2"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc2": {"dc1", "dc2"}, + }, + }, + { + name: "empty Source", + mappings: []DCMapping{ + { + Source: []string{}, + Target: []string{"dc2"}, + }, + }, + expectedTargetMap: map[string][]string{}, + }, + { + name: "empty Target", + mappings: []DCMapping{ + { + Source: []string{"dc1"}, + Target: []string{}, + }, + }, + expectedTargetMap: map[string][]string{}, + }, + { + name: "dc1,dc2,dc3=>dc1,dc2", + mappings: []DCMapping{ + { + Source: []string{"dc1", "dc2", "dc3"}, + Target: []string{"dc1", "dc2"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc1": {"dc1"}, + "dc2": {"dc2", "dc3"}, + }, + }, + { + name: "dc1,dc2=>dc1,dc2;dc2=>dc3", + mappings: []DCMapping{ + { + Source: []string{"dc1", "dc2"}, + Target: []string{"dc1", "dc2"}, + }, + { + Source: []string{"dc2"}, + Target: 
[]string{"dc3"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc1": {"dc1"}, + "dc2": {"dc2"}, + "dc3": {"dc2"}, + }, + }, + { + name: "dc1,!dc2=>dc1,dc2", + mappings: []DCMapping{ + { + Source: []string{"dc1"}, + Target: []string{"dc1", "dc2"}, + IgnoreSource: []string{"dc2"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc1": {"dc1"}, + "dc2": {"dc1"}, + }, + expectedIgnoreSource: []string{"dc2"}, + }, + { + name: "dc1,dc2=>dc1,!dc2", + mappings: []DCMapping{ + { + Source: []string{"dc1", "dc2"}, + Target: []string{"dc1"}, + IgnoreTarget: []string{"dc2"}, + }, + }, + expectedTargetMap: map[string][]string{ + "dc1": {"dc1", "dc2"}, + }, + expectedIgnoreTarget: []string{"dc2"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + targetMap, ignoreSource, ignoreTarget := tc.mappings.calculateMappings() + + if !maps.EqualFunc(targetMap, tc.expectedTargetMap, slices.Equal) { + t.Fatalf("Expected %v, but got %v", tc.expectedTargetMap, targetMap) + } + + if !slices.Equal(ignoreSource, tc.expectedIgnoreSource) { + t.Fatalf("Expected %v, but got %v\n", tc.expectedIgnoreSource, ignoreSource) + } + + if !slices.Equal(ignoreTarget, tc.expectedIgnoreTarget) { + t.Fatalf("Expected %v, but got %v\n", tc.expectedIgnoreTarget, ignoreTarget) + } + }) + } +} diff --git a/pkg/service/restore/tables_worker.go b/pkg/service/restore/tables_worker.go index 91c87c73f..b3af87eb3 100644 --- a/pkg/service/restore/tables_worker.go +++ b/pkg/service/restore/tables_worker.go @@ -213,7 +213,7 @@ func (w *tablesWorker) stageRestoreData(ctx context.Context) error { } } - bd := newBatchDispatcher(workload, w.target.BatchSize, w.hostShardCnt, w.target.locationHosts) + bd := newBatchDispatcher(workload, w.target.BatchSize, w.hostShardCnt, w.target.locationHosts, w.target.hostDCs) f := func(n int) error { host := w.hosts[n] diff --git a/pkg/service/restore/worker.go b/pkg/service/restore/worker.go index c6a42d1b3..d2b50bb7e 100644 --- a/pkg/service/restore/worker.go +++ b/pkg/service/restore/worker.go @@ -6,6 +6,7 @@ import ( "context" "encoding/json" "fmt" + "maps" "math/rand" "path" "regexp" @@ -96,20 +97,19 @@ func (w *worker) initTarget(ctx context.Context, properties json.RawMessage) err return errors.Wrap(err, "get status") } + targetMap, ignoreSource, _ := t.DCMappings.calculateMappings() + t.hostDCs = w.applyDCMapping(status, targetMap) + w.logger.Info(ctx, "Applied dc mappings", "mappings", targetMap, "host_dcs", t.hostDCs) + + t.ignoredSourceDC = ignoreSource + // All nodes should be up during restore if err := w.client.VerifyNodesAvailability(ctx); err != nil { return errors.Wrap(err, "verify all nodes availability") } - allLocations := strset.New() locationHosts := make(map[Location][]string) for _, l := range t.Location { - p := l.RemotePath("") - if allLocations.Has(p) { - return errors.Errorf("location %s is specified multiple times", l) - } - allLocations.Add(p) - var ( remotePath = l.RemotePath("") locationStatus = status @@ -161,12 +161,96 @@ func (w *worker) initTarget(ctx context.Context, properties json.RawMessage) err } else { w.logger.Info(ctx, "Found schema file") } + return nil + } + + // The only way to have a list of dcs from the source cluster is to + // get them from backup locations. 
+    sourceDC, err := w.collectDCFromLocation(ctx, t.locationHosts)
+    if err != nil {
+        return err
+    }
+    targetDC := slices.Collect(maps.Keys(dcMap))
+    if err := w.validateDCMappings(t.DCMappings, sourceDC, targetDC); err != nil {
+        w.logger.Debug(ctx,
+            "Validate dc mapping",
+            "source_dc", sourceDC,
+            "target_dc", targetDC,
+            "mappings", t.DCMappings,
+        )
+        return err
+    }
 
     w.logger.Info(ctx, "Initialized target", "target", t)
     return nil
 }
 
+// applyDCMapping applies the given mappings to each node, mapping each host's datacenter in the target cluster
+// to its corresponding datacenter(s) in the source cluster.
+func (w *worker) applyDCMapping(status scyllaclient.NodeStatusInfoSlice, targetMap map[string][]string) map[string][]string {
+    hostDCs := map[string][]string{}
+    for _, n := range status {
+        hostDCs[n.Addr] = append(hostDCs[n.Addr], targetMap[n.Datacenter]...)
+    }
+    return hostDCs
+}
+
+func (w *worker) collectDCFromLocation(ctx context.Context, locationHosts map[Location][]string) ([]string, error) {
+    sourceDCs := strset.New()
+    for loc, hosts := range locationHosts {
+        manifests, err := w.getManifestInfo(ctx, hosts[0], loc)
+        if err != nil {
+            return nil, errors.Wrap(err, "getManifestInfo")
+        }
+        if len(manifests) == 0 {
+            return nil, errors.Errorf("no snapshot with tag %s", w.run.SnapshotTag)
+        }
+        for _, m := range manifests {
+            sourceDCs.Add(m.DC)
+        }
+    }
+    return sourceDCs.List(), nil
+}
+
+// validateDCMappings validates that every DC in the source cluster has a corresponding DC in the target cluster,
+// taking DC mappings into account.
+func (w *worker) validateDCMappings(mappings DCMappings, sourceDC, targetDC []string) error {
+    slices.Sort(sourceDC)
+    slices.Sort(targetDC)
+
+    if len(mappings) == 0 {
+        if slices.Equal(sourceDC, targetDC) {
+            return nil
+        }
+        return errors.Errorf("Source DC(%s) != Target DC(%s)", sourceDC, targetDC)
+    }
+    targetMap, ignoreSource, ignoreTarget := mappings.calculateMappings()
+    mappedTargetDCs := strset.New()
+    // Make sure that each DC from the target cluster has a corresponding DC (or mapping) in the source cluster
+    for _, dc := range targetDC {
+        if slices.Contains(ignoreTarget, dc) {
+            continue
+        }
+        if dcs, ok := targetMap[dc]; ok {
+            mappedTargetDCs.Add(dcs...)
+            continue
+        }
+        return errors.Errorf("Target DC(%s) doesn't have a match in the source cluster: %v", dc, sourceDC)
+    }
+
+    sourceDCSet := strset.New(sourceDC...)
+    ignoreSourceSet := strset.New(ignoreSource...)
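+    // Source DCs listed in ignore_source are dropped up front, so they are not required to be mapped.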
+    sourceDCSet = strset.Difference(sourceDCSet, ignoreSourceSet)
+
+    // Check that every DC from the source cluster has been mapped to a target DC
+    if !sourceDCSet.IsEqual(mappedTargetDCs) {
+        return errors.Errorf(
+            "Source DCs(%v) don't have a match in the target cluster: %v",
+            strset.Difference(sourceDCSet, mappedTargetDCs), targetDC)
+    }
+    return nil
+}
+
 func skipRestorePatterns(ctx context.Context, client *scyllaclient.Client, session gocqlx.Session) ([]string, error) {
     keyspaces, err := client.KeyspacesByType(ctx)
     if err != nil {
@@ -344,6 +428,10 @@ func (w *worker) initUnits(ctx context.Context) error {
     var foundManifest bool
     for _, l := range w.target.Location {
         manifestHandler := func(miwc ManifestInfoWithContent) error {
+            // For now DC mapping is applied only to restoring tables
+            if w.target.RestoreTables && slices.Contains(w.target.ignoredSourceDC, miwc.DC) {
+                return nil
+            }
             foundManifest = true
 
             filesHandler := func(fm FilesMeta) {
diff --git a/pkg/service/restore/worker_test.go b/pkg/service/restore/worker_test.go
new file mode 100644
index 000000000..4b5bfd1af
--- /dev/null
+++ b/pkg/service/restore/worker_test.go
@@ -0,0 +1,109 @@
+// Copyright (C) 2025 ScyllaDB
+package restore
+
+import "testing"
+
+func TestValidateDCMappings(t *testing.T) {
+    testCases := []struct {
+        name       string
+        sourceDC   []string
+        targetDC   []string
+        dcMappings DCMappings
+
+        expectedErr bool
+    }{
+        {
+            name:     "sourceDC != targetDC, but with full mapping",
+            sourceDC: []string{"dc1"},
+            targetDC: []string{"dc2"},
+            dcMappings: []DCMapping{
+                {Source: []string{"dc1"}, Target: []string{"dc2"}},
+            },
+            expectedErr: false,
+        },
+        {
+            name:     "source != target, but with full mapping, two DCs per cluster",
+            sourceDC: []string{"dc1", "dc2"},
+            targetDC: []string{"dc3", "dc4"},
+            dcMappings: []DCMapping{
+                {Source: []string{"dc1", "dc2"}, Target: []string{"dc3", "dc4"}},
+            },
+            expectedErr: false,
+        },
+        {
+            name:        "sourceDCs == targetDCs, no mapping",
+            sourceDC:    []string{"dc1"},
+            targetDC:    []string{"dc1"},
+            expectedErr: false,
+        },
+        {
+            name:        "sourceDCs == targetDCs, no mapping, two DCs per cluster",
+            sourceDC:    []string{"dc1", "dc2"},
+            targetDC:    []string{"dc1", "dc2"},
+            expectedErr: false,
+        },
+        {
+            name:        "sourceDCs != targetDCs, no mapping",
+            sourceDC:    []string{"dc1"},
+            targetDC:    []string{"dc2"},
+            expectedErr: true,
+        },
+        {
+            name:     "sourceDCs != targetDCs, but with full mapping",
+            sourceDC: []string{"dc1", "dc2"},
+            targetDC: []string{"dc2"},
+            dcMappings: []DCMapping{
+                {Source: []string{"dc1", "dc2"}, Target: []string{"dc2"}},
+            },
+            expectedErr: false,
+        },
+        {
+            name:     "sourceDCs != targetDCs, but with partial mapping",
+            sourceDC: []string{"dc1", "dc2"},
+            targetDC: []string{"dc2"},
+            dcMappings: []DCMapping{
+                {Source: []string{"dc1"}, Target: []string{"dc2"}},
+            },
+            expectedErr: true,
+        },
+        {
+            name:     "sourceDCs != targetDCs, with ignored source DC",
+            sourceDC: []string{"dc1", "dc2"},
+            targetDC: []string{"dc2"},
+            dcMappings: []DCMapping{
+                {
+                    Source:       []string{"dc1"},
+                    Target:       []string{"dc2"},
+                    IgnoreSource: []string{"dc2"},
+                },
+            },
+            expectedErr: false,
+        },
+        {
+            name:     "sourceDCs != targetDCs, with ignored target DC",
+            sourceDC: []string{"dc1"},
+            targetDC: []string{"dc1", "dc2"},
+            dcMappings: []DCMapping{
+                {
+                    Source:       []string{"dc1"},
+                    Target:       []string{"dc1"},
+                    IgnoreTarget: []string{"dc2"},
+                },
+            },
+            expectedErr: false,
+        },
+    }
+
+    for _, tc := range testCases {
+        t.Run(tc.name, func(t *testing.T) {
+            w := &worker{}
+            err := w.validateDCMappings(tc.dcMappings, tc.sourceDC, tc.targetDC)
+            if tc.expectedErr && err == nil {
+                t.Fatalf("Expected err, but got nil")
+            }
+            if !tc.expectedErr && err != nil {
+                t.Fatalf("Unexpected err: %v", err)
+            }
+        })
+    }
+}