From 910a2057f8eaa17cac742e1fe1d53eac93451b28 Mon Sep 17 00:00:00 2001
From: ThreadDao <yufen.zong@zilliz.com>
Date: Thu, 21 Nov 2024 19:15:28 +0800
Subject: [PATCH] test: update cases for resource group feature

Signed-off-by: ThreadDao <yufen.zong@zilliz.com>
---
 test/base/milvus_client.go            |  12 +-
 test/common/response_check.go         |  59 +++-
 test/common/utils.go                  |   2 +-
 test/testcases/configure_test.go      |   6 +-
 test/testcases/delete_test.go         |   2 +-
 test/testcases/insert_test.go         |   8 +-
 test/testcases/main_test.go           |   4 +-
 test/testcases/option.go              |   2 +-
 test/testcases/resource_group_test.go | 370 +++++++++++++++++++++-----
 test/testcases/search_test.go         |  10 +-
 10 files changed, 390 insertions(+), 85 deletions(-)

diff --git a/test/base/milvus_client.go b/test/base/milvus_client.go
index 4dda3f34..564a81f8 100644
--- a/test/base/milvus_client.go
+++ b/test/base/milvus_client.go
@@ -601,9 +601,9 @@ func (mc *MilvusClient) ListResourceGroups(ctx context.Context) ([]string, error
 }
 
 // CreateResourceGroup
-func (mc *MilvusClient) CreateResourceGroup(ctx context.Context, rgName string) error {
+func (mc *MilvusClient) CreateResourceGroup(ctx context.Context, rgName string, opts ...client.CreateResourceGroupOption) error {
 	preRequest("CreateResourceGroup", ctx, rgName)
-	err := mc.mClient.CreateResourceGroup(ctx, rgName)
+	err := mc.mClient.CreateResourceGroup(ctx, rgName, opts...)
 	postResponse("CreateResourceGroup", err)
 	return err
 }
@@ -624,6 +624,14 @@ func (mc *MilvusClient) DropResourceGroup(ctx context.Context, rgName string) er
 	return err
 }
 
+// UpdateResourceGroups updates resource group configs
+func (mc *MilvusClient) UpdateResourceGroups(ctx context.Context, opts ...client.UpdateResourceGroupsOption) error {
+	preRequest("UpdateResourceGroups", ctx, opts)
+	err := mc.mClient.UpdateResourceGroups(ctx, opts...)
+	postResponse("UpdateResourceGroups", err)
+	return err
+}
+
 // TransferNode transfer node
 func (mc *MilvusClient) TransferNode(ctx context.Context, sourceRg, targetRg string, nodesNum int32) error {
 	preRequest("TransferNode", ctx, sourceRg, targetRg, nodesNum)
diff --git a/test/common/response_check.go b/test/common/response_check.go
index 351cf799..c54c2f00 100644
--- a/test/common/response_check.go
+++ b/test/common/response_check.go
@@ -106,14 +106,14 @@ func CheckNotContainsCollection(t *testing.T, collections []*entity.Collection,
 }
 
 // CheckInsertResult check insert result, ids len (insert count), ids data (pks, but no auto ids)
-func CheckInsertResult(t *testing.T, actualIds entity.Column, expIds entity.Column) {
-	require.Equal(t, actualIds.Len(), expIds.Len())
-	switch expIds.Type() {
+func CheckInsertResult(t *testing.T, actualIDs entity.Column, expIDs entity.Column) {
+	require.Equal(t, actualIDs.Len(), expIDs.Len())
+	switch expIDs.Type() {
 	// pk field support int64 and varchar type
 	case entity.FieldTypeInt64:
-		require.ElementsMatch(t, actualIds.(*entity.ColumnInt64).Data(), expIds.(*entity.ColumnInt64).Data())
+		require.ElementsMatch(t, actualIDs.(*entity.ColumnInt64).Data(), expIDs.(*entity.ColumnInt64).Data())
 	case entity.FieldTypeVarChar:
-		require.ElementsMatch(t, actualIds.(*entity.ColumnVarChar).Data(), expIds.(*entity.ColumnVarChar).Data())
+		require.ElementsMatch(t, actualIDs.(*entity.ColumnVarChar).Data(), expIDs.(*entity.ColumnVarChar).Data())
 	default:
 		log.Printf("The primary field only support type: [%v, %v]", entity.FieldTypeInt64, entity.FieldTypeVarChar)
 	}
@@ -322,8 +322,55 @@ func CheckPersistentSegments(t *testing.T, actualSegments []*entity.Segment, exp
 	require.Equal(t, actualNb, expNb)
 }
 
+func CheckTransfer(t *testing.T, actualRgs []*entity.ResourceGroupTransfer, expRgs []*entity.ResourceGroupTransfer) {
+	if len(expRgs) == 0 {
+		require.Len(t, actualRgs, 0)
+	} else {
+		_expRgs := make([]string, 0, len(expRgs))
+		_actualRgs := make([]string, 0, len(actualRgs))
+		for _, rg := range expRgs {
+			_expRgs = append(_expRgs, rg.ResourceGroup)
+		}
+		for _, rg := range actualRgs {
+			_actualRgs = append(_actualRgs, rg.ResourceGroup)
+		}
+		require.ElementsMatch(t, _expRgs, _actualRgs)
+	}
+}
+
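+// checkResourceGroupConfig verifies only the fields that are set in the expected config; nil fields are skipped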
+func checkResourceGroupConfig(t *testing.T, actualConfig *entity.ResourceGroupConfig, expConfig *entity.ResourceGroupConfig) {
+	if expConfig.Requests != nil {
+		require.EqualValuesf(t, expConfig.Requests.NodeNum, actualConfig.Requests.NodeNum, "Requests.NodeNum mismatch")
+	}
+
+	if expConfig.Limits != nil {
+		require.EqualValuesf(t, expConfig.Limits.NodeNum, actualConfig.Limits.NodeNum, "Limits.NodeNum mismatch")
+	}
+
+	if expConfig.TransferFrom != nil {
+		CheckTransfer(t, expConfig.TransferFrom, actualConfig.TransferFrom)
+	}
+
+	if expConfig.TransferTo != nil {
+		CheckTransfer(t, expConfig.TransferTo, actualConfig.TransferTo)
+	}
+}
+
 func CheckResourceGroup(t *testing.T, actualRg *entity.ResourceGroup, expRg *entity.ResourceGroup) {
-	require.EqualValues(t, expRg, actualRg)
+	require.EqualValues(t, expRg.Name, actualRg.Name, "ResourceGroup name mismatch")
+	require.EqualValues(t, expRg.Capacity, actualRg.Capacity, "ResourceGroup capacity mismatch")
+	if expRg.AvailableNodesNumber >= 0 {
+		require.EqualValues(t, expRg.AvailableNodesNumber, len(actualRg.Nodes), "AvailableNodesNumber mismatch")
+	}
+
+	if expRg.Config != nil {
+		checkResourceGroupConfig(t, actualRg.Config, expRg.Config)
+	}
+
+	if expRg.Nodes != nil {
+		require.EqualValues(t, len(expRg.Nodes), len(actualRg.Nodes), "Nodes count mismatch")
+	}
 }
 
 func getDbNames(dbs []entity.Database) []string {
diff --git a/test/common/utils.go b/test/common/utils.go
index 3c94e4f6..047a4ee7 100644
--- a/test/common/utils.go
+++ b/test/common/utils.go
@@ -1421,7 +1421,7 @@ var InvalidExpressions = []InvalidExprStruct{
 	// {Expr: fmt.Sprintf("json_contains_aby (%s['list'], 2)", DefaultJSONFieldName), ErrNil: false, ErrMsg: "invalid expression: json_contains_aby"},
 	// {Expr: fmt.Sprintf("json_contains_aby (%s['list'], 2)", DefaultJSONFieldName), ErrNil: false, ErrMsg: "invalid expression: json_contains_aby"},
 	{Expr: fmt.Sprintf("%s[-1] > %d", DefaultInt8ArrayField, TestCapacity), ErrNil: false, ErrMsg: "cannot parse expression"}, //  array[-1] >
-	{Expr: fmt.Sprintf(fmt.Sprintf("%s[-1] > 1", DefaultJSONFieldName)), ErrNil: false, ErrMsg: "invalid expression"},         //  json[-1] >
+	{Expr: fmt.Sprintf("%s[-1] > 1", DefaultJSONFieldName), ErrNil: false, ErrMsg: "invalid expression"},                      //  json[-1] >
 }
 
 func GenBatchSizes(limit int, batch int) []int {
diff --git a/test/testcases/configure_test.go b/test/testcases/configure_test.go
index 79ace802..2782b4a6 100644
--- a/test/testcases/configure_test.go
+++ b/test/testcases/configure_test.go
@@ -65,14 +65,14 @@ func TestCompactAfterDelete(t *testing.T) {
 	errFlush := mc.Flush(ctx, collName, false)
 	common.CheckErr(t, errFlush, true)
 
-	segments, _ := mc.GetPersistentSegmentInfo(ctx, collName)
-	require.Len(t, segments, 1)
-
 	// index
 	indexHnsw, _ := entity.NewIndexHNSW(entity.L2, 8, 96)
 	err := mc.CreateIndex(ctx, collName, common.DefaultFloatVecFieldName, indexHnsw, false)
 	common.CheckErr(t, err, true)
 
+	segments, _ := mc.GetPersistentSegmentInfo(ctx, collName)
+	require.Len(t, segments, 1)
+
 	// delete half ids
 	deleteIds := ids.Slice(0, common.DefaultNb/2)
 	errDelete := mc.DeleteByPks(ctx, collName, "", deleteIds)
diff --git a/test/testcases/delete_test.go b/test/testcases/delete_test.go
index 740c6c8a..bdb908a0 100644
--- a/test/testcases/delete_test.go
+++ b/test/testcases/delete_test.go
@@ -186,7 +186,7 @@ func TestDeletePartitionIdsNotMatch(t *testing.T) {
 	partitionName, vecColumnDefault, _ := createInsertTwoPartitions(ctx, t, mc, collName, common.DefaultNb)
 
 	// delete [0:10) from new partition -> delete nothing
-	deleteIds := vecColumnDefault.IdsColumn.Slice(0, 10)
+	deleteIds := vecColumnDefault.IDsColumn.Slice(0, 10)
 	errDelete := mc.DeleteByPks(ctx, collName, partitionName, deleteIds)
 	common.CheckErr(t, errDelete, true)
 
diff --git a/test/testcases/insert_test.go b/test/testcases/insert_test.go
index 00f44b07..c7c787ea 100644
--- a/test/testcases/insert_test.go
+++ b/test/testcases/insert_test.go
@@ -439,9 +439,9 @@ func TestInsertRepeatedDynamicField(t *testing.T) {
 	}
 
 	type dataRows struct {
-		Int64    int64       `json:"int64" milvus:"name:int64"`
-		Float    float32     `json:"float" milvus:"name:float"`
-		FloatVec []float32   `json:"floatVec" milvus:"name:floatVec"`
+		Int64    int64     `json:"int64" milvus:"name:int64"`
+		Float    float32   `json:"float" milvus:"name:float"`
+		FloatVec []float32 `json:"floatVec" milvus:"name:floatVec"`
 		DynamicRows
 	}
 	rows := make([]interface{}, 0, 100)
@@ -450,7 +450,7 @@ func TestInsertRepeatedDynamicField(t *testing.T) {
 			Int64:    int64(i),
 			Float:    float32(i),
 			FloatVec: common.GenFloatVector(common.DefaultDim),
-			DynamicRows:  DynamicRows{
+			DynamicRows: DynamicRows{
 				Float: 0.0,
 			},
 		}
diff --git a/test/testcases/main_test.go b/test/testcases/main_test.go
index bfcd69c0..d9519e22 100644
--- a/test/testcases/main_test.go
+++ b/test/testcases/main_test.go
@@ -435,13 +435,13 @@ func createInsertTwoPartitions(ctx context.Context, t *testing.T, mc *base.Milvu
 
 	defaultPartition = HelpPartitionColumns{
 		PartitionName: common.DefaultPartition,
-		IdsColumn:     idsDefault,
+		IDsColumn:     idsDefault,
 		VectorColumn:  vecColumn,
 	}
 
 	newPartition = HelpPartitionColumns{
 		PartitionName: partitionName,
-		IdsColumn:     idsPartition,
+		IDsColumn:     idsPartition,
 		VectorColumn:  vecColumnNew,
 	}
 
diff --git a/test/testcases/option.go b/test/testcases/option.go
index 6a88973d..1109c19a 100644
--- a/test/testcases/option.go
+++ b/test/testcases/option.go
@@ -7,7 +7,7 @@ import (
 
 type HelpPartitionColumns struct {
 	PartitionName string
-	IdsColumn     entity.Column
+	IDsColumn     entity.Column
 	VectorColumn  entity.Column
 }
 
diff --git a/test/testcases/resource_group_test.go b/test/testcases/resource_group_test.go
index d69aa9c1..82671536 100644
--- a/test/testcases/resource_group_test.go
+++ b/test/testcases/resource_group_test.go
@@ -4,7 +4,9 @@ package testcases
 
 import (
 	"context"
+	"fmt"
 	"log"
+	"math/rand"
 	"testing"
 	"time"
 
@@ -24,6 +26,7 @@ const (
 )
 
 func resetRgs(t *testing.T, ctx context.Context, mc *base.MilvusClient) {
+	t.Helper()
 	// release and drop all collections
 	collections, _ := mc.ListCollections(ctx)
 	for _, coll := range collections {
@@ -35,19 +38,20 @@ func resetRgs(t *testing.T, ctx context.Context, mc *base.MilvusClient) {
 	// reset resource groups
 	rgs, errList := mc.ListResourceGroups(ctx)
 	common.CheckErr(t, errList, true)
+	for _, rg := range rgs {
+		// reset every rg (including the default one) to 0 requests/limits and clear its transfer
+		// config, so that all non-default rgs become empty and can be dropped below
+		err := mc.UpdateResourceGroups(ctx, client.WithUpdateResourceGroupConfig(rg, &entity.ResourceGroupConfig{
+			Requests:     &entity.ResourceGroupLimit{NodeNum: 0},
+			Limits:       &entity.ResourceGroupLimit{NodeNum: 0},
+			TransferFrom: []*entity.ResourceGroupTransfer{},
+			TransferTo:   []*entity.ResourceGroupTransfer{},
+		}))
+		common.CheckErr(t, err, true)
+	}
 	for _, rg := range rgs {
 		if rg != common.DefaultRgName {
-			// describe rg and get available node
-			rgInfo, errDescribe := mc.DescribeResourceGroup(ctx, rg)
-			common.CheckErr(t, errDescribe, true)
-
-			// transfer available nodes into default rg
-			if rgInfo.AvailableNodesNumber > 0 {
-				errTransfer := mc.TransferNode(ctx, rg, common.DefaultRgName, rgInfo.AvailableNodesNumber)
-				common.CheckErr(t, errTransfer, true)
-			}
-
-			// drop rg
 			errDrop := mc.DropResourceGroup(ctx, rg)
 			common.CheckErr(t, errDrop, true)
 		}
@@ -58,6 +62,28 @@ func resetRgs(t *testing.T, ctx context.Context, mc *base.MilvusClient) {
 	require.Len(t, rgs2, 1)
 }
 
+// onCheckRgAvailable polls the rg until it reports the expected number of available nodes; currently unused
+func onCheckRgAvailable(t *testing.T, ctx context.Context, mc *base.MilvusClient, rgName string, expAvailable int32) {
+	for {
+		select {
+		case <-ctx.Done():
+			require.FailNowf(t, "timeout waiting for available nodes", "expected available node num: %d", expAvailable)
+		default:
+			rg, _ := mc.DescribeResourceGroup(ctx, rgName)
+			if int32(len(rg.Nodes)) == expAvailable {
+				return
+			}
+			time.Sleep(time.Second * 2)
+		}
+	}
+}
+
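+// checkResourceGroup describes the resource group and compares it against the expected values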
+func checkResourceGroup(t *testing.T, ctx context.Context, mc *base.MilvusClient, expRg *entity.ResourceGroup) {
+	actualRg, err := mc.DescribeResourceGroup(ctx, expRg.Name)
+	common.CheckErr(t, err, true)
+	common.CheckResourceGroup(t, actualRg, expRg)
+}
+
 // test rg default: list rg, create rg, describe rg, transfer node
 func TestRgDefault(t *testing.T) {
 	ctx := createContext(t, time.Second*common.DefaultTimeout)
@@ -65,19 +91,23 @@ func TestRgDefault(t *testing.T) {
 	mc := createMilvusClient(ctx, t)
 	resetRgs(t, ctx, mc)
 
-	// describe default rg and check default rg info
-	defaultRg, errDescribe := mc.DescribeResourceGroup(ctx, common.DefaultRgName)
-	common.CheckErr(t, errDescribe, true)
+	// describe default rg and check its info: capacity is DefaultRgCapacity, all query nodes are available
 	expDefaultRg := &entity.ResourceGroup{
 		Name:                 common.DefaultRgName,
 		Capacity:             common.DefaultRgCapacity,
 		AvailableNodesNumber: configQnNodes,
+		Config: &entity.ResourceGroupConfig{
+			Limits: &entity.ResourceGroupLimit{NodeNum: 0},
+		},
 	}
-	common.CheckResourceGroup(t, defaultRg, expDefaultRg)
+	checkResourceGroup(t, ctx, mc, expDefaultRg)
 
 	// create new rg
 	rgName := common.GenRandomString(6)
-	errCreate := mc.CreateResourceGroup(ctx, rgName)
+	errCreate := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+	}))
 	common.CheckErr(t, errCreate, true)
 
 	// list rgs
@@ -86,42 +116,109 @@ func TestRgDefault(t *testing.T) {
 	require.ElementsMatch(t, rgs, []string{common.DefaultRgName, rgName})
 
 	// describe new rg and check new rg info
-	myRg, errDescribe2 := mc.DescribeResourceGroup(ctx, rgName)
-	common.CheckErr(t, errDescribe2, true)
 	expRg := &entity.ResourceGroup{
 		Name:                 rgName,
-		Capacity:             0,
-		AvailableNodesNumber: 0,
+		Capacity:             newRgNode,
+		AvailableNodesNumber: newRgNode,
+		Config: &entity.ResourceGroupConfig{
+			Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+			Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		},
 	}
-	common.CheckResourceGroup(t, myRg, expRg)
+	checkResourceGroup(t, ctx, mc, expRg)
 
-	// transfer node from default rg into new rg
-	errTransfer := mc.TransferNode(ctx, common.DefaultRgName, rgName, newRgNode)
-	common.CheckErr(t, errTransfer, true)
+	// update resource group
+	errUpdate := mc.UpdateResourceGroups(ctx, client.WithUpdateResourceGroupConfig(rgName, &entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+	}))
+	common.CheckErr(t, errUpdate, true)
 
-	// check rg info after transfer nodes
+	// check rg info after updating requests/limits to configQnNodes
-	myRg2, _ := mc.DescribeResourceGroup(ctx, rgName)
 	transferRg := &entity.ResourceGroup{
 		Name:                 rgName,
-		Capacity:             newRgNode,
-		AvailableNodesNumber: newRgNode,
+		Capacity:             configQnNodes,
+		AvailableNodesNumber: configQnNodes,
+		Config: &entity.ResourceGroupConfig{
+			Requests: &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+		},
 	}
-	common.CheckResourceGroup(t, myRg2, transferRg)
+	checkResourceGroup(t, ctx, mc, transferRg)
 
-	// check default rg info
-	defaultRg2, _ := mc.DescribeResourceGroup(ctx, common.DefaultRgName)
+	// check default rg info: 0 Nodes
 	expDefaultRg2 := &entity.ResourceGroup{
 		Name:                 common.DefaultRgName,
 		Capacity:             common.DefaultRgCapacity,
-		AvailableNodesNumber: configQnNodes - newRgNode,
+		AvailableNodesNumber: 0,
+		Config: &entity.ResourceGroupConfig{
+			Limits: &entity.ResourceGroupLimit{NodeNum: 0},
+		},
 	}
-	common.CheckResourceGroup(t, defaultRg2, expDefaultRg2)
+	checkResourceGroup(t, ctx, mc, expDefaultRg2)
 
 	// try to drop default rg
 	errDropDefault := mc.DropResourceGroup(ctx, common.DefaultRgName)
 	common.CheckErr(t, errDropDefault, false, "default resource group is not deletable")
 }
 
+func TestCreateRgWithTransfer(t *testing.T) {
+	ctx := createContext(t, time.Second*common.DefaultTimeout)
+	// connect
+	mc := createMilvusClient(ctx, t)
+	resetRgs(t, ctx, mc)
+
+	// create rg0 with requests=2, limits=3, total 4 nodes
+	rg0 := common.GenRandomString(6)
+	rg0Limits := newRgNode + 1
+	errCreate := mc.CreateResourceGroup(ctx, rg0, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: rg0Limits},
+	}))
+	common.CheckErr(t, errCreate, true)
+
+	// check rg0 available nodes: 3, default rg available nodes: 1
+	actualRg0, _ := mc.DescribeResourceGroup(ctx, rg0)
+	require.Lenf(t, actualRg0.Nodes, int(rg0Limits), "expected %s to have %d available nodes", rg0, rg0Limits)
+	actualRgDef, _ := mc.DescribeResourceGroup(ctx, common.DefaultRgName)
+	require.Lenf(t, actualRgDef.Nodes, int(configQnNodes-rg0Limits), "expected %s to have %d available nodes", common.DefaultRgName, configQnNodes-rg0Limits)
+
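+	// rg1 prefers to recruit missing nodes from rg0 (TransferFrom) and to hand redundant nodes back to the default rg (TransferTo)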
+	// create rg1 with TransferFrom & TransferTo & requests=3, limits=4
+	rg1 := common.GenRandomString(6)
+	rg1Requests := newRgNode + 1
+	errCreate = mc.CreateResourceGroup(ctx, rg1, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: rg1Requests},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+		TransferFrom: []*entity.ResourceGroupTransfer{
+			{ResourceGroup: rg0},
+		},
+		TransferTo: []*entity.ResourceGroupTransfer{
+			{ResourceGroup: common.DefaultRgName},
+		},
+	}))
+	common.CheckErr(t, errCreate, true)
+
+	// verify available nodes: rg0 + rg1 = configQnNodes = 4
+	time.Sleep(time.Duration(rand.Intn(4)+2) * time.Second) // wait a few seconds for node assignment to settle
+	actualRg0, _ = mc.DescribeResourceGroup(ctx, rg0)
+	actualRg1, _ := mc.DescribeResourceGroup(ctx, rg1)
+	require.EqualValuesf(t, configQnNodes, len(actualRg0.Nodes)+len(actualRg1.Nodes), "expected the total available nodes of %s and %s to be %d", rg0, rg1, configQnNodes)
+	expRg1 := &entity.ResourceGroup{
+		Name:                 rg1,
+		Capacity:             rg1Requests,
+		AvailableNodesNumber: -1, // negative value skips the available-nodes check
+		Config: &entity.ResourceGroupConfig{
+			Requests: &entity.ResourceGroupLimit{NodeNum: rg1Requests},
+			Limits:   &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+			TransferFrom: []*entity.ResourceGroupTransfer{
+				{ResourceGroup: rg0},
+			},
+			TransferTo: []*entity.ResourceGroupTransfer{
+				{ResourceGroup: common.DefaultRgName},
+			},
+		},
+	}
+	checkResourceGroup(t, ctx, mc, expRg1)
+}
+
 // test create rg with invalid name
 func TestCreateRgInvalidNames(t *testing.T) {
 	t.Parallel()
@@ -149,6 +246,159 @@ func TestCreateRgInvalidNames(t *testing.T) {
 	}
 }
 
+func TestCreateUpdateRgWithNotExistTransfer(t *testing.T) {
+	ctx := createContext(t, time.Second*common.DefaultTimeout)
+	// connect
+	mc := createMilvusClient(ctx, t)
+	resetRgs(t, ctx, mc)
+
+	// create/update rg with a non-existent TransferFrom rg
+	rgName := common.GenRandomString(6)
+	errCreate := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		TransferFrom: []*entity.ResourceGroupTransfer{
+			{ResourceGroup: "aaa"},
+		},
+	}))
+	common.CheckErr(t, errCreate, false, "resource group in `TransferFrom` aaa not exist")
+	errUpdate := mc.UpdateResourceGroups(ctx, client.WithUpdateResourceGroupConfig(rgName, &entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		TransferFrom: []*entity.ResourceGroupTransfer{
+			{ResourceGroup: "aaa"},
+		},
+	}))
+	common.CheckErr(t, errUpdate, false, "resource group not found")
+
+	// create/update rg with a non-existent TransferTo rg
+	errCreate = mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		TransferTo: []*entity.ResourceGroupTransfer{
+			{ResourceGroup: "aaa"},
+		},
+	}))
+	common.CheckErr(t, errCreate, false, "resource group in `TransferTo` aaa not exist")
+	errUpdate = mc.UpdateResourceGroups(ctx, client.WithUpdateResourceGroupConfig(rgName, &entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		TransferTo: []*entity.ResourceGroupTransfer{
+			{ResourceGroup: "aaa"},
+		},
+	}))
+	common.CheckErr(t, errUpdate, false, "resource group not found")
+}
+
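+// TestCreateRgWithRequestsLimits creates rgs with various requests/limits combinations and checks the resulting capacity and available nodes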
+func TestCreateRgWithRequestsLimits(t *testing.T) {
+	type requestsLimits struct {
+		requests  int32
+		limits    int32
+		available int32
+		errMsg    string
+	}
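+	// each case pairs a requests/limits combination with the expected available node count, or the expected error message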
+	reqAndLimits := []requestsLimits{
+		{requests: 0, limits: 0, available: 0},
+		{requests: -1, limits: 0, errMsg: "node num in `requests` or `limits` should not less than 0"},
+		{requests: 0, limits: -2, errMsg: "node num in `requests` or `limits` should not less than 0"},
+		{requests: 10, limits: 1, errMsg: "limits node num should not less than requests node num"},
+		{requests: 2, limits: 3, available: 3},
+		{requests: configQnNodes * 2, limits: configQnNodes * 3, available: configQnNodes},
+		{requests: configQnNodes, limits: configQnNodes, available: configQnNodes},
+	}
+	// connect
+	ctx := createContext(t, time.Second*20)
+	mc := createMilvusClient(ctx, t)
+
+	rgName := common.GenRandomString(6)
+	err := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Limits: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+	}))
+	common.CheckErr(t, err, false, "requests or limits is required")
+
+	for _, rl := range reqAndLimits {
+		log.Println(rl)
+		rgName := common.GenRandomString(6)
+		resetRgs(t, ctx, mc)
+		errCreate := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+			Requests: &entity.ResourceGroupLimit{NodeNum: rl.requests},
+			Limits:   &entity.ResourceGroupLimit{NodeNum: rl.limits},
+		}))
+		if rl.errMsg != "" {
+			common.CheckErr(t, errCreate, false, rl.errMsg)
+		} else {
+			expRg := &entity.ResourceGroup{
+				Name:                 rgName,
+				Capacity:             rl.requests,
+				AvailableNodesNumber: rl.available,
+				Config: &entity.ResourceGroupConfig{
+					Requests: &entity.ResourceGroupLimit{NodeNum: rl.requests},
+					Limits:   &entity.ResourceGroupLimit{NodeNum: rl.limits},
+				},
+			}
+			checkResourceGroup(t, ctx, mc, expRg)
+			// optionally wait until the available nodes settle:
+			// onCheckRgAvailable(t, ctx, mc, rgName, rl.available)
+		}
+	}
+}
+
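+// TestUpdateRgWithRequestsLimits updates one rg with various requests/limits combinations and checks the resulting capacity and available nodes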
+func TestUpdateRgWithRequestsLimits(t *testing.T) {
+	type requestsLimits struct {
+		requests  int32
+		limits    int32
+		available int32
+		errMsg    string
+	}
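+	// same combinations as the create case, applied via UpdateResourceGroups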
+	reqAndLimits := []requestsLimits{
+		{requests: 0, limits: 0, available: 0},
+		{requests: -1, limits: 0, errMsg: "node num in `requests` or `limits` should not less than 0"},
+		{requests: 0, limits: -2, errMsg: "node num in `requests` or `limits` should not less than 0"},
+		{requests: 10, limits: 1, errMsg: "limits node num should not less than requests node num"},
+		{requests: 2, limits: 3, available: 3},
+		{requests: configQnNodes * 2, limits: configQnNodes * 3, available: configQnNodes},
+		{requests: configQnNodes, limits: configQnNodes, available: configQnNodes},
+	}
+	// connect
+	ctx := createContext(t, time.Second*common.DefaultTimeout)
+	mc := createMilvusClient(ctx, t)
+	resetRgs(t, ctx, mc)
+
+	rgName := common.GenRandomString(6)
+	err := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+	}))
+	common.CheckErr(t, err, true)
+	err = mc.UpdateResourceGroups(ctx, client.WithUpdateResourceGroupConfig(rgName, &entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+	}))
+	common.CheckErr(t, err, false, "requests or limits is required")
+
+	for _, rl := range reqAndLimits {
+		log.Println(rl)
+		errUpdate := mc.UpdateResourceGroups(ctx, client.WithUpdateResourceGroupConfig(rgName, &entity.ResourceGroupConfig{
+			Requests: &entity.ResourceGroupLimit{NodeNum: rl.requests},
+			Limits:   &entity.ResourceGroupLimit{NodeNum: rl.limits},
+		}))
+		if rl.errMsg != "" {
+			common.CheckErr(t, errUpdate, false, rl.errMsg)
+		} else {
+			expRg := &entity.ResourceGroup{
+				Name:                 rgName,
+				Capacity:             rl.requests,
+				AvailableNodesNumber: rl.available,
+				Config: &entity.ResourceGroupConfig{
+					Requests: &entity.ResourceGroupLimit{NodeNum: rl.requests},
+					Limits:   &entity.ResourceGroupLimit{NodeNum: rl.limits},
+				},
+			}
+			checkResourceGroup(t, ctx, mc, expRg)
+			// optionally wait until the available nodes settle:
+			// onCheckRgAvailable(t, ctx, mc, rgName, rl.available)
+		}
+	}
+}
+
 // describe rg with not existed name
 func TestDescribeRgNotExisted(t *testing.T) {
 	ctx := createContext(t, time.Second*common.DefaultTimeout)
@@ -178,13 +428,12 @@ func TestDropRgNonEmpty(t *testing.T) {
 
 	// create new rg
 	rgName := common.GenRandomString(6)
-	errCreate := mc.CreateResourceGroup(ctx, rgName)
+	errCreate := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+	}))
 	common.CheckErr(t, errCreate, true)
 
-	// transfer node
-	errTransfer := mc.TransferNode(ctx, common.DefaultRgName, rgName, 1)
-	common.CheckErr(t, errTransfer, true)
-
 	// drop rg and rg available node is not 0
 	errDrop := mc.DropResourceGroup(ctx, rgName)
 	common.CheckErr(t, errDrop, false, "resource group's limits node num is not 0")
@@ -199,22 +448,19 @@ func TestDropEmptyRg(t *testing.T) {
 
 	// create new rg
 	rgName := common.GenRandomString(6)
-	errCreate := mc.CreateResourceGroup(ctx, rgName)
+	errCreate := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: configQnNodes},
+	}))
 	common.CheckErr(t, errCreate, true)
 
-	// transfer node
-	errTransfer := mc.TransferNode(ctx, common.DefaultRgName, rgName, configQnNodes)
-	common.CheckErr(t, errTransfer, true)
-
 	// describe default rg
-	defaultRg, errDescribe := mc.DescribeResourceGroup(ctx, common.DefaultRgName)
-	common.CheckErr(t, errDescribe, true)
 	transferRg := &entity.ResourceGroup{
 		Name:                 common.DefaultRgName,
 		Capacity:             common.DefaultRgCapacity,
 		AvailableNodesNumber: 0,
 	}
-	common.CheckResourceGroup(t, defaultRg, transferRg)
+	checkResourceGroup(t, ctx, mc, transferRg)
 
 	// drop empty default rg
 	errDrop := mc.DropResourceGroup(ctx, common.DefaultRgName)
@@ -303,55 +549,59 @@ func TestTransferReplicas(t *testing.T) {
 	mc := createMilvusClient(ctx, t)
 	resetRgs(t, ctx, mc)
 
-	// create new rg
+	// create new rg with requests 2
 	rgName := common.GenRandomString(6)
-	errCreate := mc.CreateResourceGroup(ctx, rgName)
+	errCreate := mc.CreateResourceGroup(ctx, rgName, client.WithCreateResourceGroupConfig(&entity.ResourceGroupConfig{
+		Requests: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		Limits:   &entity.ResourceGroupLimit{NodeNum: newRgNode},
+	}))
 	common.CheckErr(t, errCreate, true)
 
-	// transfer nodes into new rg
-	errTransfer := mc.TransferNode(ctx, common.DefaultRgName, rgName, newRgNode)
-	common.CheckErr(t, errTransfer, true)
-
 	// load two replicas
 	collName, _ := createCollectionWithDataIndex(ctx, t, mc, true, true)
 
 	// load two replicas into default rg
 	errLoad := mc.LoadCollection(ctx, collName, false, client.WithReplicaNumber(2), client.WithResourceGroups([]string{common.DefaultRgName}))
 	common.CheckErr(t, errLoad, true)
-	defaultRg, errDescribe := mc.DescribeResourceGroup(ctx, common.DefaultRgName)
-	common.CheckErr(t, errDescribe, true)
 	transferRg := &entity.ResourceGroup{
 		Name:                 common.DefaultRgName,
 		Capacity:             common.DefaultRgCapacity,
 		AvailableNodesNumber: configQnNodes - newRgNode,
 		LoadedReplica:        map[string]int32{collName: 2},
+		Config: &entity.ResourceGroupConfig{
+			Limits: &entity.ResourceGroupLimit{NodeNum: 0},
+		},
 	}
-	common.CheckResourceGroup(t, defaultRg, transferRg)
+	checkResourceGroup(t, ctx, mc, transferRg)
 
 	// transfer replica into new rg
 	errReplica := mc.TransferReplica(ctx, common.DefaultRgName, rgName, collName, 2)
 	common.CheckErr(t, errReplica, true)
 
 	// check default rg
-	defaultRg2, _ := mc.DescribeResourceGroup(ctx, common.DefaultRgName)
 	transferRg2 := &entity.ResourceGroup{
 		Name:                 common.DefaultRgName,
 		Capacity:             common.DefaultRgCapacity,
 		AvailableNodesNumber: configQnNodes - newRgNode,
 		IncomingNodeNum:      map[string]int32{collName: 2},
+		Config: &entity.ResourceGroupConfig{
+			Limits: &entity.ResourceGroupLimit{NodeNum: 0},
+		},
 	}
-	common.CheckResourceGroup(t, defaultRg2, transferRg2)
+	checkResourceGroup(t, ctx, mc, transferRg2)
 
 	// check new rg after transfer replica
-	newRg, _ := mc.DescribeResourceGroup(ctx, rgName)
 	expRg := &entity.ResourceGroup{
 		Name:                 rgName,
 		Capacity:             newRgNode,
 		AvailableNodesNumber: newRgNode,
 		LoadedReplica:        map[string]int32{collName: 2},
 		OutgoingNodeNum:      map[string]int32{collName: 2},
+		Config: &entity.ResourceGroupConfig{
+			Limits: &entity.ResourceGroupLimit{NodeNum: newRgNode},
+		},
 	}
-	common.CheckResourceGroup(t, newRg, expRg)
+	checkResourceGroup(t, ctx, mc, expRg)
 
 	// drop new rg that loaded collection
 	err := mc.DropResourceGroup(ctx, rgName)
@@ -389,7 +639,7 @@ func TestTransferReplicaNotExistedCollection(t *testing.T) {
 
 	// transfer replica
 	errTransfer := mc.TransferReplica(ctx, common.DefaultRgName, rgName, common.GenRandomString(3), 1)
-	common.CheckErr(t, errTransfer, false, "can't find collection")
+	common.CheckErr(t, errTransfer, false, "collection not found")
 }
 
 // test transfer replicas with invalid replica number
@@ -465,5 +715,5 @@ func TestTransferReplicaRgNotExisted(t *testing.T) {
 		AvailableNodesNumber: newRgNode,
 		IncomingNodeNum:      map[string]int32{collName: newRgNode},
 	}
-	common.CheckResourceGroup(t, newRg, expRg)
+	checkResourceGroup(t, ctx, mc, expRg)
 }
diff --git a/test/testcases/search_test.go b/test/testcases/search_test.go
index 7190f709..bf86babe 100644
--- a/test/testcases/search_test.go
+++ b/test/testcases/search_test.go
@@ -286,8 +286,8 @@ func TestSearchEmptyPartitions(t *testing.T) {
 		nq0IDs := searchResult[0].IDs.(*entity.ColumnInt64).Data()
 		nq1IDs := searchResult[1].IDs.(*entity.ColumnInt64).Data()
 		common.CheckSearchResult(t, searchResult, 2, common.DefaultTopK)
-		require.Contains(t, nq0IDs, vecColumnDefault.IdsColumn.(*entity.ColumnInt64).Data()[0])
-		require.Contains(t, nq1IDs, vecColumnPartition.IdsColumn.(*entity.ColumnInt64).Data()[0])
+		require.Contains(t, nq0IDs, vecColumnDefault.IDsColumn.(*entity.ColumnInt64).Data()[0])
+		require.Contains(t, nq1IDs, vecColumnPartition.IDsColumn.(*entity.ColumnInt64).Data()[0])
 	}
 }
 
@@ -361,7 +361,7 @@ func TestSearchPartitions(t *testing.T) {
 	)
 	// check search result contains search vector, which from all partitions
 	common.CheckSearchResult(t, searchSingleRes, 2, common.DefaultTopK)
-	require.Contains(t, searchSingleRes[0].IDs.(*entity.ColumnInt64).Data(), vecColumnDefault.IdsColumn.(*entity.ColumnInt64).Data()[0])
+	require.Contains(t, searchSingleRes[0].IDs.(*entity.ColumnInt64).Data(), vecColumnDefault.IDsColumn.(*entity.ColumnInt64).Data()[0])
 
 	// search multi partitions
 	searchMultiRes, _ := mc.Search(
@@ -379,8 +379,8 @@ func TestSearchPartitions(t *testing.T) {
 		sp,
 	)
 	common.CheckSearchResult(t, searchMultiRes, 2, common.DefaultTopK)
-	require.Contains(t, searchMultiRes[0].IDs.(*entity.ColumnInt64).Data(), vecColumnDefault.IdsColumn.(*entity.ColumnInt64).Data()[0])
-	require.Contains(t, searchMultiRes[1].IDs.(*entity.ColumnInt64).Data(), vecColumnPartition.IdsColumn.(*entity.ColumnInt64).Data()[0])
+	require.Contains(t, searchMultiRes[0].IDs.(*entity.ColumnInt64).Data(), vecColumnDefault.IDsColumn.(*entity.ColumnInt64).Data()[0])
+	require.Contains(t, searchMultiRes[1].IDs.(*entity.ColumnInt64).Data(), vecColumnPartition.IDsColumn.(*entity.ColumnInt64).Data()[0])
 }
 
 // test search empty output fields []string{} -> [], []string{""}