-
Notifications
You must be signed in to change notification settings - Fork 6
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
e2e, persistent-ip: Add primary-UDN tests #72
Merged
Merged
Changes from all commits
Commits
Show all changes
7 commits
Select commit
Hold shift + click to select a range
35c8704
cluster-up: Add nse flag to OVNK kind script
RamLavi da4651d
tests/e2e, persistentips: Add role to GenerateLayer2WithSubnetNAD
RamLavi b119fa5
tests/e2e, persistentip: Move GetIPs to general function
RamLavi f007a5e
tests/e2e, persistentip: Move generateVMI to general function
RamLavi 9505683
tests/e2e, persistentip: Expand test matrix using DescribeTableSubtree
RamLavi 8c58962
tests/e2e, persistentip: Introduce primary-UDN helper functions
RamLavi a873dfb
tests/e2e, persistentip: Add primary UDN test case
RamLavi File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -40,7 +40,23 @@ import ( | |
"sigs.k8s.io/controller-runtime/pkg/client" | ||
) | ||
|
||
var _ = Describe("Persistent IPs", func() { | ||
const ( | ||
secondaryLogicalNetworkInterfaceName = "multus" | ||
nadName = "l2-net-attach-def" | ||
) | ||
|
||
const ( | ||
rolePrimary = "primary" | ||
roleSecondary = "secondary" | ||
) | ||
|
||
type testParams struct { | ||
role string | ||
ipsFrom func(vmi *kubevirtv1.VirtualMachineInstance) ([]string, error) | ||
qinqon marked this conversation as resolved.
Show resolved
Hide resolved
|
||
vmi func(namespace string) *kubevirtv1.VirtualMachineInstance | ||
} | ||
|
||
var _ = DescribeTableSubtree("Persistent IPs", func(params testParams) { | ||
var failureCount int = 0 | ||
JustAfterEach(func() { | ||
if CurrentSpecReport().Failed() { | ||
|
@@ -57,21 +73,21 @@ var _ = Describe("Persistent IPs", func() { | |
|
||
When("network attachment definition created with allowPersistentIPs=true", func() { | ||
var ( | ||
td testenv.TestData | ||
networkInterfaceName = "multus" | ||
vm *kubevirtv1.VirtualMachine | ||
vmi *kubevirtv1.VirtualMachineInstance | ||
nad *nadv1.NetworkAttachmentDefinition | ||
td testenv.TestData | ||
vm *kubevirtv1.VirtualMachine | ||
vmi *kubevirtv1.VirtualMachineInstance | ||
nad *nadv1.NetworkAttachmentDefinition | ||
) | ||
|
||
BeforeEach(func() { | ||
td = testenv.GenerateTestData() | ||
td.SetUp() | ||
DeferCleanup(func() { | ||
td.TearDown() | ||
}) | ||
|
||
nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace) | ||
vmi = testenv.GenerateAlpineWithMultusVMI(td.Namespace, networkInterfaceName, nad.Name) | ||
nad = testenv.GenerateLayer2WithSubnetNAD(nadName, td.Namespace, params.role) | ||
vmi = params.vmi(td.Namespace) | ||
vm = testenv.NewVirtualMachine(vmi, testenv.WithRunning()) | ||
|
||
By("Create NetworkAttachmentDefinition") | ||
|
@@ -94,14 +110,14 @@ var _ = Describe("Persistent IPs", func() { | |
WithPolling(time.Second). | ||
ShouldNot(BeEmpty()) | ||
|
||
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) | ||
|
||
Expect(vmi.Status.Interfaces).NotTo(BeEmpty()) | ||
Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty()) | ||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, Not(BeEmpty()))) | ||
}) | ||
|
||
It("should keep ips after live migration", func() { | ||
vmiIPsBeforeMigration := vmi.Status.Interfaces[0].IPs | ||
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) | ||
vmiIPsBeforeMigration, err := params.ipsFrom(vmi) | ||
Expect(err).NotTo(HaveOccurred()) | ||
Expect(vmiIPsBeforeMigration).NotTo(BeEmpty()) | ||
|
||
testenv.LiveMigrateVirtualMachine(td.Namespace, vm.Name) | ||
testenv.CheckLiveMigrationSucceeded(td.Namespace, vm.Name) | ||
|
@@ -112,8 +128,7 @@ var _ = Describe("Persistent IPs", func() { | |
WithTimeout(5 * time.Minute). | ||
Should(testenv.ContainConditionVMIReady()) | ||
|
||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPsAtInterfaceByName(networkInterfaceName, ConsistOf(vmiIPsBeforeMigration))) | ||
|
||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, ConsistOf(vmiIPsBeforeMigration))) | ||
}) | ||
|
||
It("should garbage collect IPAMClaims after VM deletion", func() { | ||
|
@@ -171,7 +186,10 @@ var _ = Describe("Persistent IPs", func() { | |
}) | ||
|
||
It("should keep ips after restart", func() { | ||
vmiIPsBeforeRestart := vmi.Status.Interfaces[0].IPs | ||
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) | ||
vmiIPsBeforeRestart, err := params.ipsFrom(vmi) | ||
Expect(err).NotTo(HaveOccurred()) | ||
Expect(vmiIPsBeforeRestart).NotTo(BeEmpty()) | ||
vmiUUIDBeforeRestart := vmi.UID | ||
|
||
By("Re-starting the VM") | ||
|
@@ -190,7 +208,7 @@ var _ = Describe("Persistent IPs", func() { | |
WithTimeout(5 * time.Minute). | ||
Should(testenv.ContainConditionVMIReady()) | ||
|
||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPsAtInterfaceByName(networkInterfaceName, ConsistOf(vmiIPsBeforeRestart))) | ||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, ConsistOf(vmiIPsBeforeRestart))) | ||
}) | ||
}) | ||
|
||
|
@@ -217,9 +235,9 @@ var _ = Describe("Persistent IPs", func() { | |
ShouldNot(BeEmpty()) | ||
|
||
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) | ||
|
||
Expect(vmi.Status.Interfaces).NotTo(BeEmpty()) | ||
Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty()) | ||
ips, err := params.ipsFrom(vmi) | ||
Expect(err).NotTo(HaveOccurred()) | ||
Expect(ips).NotTo(BeEmpty()) | ||
}) | ||
|
||
It("should garbage collect IPAMClaims after VM foreground deletion, only after VMI is gone", func() { | ||
|
@@ -260,20 +278,19 @@ var _ = Describe("Persistent IPs", func() { | |
WithPolling(time.Second). | ||
ShouldNot(BeEmpty()) | ||
|
||
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) | ||
|
||
Expect(vmi.Status.Interfaces).NotTo(BeEmpty()) | ||
Expect(vmi.Status.Interfaces[0].IPs).NotTo(BeEmpty()) | ||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, Not(BeEmpty()))) | ||
}) | ||
|
||
It("should keep ips after live migration", func() { | ||
vmiIPsBeforeMigration := vmi.Status.Interfaces[0].IPs | ||
Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) | ||
vmiIPsBeforeMigration, err := params.ipsFrom(vmi) | ||
Expect(err).NotTo(HaveOccurred()) | ||
Expect(vmiIPsBeforeMigration).NotTo(BeEmpty()) | ||
|
||
testenv.LiveMigrateVirtualMachine(td.Namespace, vmi.Name) | ||
testenv.CheckLiveMigrationSucceeded(td.Namespace, vmi.Name) | ||
|
||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPsAtInterfaceByName(networkInterfaceName, ConsistOf(vmiIPsBeforeMigration))) | ||
|
||
Expect(testenv.ThisVMI(vmi)()).Should(testenv.MatchIPs(params.ipsFrom, ConsistOf(vmiIPsBeforeMigration))) | ||
}) | ||
|
||
It("should garbage collect IPAMClaims after VMI deletion", func() { | ||
|
@@ -294,7 +311,20 @@ var _ = Describe("Persistent IPs", func() { | |
}) | ||
|
||
}) | ||
}) | ||
}, | ||
Entry("secondary interfaces", | ||
testParams{ | ||
role: roleSecondary, | ||
ipsFrom: secondaryNetworkVMIStatusIPs, | ||
vmi: vmiWithMultus, | ||
}), | ||
Entry("primary UDN", | ||
testParams{ | ||
role: rolePrimary, | ||
ipsFrom: defaultNetworkStatusAnnotationIPs, | ||
vmi: vmiWithPasst, | ||
}), | ||
) | ||
|
||
func foregroundDeleteOptions() *client.DeleteOptions { | ||
foreground := metav1.DeletePropagationForeground | ||
|
@@ -311,3 +341,67 @@ func removeFinalizersPatch() ([]byte, error) { | |
} | ||
return json.Marshal(patch) | ||
} | ||
|
||
func secondaryNetworkVMIStatusIPs(vmi *kubevirtv1.VirtualMachineInstance) ([]string, error) { | ||
return testenv.GetIPsFromVMIStatus(vmi, secondaryLogicalNetworkInterfaceName), nil | ||
} | ||
|
||
func defaultNetworkStatusAnnotationIPs(vmi *kubevirtv1.VirtualMachineInstance) ([]string, error) { | ||
defNetworkStatus, err := testenv.DefaultNetworkStatus(vmi) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
||
return defNetworkStatus.IPs, nil | ||
} | ||
|
||
func vmiWithMultus(namespace string) *kubevirtv1.VirtualMachineInstance { | ||
interfaceName := secondaryLogicalNetworkInterfaceName | ||
return testenv.NewVirtualMachineInstance( | ||
namespace, | ||
testenv.WithMemory("128Mi"), | ||
testenv.WithInterface(kubevirtv1.Interface{ | ||
Name: interfaceName, | ||
InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{ | ||
Bridge: &kubevirtv1.InterfaceBridge{}, | ||
}, | ||
}), | ||
testenv.WithNetwork(kubevirtv1.Network{ | ||
|
||
Name: interfaceName, | ||
NetworkSource: kubevirtv1.NetworkSource{ | ||
Multus: &kubevirtv1.MultusNetwork{ | ||
NetworkName: nadName, | ||
}, | ||
}, | ||
}), | ||
) | ||
} | ||
|
||
func vmiWithPasst(namespace string) *kubevirtv1.VirtualMachineInstance { | ||
const ( | ||
interfaceName = "passtnet" | ||
cloudInitNetworkData = ` | ||
version: 2 | ||
ethernets: | ||
eth0: | ||
dhcp4: true` | ||
Comment on lines
+385
to
+388
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. btw this is not required as far as i remember for our machines right ? |
||
) | ||
return testenv.NewVirtualMachineInstance( | ||
namespace, | ||
testenv.WithMemory("2048Mi"), | ||
testenv.WithInterface(kubevirtv1.Interface{ | ||
Name: interfaceName, | ||
Binding: &kubevirtv1.PluginBinding{ | ||
Name: "passt", | ||
}, | ||
}), | ||
testenv.WithNetwork(kubevirtv1.Network{ | ||
Name: interfaceName, | ||
NetworkSource: kubevirtv1.NetworkSource{ | ||
Pod: &kubevirtv1.PodNetwork{}, | ||
}, | ||
}), | ||
testenv.WithCloudInitNoCloudVolume(cloudInitNetworkData), | ||
) | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
do we want this on all lanes, or we want to add an additional lane, and test it there ?
I'm inclined to say we want the latter.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
hmm.. regardless of the question whether we want to run the tests in a parallel lane, I don't think we should only enable it when we want to test it.
It's a feature that is planned to be enabled by default, so I don't see a good reason why not to add it for all the cluster lanes.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
i also dont see why to add another lane tbh, we should not over test
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
OK, let's roll with it.