From 0cff9b1f1a35acb95f147350d197178e32afd7e6 Mon Sep 17 00:00:00 2001
From: khushboo-rancher
Date: Fri, 10 Jan 2025 17:57:46 +0000
Subject: [PATCH] Update testing docs

Signed-off-by: khushboo-rancher
---
 backend/apis/index.html | 17 +-
 backend/apis/test_addons.html | 76 +-
 backend/apis/test_hosts.html | 222 +-
 backend/apis/test_images.html | 285 ++-
 backend/apis/test_keypairs.html | 134 +-
 backend/apis/test_networks.html | 135 +-
 backend/apis/test_settings.html | 206 +-
 backend/apis/test_support_bundle.html | 124 +-
 backend/apis/test_vm_templates.html | 135 +-
 backend/apis/test_vms.html | 50 +-
 backend/apis/test_volumes.html | 170 +-
 backend/conftest.html | 458 +++-
 backend/fixtures/api_client.html | 451 +++-
 backend/fixtures/base.html | 36 +-
 backend/fixtures/images.html | 97 +-
 backend/fixtures/index.html | 17 +-
 backend/fixtures/networks.html | 58 +-
 backend/fixtures/rancher_api_client.html | 86 +-
 backend/fixtures/settings.html | 121 +-
 backend/fixtures/terraform.html | 471 +++-
 backend/fixtures/virtualmachines.html | 489 +++-
 backend/fixtures/volumes.html | 44 +-
 backend/index.html | 17 +-
 backend/integrations/index.html | 17 +-
 .../integrations/test_0_storage_network.html | 161 +-
 backend/integrations/test_1_images.html | 509 +++-
 backend/integrations/test_1_volumes.html | 355 ++-
 backend/integrations/test_3_vm.html | 524 +++-
 backend/integrations/test_3_vm_functions.html | 2192 ++++++++++++++++-
 .../test_4_vm_backup_restore.html | 1183 ++++++++-
 .../test_4_vm_host_powercycle.html | 601 ++++-
 backend/integrations/test_4_vm_snapshot.html | 678 ++++-
 backend/integrations/test_4_vm_template.html | 217 +-
 backend/integrations/test_5_vm_networks.html | 386 ++-
 .../test_5_vm_networks_interact.html | 1014 +++++++-
 .../test_9_rancher_integration.html | 1066 +++++++-
 backend/integrations/test_upgrade.html | 1299 +++++++++-
 backend/integrations/test_z_terraform.html | 349 ++-
 .../test_z_terraform_rancher.html | 318 ++-
 backend/integrations/vmconsole.html | 66 +-
 index.html | 2 +-
 integration/modules/skel_skel_spec.html | 6 +-
 ..._settings_cloud_config_templates_spec.html | 2 +-
 .../testcases_VM_settings_ssh_keys_spec.html | 4 +-
 .../testcases_networks_network_spec.html | 8 +-
 ..._virtualmachines_virtual_machine_spec.html | 4 +-
 46 files changed, 14618 insertions(+), 242 deletions(-)

diff --git a/backend/apis/index.html b/backend/apis/index.html index 8d573f1c7..7d7d49621 100644 --- a/backend/apis/index.html +++ b/backend/apis/index.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis API documentation - + @@ -106,7 +117,7 @@

Sub-modules

diff --git a/backend/apis/test_addons.html b/backend/apis/test_addons.html index a3445b169..05d3991d3 100644 --- a/backend/apis/test_addons.html +++ b/backend/apis/test_addons.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_addons API documentation - + @@ -39,7 +50,6 @@

Classes

class TestDefaultAddons
-
Expand source code @@ -99,6 +109,7 @@

Classes

f"API Status({code}): {data}" )
+

Class variables

var pytestmark
@@ -112,18 +123,75 @@

Methods

def test_disable(self, api_client, addon, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["enable_addon"])
+def test_disable(self, api_client, addon, wait_timeout):
+    code, data = api_client.addons.disable(addon)
+
+    assert 200 == code, (code, data)
+    assert not data.get('spec', {}).get('enabled', True), (code, data)
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.addons.get(addon)
+        if "Disabled" in data.get('status', {}).get('status', ""):
+            break
+        sleep(3)
+    else:
+        raise AssertionError(
+            f"Failed to disable addon {addon} with {wait_timeout} timed out\n"
+            f"API Status({code}): {data}"
+        )
+
def test_enable(self, api_client, wait_timeout, addon)
+
+ +Expand source code + +
@pytest.mark.dependency(name="enable_addon")
+def test_enable(self, api_client, wait_timeout, addon):
+    code, data = api_client.addons.enable(addon)
+
+    assert 200 == code, (code, data)
+    assert data.get('spec', {}).get('enabled', False), (code, data)
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.addons.get(addon)
+        if data.get('status', {}).get('status', "") in ("deployed", "AddonDeploySuccessful"):
+            break
+        sleep(3)
+    else:
+        raise AssertionError(
+            f"Failed to enable addon {addon} with {wait_timeout} timed out\n"
+            f"API Status({code}): {data}"
+        )
+
def test_get(self, api_client, addon)
+
+ +Expand source code + +
def test_get(self, api_client, addon):
+    code, data = api_client.addons.get(addon)
+
+    assert 200 == code, (code, data)
+    assert not data.get('spec', {}).get('enabled', True), (code, data)
+    assert "AddonDisabled" == data.get('status', {}).get('status')
+
@@ -158,7 +226,7 @@

diff --git a/backend/apis/test_hosts.html b/backend/apis/test_hosts.html index 03c3cb2f6..7ab575d88 100644 --- a/backend/apis/test_hosts.html +++ b/backend/apis/test_hosts.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_hosts API documentation - + @@ -37,6 +48,38 @@

Functions

def test_delete_host(api_client, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get_host"], name="delete_host")
+@pytest.mark.p0
+@pytest.mark.hosts
+@pytest.mark.delete_host
+def test_delete_host(api_client, wait_timeout):
+    """
+    Test the hosts are the nodes which make the cluster
+    Covers:
+        hosts-07-Delete the host
+    """
+    _, nodes_info = api_client.hosts.get()
+
+    node = nodes_info['data'][-1]
+
+    delete_resp = api_client.hosts.delete(node['id'])
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+
+    while endtime > datetime.now():
+        status_code, node_stats = api_client.hosts.get(node['id'])
+        if status_code == 404:
+            break
+        sleep(5)
+    else:
+        raise AssertionError(f"Failed to delete host {node['id']}\n",
+                             f"Delete response: {delete_resp}\n"
+                             f"timeout {wait_timeout} and still got: {status_code}, {node_stats}")
+

Test the hosts are the nodes which make the cluster

Covers

hosts-07-Delete the host

@@ -45,12 +88,89 @@

Covers

def test_get_host(api_client)
+
+ +Expand source code + +
@pytest.mark.dependency(name="get_host")
+@pytest.mark.hosts
+@pytest.mark.p0
+def test_get_host(api_client):
+    # Case 1: Get all nodes
+    status_code, nodes_info = api_client.hosts.get()
+
+    assert status_code == 200, f"Failed to list nodes with error: {nodes_info}"
+    assert len(nodes_info['data']) >= 1, "Unexpected number of hosts"
+
+    # Case 2: Get specific node
+    node_id = nodes_info['data'][-1]['id']
+
+    status_code, node = api_client.hosts.get(node_id)
+
+    assert status_code == 200, f"Failed ot get node {node_id} with error: {node}"
+    assert node_id == node['id'], f"Responsed unexpected Node {node['id']}"
+
def test_maintenance_mode(api_client, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get_host"], name="enable_maintenance_mode")
+@pytest.mark.hosts
+@pytest.mark.p2
+def test_maintenance_mode(api_client, wait_timeout):
+    """
+    Test the hosts are the nodes which make the cluster
+    Covers:
+        hosts-03-Verify Enabling maintenance mode
+        hosts-12-Host with no VMs on it
+    """
+
+    _, nodes_info = api_client.hosts.get()
+
+    # Test on the last node to avoid affecting the VIP
+    node_id = nodes_info['data'][-1]['id']
+
+    # Case 1: enable
+    status_code, node_stats = api_client.hosts.maintenance_mode(node_id, enable=True)
+    assert 204 == status_code, (status_code, node_stats)
+
+    maintain_stat = "Unknown"
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        _, stats = api_client.hosts.get(node_id)
+        if stats['spec'].get('unschedulable'):
+            maintain_stat = stats["metadata"]["annotations"].get("harvesterhci.io/maintain-status")
+            if maintain_stat in ("running", "completed"):
+                break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Node({node_id}) not entered maintenance mode after {wait_timeout} secs\n"
+            f"maintain-status: {maintain_stat}\t"
+        )
+
+    # Case 2: disable
+    status_code, node_stats = api_client.hosts.maintenance_mode(node_id, enable=False)
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        _, stats = api_client.hosts.get(node_id)
+        if ("harvesterhci.io/maintain-status" not in stats["metadata"]["annotations"]
+           and "unschedulable" not in stats["spec"]):
+            break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Node({node_id}) not leave maintenance mode after disabled {wait_timeout} secs\n"
+            f'maintain stat:{stats["metadata"]["annotations"]["harvesterhci.io/maintain-status"]}'
+        )
+

Test the hosts are the nodes which make the cluster

Covers

hosts-03-Verify Enabling maintenance mode @@ -60,6 +180,53 @@

Covers

def test_update_node(api_client)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get_host"])
+@pytest.mark.hosts
+@pytest.mark.p0
+def test_update_node(api_client):
+    """
+    Test the hosts are the nodes which make the cluster
+    Covers:
+        hosts-04-Edit Config Add description and other details
+    """
+
+    fields = ('field.cattle.io/description', 'harvesterhci.io/host-custom-name')
+
+    _, nodes_info = api_client.hosts.get()
+
+    # There is no difference in updating any particular node, so we use the last one.
+    node = nodes_info['data'][-1]
+    original_annotations = {k: node['metadata']['annotations'].get(k, "")
+                            for k in fields}
+
+    test_annotations = {k: f"Test_update_{k}_{datetime.now()}" for k in fields}
+    test_data = dict(metadata=dict(annotations=test_annotations))
+
+    status_code, node_stats = api_client.hosts.update(node['id'], test_data)
+
+    assert 200 == status_code, (status_code, node_stats)
+
+    not_updated_fields = list()
+    for k, v in test_annotations.items():
+        if node_stats['metadata']['annotations'].get(k) != v:
+            not_updated_fields.append((k, v, node_stats['metadata']['annotations'].get(k)))
+
+    assert len(not_updated_fields) == 0, (
+        f"Fields not fully updated, API return `{status_code}`\n"
+        "\n".join(f"field:{k}, expected: {v}, got {o}" for k, v, o in not_updated_fields)
+    )
+
+    # For teardown
+    sleep(1)  # to prevent updating too fast, which causes a 409 conflict: 'object has been modified'
+    code, data = api_client.hosts.update(node['id'],
+                                         dict(metadata=dict(annotations=original_annotations)))
+
+    assert 200 == code, (code, data)
+

Test the hosts are the nodes which make the cluster

Covers

hosts-04-Edit Config Add description and other details

@@ -68,6 +235,55 @@

Covers

def test_update_using_yaml(api_client)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get_host"])
+@pytest.mark.hosts
+@pytest.mark.p2
+def test_update_using_yaml(api_client):
+    """
+    Test the hosts are the nodes which make the cluster
+    Covers:
+        hosts-05-Edit through Yaml
+    """
+
+    fields = ('field.cattle.io/description', 'harvesterhci.io/host-custom-name')
+
+    _, nodes_info = api_client.hosts.get()
+
+    # There is no difference in updating any particular node, so we use the last one.
+    node = nodes_info['data'][-1]
+    original_annotations = {k: node['metadata']['annotations'].get(k, "")
+                            for k in fields}
+
+    test_annotations = {k: f"Test_update_{k}_{datetime.now()}" for k in fields}
+    for k, v in test_annotations.items():
+        node['metadata']['annotations'][k] = v
+
+    api_client.hosts.update(node['id'], yaml.dump(node, sort_keys=False), as_json=False,
+                            headers={"Content-Type": "application/yaml"})
+
+    status_code, node_stats = api_client.hosts.get(node['id'])
+
+    not_updated_fields = list()
+    for k, v in test_annotations.items():
+        if node_stats['metadata']['annotations'].get(k) != v:
+            not_updated_fields.append((k, v, node_stats['metadata']['annotations'].get(k)))
+
+    assert len(not_updated_fields) == 0, (
+        f"Fields not fully updated, API return `{status_code}`\n"
+        "\n".join(f"field:{k}, expected: {v}, got {o}" for k, v, o in not_updated_fields)
+    )
+
+    # For teardown
+    sleep(1)  # to prevent updating too fast, which causes a 409 conflict: 'object has been modified'
+    code, data = api_client.hosts.update(node['id'],
+                                         dict(metadata=dict(annotations=original_annotations)))
+
+    assert 200 == code, (code, data)
+

Test the hosts are the nodes which make the cluster

Covers

hosts-05-Edit through Yaml

@@ -100,7 +316,7 @@

Covers

diff --git a/backend/apis/test_images.html b/backend/apis/test_images.html index b7480c806..0b26c3574 100644 --- a/backend/apis/test_images.html +++ b/backend/apis/test_images.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_images API documentation - + @@ -37,6 +48,36 @@

Functions

def test_create_with_invalid_url(api_client, gen_unique_name, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["create_image", "get_image", "delete_image"])
+@pytest.mark.p0
+@pytest.mark.negative
+@pytest.mark.images
+@parametrize_with_cases("gen_unique_name", cases=CasesImages, has_tag='gen-unique-name')
+def test_create_with_invalid_url(api_client, gen_unique_name, wait_timeout):
+    unique_name = gen_unique_name()
+    code, data = api_client.images.create_by_url(unique_name, f"https://{unique_name}.img")
+
+    assert 201 == code, (code, data)
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.images.get(unique_name)
+        image_conds = data.get('status', {}).get('conditions', [])
+        if len(image_conds) > 0 and "Initialized" == image_conds[-1].get("type"):
+            break
+        sleep(3)
+
+    assert len(image_conds) in (1, 2), f"Got unexpected image conditions!\n{data}"
+    assert "Initialized" == image_conds[-1].get("type")
+    assert "False" == image_conds[-1].get("status")
+    assert "no such host" in image_conds[-1].get("message")
+
+    api_client.images.delete(unique_name)
+
@@ -48,7 +89,6 @@

Classes

class CasesImages
-
Expand source code @@ -104,12 +144,31 @@

Classes

current_long = (current*6)[:43] return lambda: current_long
+

Methods

def case_gen_unique_name_long(self, gen_unique_name)
+
+ +Expand source code + +
@case(tags='gen-unique-name')
+def case_gen_unique_name_long(self, gen_unique_name):
+    """generate unique name, but long version
+
+    Args:
+        gen_unique_name (_type_): long gen_unique_name is returned
+
+    Returns:
+        _type_: long gen_unique_name on demand but long
+    """
+    current = datetime.now().strftime("%Hh%Mm%Ss%f-%m-%d")
+    current_long = (current*6)[:43]
+    return lambda: current_long
+

generate unique name, but long version

Args

@@ -126,6 +185,22 @@

Returns

def case_gen_unique_name_regular(self, gen_unique_name)
+
+ +Expand source code + +
@case(tags='gen-unique-name')
+def case_gen_unique_name_regular(self, gen_unique_name):
+    """generate unique name regular
+
+    Args:
+        gen_unique_name (_type_): regular returns default fixture
+
+    Returns:
+        _type_: default setup with gen_unique_name lambda
+    """
+    return gen_unique_name
+

generate unique name regular

Args

@@ -142,6 +217,22 @@

Returns

def case_unique_name_long(self, unique_name)
+
+ +Expand source code + +
@case(tags='unique-name')
+def case_unique_name_long(self, unique_name):
+    """basic unique_name_long
+
+    Args:
+        unique_name (_type_): fixture
+
+    Returns:
+        _type_: default setup with unique_name long
+    """
+    return (unique_name*6)[:43]
+

basic unique_name_long

Args

@@ -158,6 +249,22 @@

Returns

def case_unique_name_regular(self, unique_name)
+
+ +Expand source code + +
@case(tags='unique-name')
+def case_unique_name_regular(self, unique_name):
+    """basic unique_name_regular
+
+    Args:
+        unique_name (_type_): fixture
+
+    Returns:
+        _type_: default setup with unique_name regular
+    """
+    return unique_name
+

basic unique_name_regular

Args

@@ -176,7 +283,6 @@

Returns

class TestImages
-
Expand source code @@ -285,6 +391,7 @@

Returns

f"Still got {code} with {data}" )
+

Class variables

var pytestmark
@@ -298,30 +405,150 @@

Methods

def test_create(self, api_client, unique_name, fake_image_file)
+
+ +Expand source code + +
@pytest.mark.dependency(name="create_image")
+@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_create(self, api_client, unique_name, fake_image_file):
+    resp = api_client.images.create_by_file(unique_name, fake_image_file)
+
+    assert 200 == resp.status_code, (
+        f"Failed to upload fake image with error:{resp.status_code}, {resp.content}"
+    )
+
def test_create_with_reuse_display_name(self, wait_timeout, api_client, unique_name, fake_image_file)
+
+ +Expand source code + +
@pytest.mark.skip_version_if("> v1.2.0", "<= v1.4.0", reason="Issue#4293 fix after `v1.4.0`")
+@pytest.mark.dependency(depends=["create_image", "get_image", "delete_image"])
+@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_create_with_reuse_display_name(
+        self, wait_timeout, api_client, unique_name, fake_image_file):
+    code, data = api_client.images.get(unique_name)
+
+    assert 404 == code, f"Image {unique_name} not be deleted by previous test."
+
+    resp = api_client.images.create_by_file(unique_name, fake_image_file)
+
+    assert 200 == resp.status_code, (
+        f"failed to upload fake image with reused name {unique_name}, "
+        f"got error: {resp.status_code}, {resp.content}"
+    )
+
+    _ = api_client.images.delete(unique_name)
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+
+    while endtime > datetime.now():
+        code, data = api_client.images.get(unique_name)
+        if code == 404:
+            break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Failed to delete image {unique_name} with {wait_timeout} timed out\n"
+            f"Still got {code} with {data}"
+        )
+
def test_delete(self, api_client, unique_name, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(name="delete_image")
+@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_delete(self, api_client, unique_name, wait_timeout):
+    code, data = api_client.images.delete(unique_name)
+
+    assert 200 == code, (f"Failed to delete image with error: {code}, {data}")
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+
+    while endtime > datetime.now():
+        code, data = api_client.images.get(unique_name)
+        if code == 404:
+            break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Failed to delete image {unique_name} with {wait_timeout} timed out\n"
+            f"Still got {code} with {data}"
+        )
+
def test_get(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(name="get_image")
+@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_get(self, api_client, unique_name):
+    # Case 1: get all images
+    code, data = api_client.images.get()
+
+    assert len(data['items']) > 0, (code, data)
+
+    # Case 2: get created image
+    code, data = api_client.images.get(unique_name)
+    assert 200 == code, (code, data)
+    assert unique_name == data['metadata']['name']
+
def test_update(self, api_client, unique_name)
+
+ +Expand source code + +
@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_update(self, api_client, unique_name):
+    updates = {
+        "labels": {
+            "test-label": "42"
+        },
+        "annotations": {
+            "test-annotation": "dummy",
+            "field.cattle.io/description": 'test description'
+        },
+
+    }
+
+    code, data = api_client.images.update(unique_name, dict(metadata=updates))
+
+    assert 200 == code, (f"Failed to update image with error: {code}, {data}")
+
+    unexpected = list()
+    for field, pairs in updates.items():
+        for k, val in pairs.items():
+            if data['metadata'][field].get(k) != val:
+                unexpected.append((field, k, val, data['metadata'][field].get(k)))
+
+    assert not unexpected, (
+        "\n".join(f"Update {f} failed, set key {k} as {v} but got {n}"
+                  for f, k, v, n in unexpected)
+    )
+
@@ -330,7 +557,6 @@

Methods

class TestImagesNegative
-
Expand source code @@ -369,6 +595,7 @@

Methods

assert 422 == code, (code, data) assert "Invalid" == data.get("reason"), (code, data)
+

Class variables

var pytestmark
@@ -382,24 +609,70 @@

Methods

def test_create_with_empty_data(self, api_client, unique_name)
+
+ +Expand source code + +
@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_create_with_empty_data(self, api_client, unique_name):
+    # name, url, description, sourcetype, namespace
+    image_json = api_client.images.create_data(unique_name, "", "", "", "")
+    code, data = api_client.images.create(unique_name, json=image_json)
+
+    assert 422 == code, (code, data)
+    assert "Invalid" == data.get("reason"), (code, data)
+
def test_create_with_empty_url(self, api_client, unique_name)
+
+ +Expand source code + +
@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_create_with_empty_url(self, api_client, unique_name):
+    code, data = api_client.images.create_by_url(unique_name, "")
+
+    assert 422 == code, (code, data)
+    assert "Invalid" == data.get("reason"), (code, data)
+
def test_delete_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_delete_not_exist(self, api_client, unique_name):
+    code, data = api_client.images.delete(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get("reason"), (code, data)
+
def test_get_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
@parametrize_with_cases("unique_name", cases=CasesImages, has_tag='unique-name')
+def test_get_not_exist(self, api_client, unique_name):
+    code, data = api_client.images.get(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('reason'), (code, data)
+
@@ -460,7 +733,7 @@

diff --git a/backend/apis/test_keypairs.html b/backend/apis/test_keypairs.html index fda5e9cce..ec4805a37 100644 --- a/backend/apis/test_keypairs.html +++ b/backend/apis/test_keypairs.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_keypairs API documentation - + @@ -39,7 +50,6 @@

Classes

class TestKeypairs
-
Expand source code @@ -97,6 +107,7 @@

Classes

assert 404 == status_code, (status_code, data)
+

Class variables

var pytestmark
@@ -110,18 +121,80 @@

Methods

def test_create(self, api_client, ssh_keypair, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["delete_keypair_negative", "create_keypair_negative"],
+                        name="create_keypairs")
+def test_create(self, api_client, ssh_keypair, unique_name):
+    public, private = ssh_keypair
+    status_code, data = api_client.keypairs.create(unique_name, public)
+
+    assert status_code == 201, (
+        f"Unable to create Keypair `{unique_name}` with `{public}`\n"
+        f"Response: {data}"
+    )
+    assert public == data['spec']['publicKey'], (
+        f"public key does not match: `{public}`\n"
+        f"responsed: `{data['spec']['publicKey']}`"
+    )
+
+    # 5 mins for cluster to validate keypair
+    endtime = datetime.now() + timedelta(minutes=5)
+    while endtime > datetime.now():
+        status_code, data = api_client.keypairs.get(unique_name)
+        if "validated" == data.get("status", {}).get("conditions", [{}])[0].get('type'):
+            break
+        sleep(5)
+    else:
+        raise AssertionError(f"Cluster failed to validate keypair `{unique_name}` in 5 mins")
+
def test_delete(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["create_keypairs"])
+def test_delete(self, api_client, unique_name):
+    status_code, data = api_client.keypairs.delete(unique_name)
+
+    assert 200 == status_code, (status_code, data)
+    assert "Success" == data['status']
+
+    status_code, data = api_client.keypairs.get(unique_name)
+
+    assert 404 == status_code, (status_code, data)
+
def test_get(self, api_client, ssh_keypair, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["create_keypairs"])
+def test_get(self, api_client, ssh_keypair, unique_name):
+    # Case 1: get all keypairs
+    status_code, data = api_client.keypairs.get()
+
+    assert len(data['items']) > 0, (status_code, data)
+
+    # Case 2: get created keypairs
+    status_code, data = api_client.keypairs.get(unique_name)
+    pubkey, _ = ssh_keypair
+
+    assert unique_name == data['metadata'].get('name')
+    assert pubkey == data['spec']['publicKey']
+
@@ -130,7 +203,6 @@

Methods

class TestKeypairsNegative
-
Expand source code @@ -170,6 +242,7 @@

Methods

assert code == 422, "Expecting got 422 when publicKey is empty" assert "Invalid" in data.get("reason")
+

Class variables

var pytestmark
@@ -183,30 +256,81 @@

Methods

def test_create_with_empty_key(self, api_client, unique_name)
+
+ +Expand source code + +
def test_create_with_empty_key(self, api_client, unique_name):
+    code, data = api_client.keypairs.create(unique_name, "")
+
+    assert code == 422, "Expecting got 422 when publicKey is empty"
+    assert "Invalid" in data.get("reason")
+
def test_create_with_empty_name(self, api_client, ssh_keypair)
+
+ +Expand source code + +
@pytest.mark.dependency(name="create_keypair_negative")
+def test_create_with_empty_name(self, api_client, ssh_keypair):
+    pubkey, prikey = ssh_keypair
+    code, data = api_client.keypairs.create("", pubkey)
+
+    assert code == 422, "Expecting got 422 when keypair name is empty"
+    assert "Invalid" in data.get("reason")
+
def test_create_with_invalid_key(self, api_client, unique_name)
+
+ +Expand source code + +
def test_create_with_invalid_key(self, api_client, unique_name):
+    code, data = api_client.keypairs.create(unique_name, "INVALID_PUBLIC_KEY")
+
+    assert code == 422, "Expecting got 422 when publicKey is empty"
+    assert "Invalid" in data.get("reason")
+
def test_delete_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(name="delete_keypair_negative")
+def test_delete_not_exist(self, api_client, unique_name):
+    code, data = api_client.keypairs.delete(unique_name, "namespace")
+    assert code == 404
+    assert "NotFound" == data.get("reason")
+
def test_get_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_get_not_exist(self, api_client, unique_name):
+    code, data = api_client.keypairs.get(unique_name)
+    assert code == 404
+    assert "NotFound" == data.get("reason")
+
@@ -252,7 +376,7 @@

diff --git a/backend/apis/test_networks.html b/backend/apis/test_networks.html index 8f8a4525b..73c40d24e 100644 --- a/backend/apis/test_networks.html +++ b/backend/apis/test_networks.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_networks API documentation - + @@ -39,7 +50,6 @@

Classes

class TestNetworks
-
Expand source code @@ -92,6 +102,7 @@

Classes

f"Still got {code} with {data}" )
+

Class variables

var pytestmark
@@ -105,24 +116,84 @@

Methods

def test_create(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(name="create_network")
+@pytest.mark.skip_version_before("v1.1.0")
+def test_create(self, api_client, unique_name):
+    code, data = api_client.networks.create(unique_name, VLAN_ID, cluster_network='mgmt')
+    assert 201 == code, (code, data)
+
def test_create_103(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(name="create_network_103")
+@pytest.mark.skip_version_after("v1.0.3")
+def test_create_103(self, api_client, unique_name):
+    code, data = api_client.networks.create(unique_name, VLAN_ID)
+    assert 201 == code, (code, data)
+
def test_delete(self, api_client, unique_name, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["create_network_103", "create_network"], any=True)
+def test_delete(self, api_client, unique_name, wait_timeout):
+    code, data = api_client.networks.delete(unique_name)
+
+    assert 200 == code, (f"Failed to delete vlan with error: {code}, {data}")
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.networks.get(unique_name)
+        if code == 404:
+            break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Failed to delete vlan {unique_name} with {wait_timeout} timed out\n"
+            f"Still got {code} with {data}"
+        )
+
def test_get(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["create_network_103", "create_network"], any=True)
+def test_get(self, api_client, unique_name):
+    # Case 1: get all vlan networks
+    code, data = api_client.networks.get()
+
+    assert 200 == code, (code, data)
+    assert len(data['items']) > 0, (code, data)
+
+    # Case 2: get specific vlan by name
+    code, data = api_client.networks.get(unique_name)
+
+    assert 200 == code, (code, data)
+    assert unique_name == data['metadata']['name'], (code, data)
+
@@ -131,7 +202,6 @@

Methods

class TestNetworksNegative
-
Expand source code @@ -175,6 +245,7 @@

Methods

assert 500 == code, (vlan_id, code, data) assert "InternalError" == data.get("reason"), (vlan_id, code, data)
+

Class variables

var pytestmark
@@ -188,30 +259,84 @@

Methods

def test_create_with_invalid_id(self, api_client, unique_name, vlan_id)
+
+ +Expand source code + +
@pytest.mark.parametrize("vlan_id", [4095])
+@pytest.mark.skip_version_before("v1.1.0")  # ref to harvester/issues/3151
+def test_create_with_invalid_id(self, api_client, unique_name, vlan_id):
+    code, data = api_client.networks.create(unique_name, vlan_id)
+
+    assert 500 == code, (vlan_id, code, data)
+    assert "InternalError" == data.get("reason"), (vlan_id, code, data)
+
def test_create_with_invalid_id_103(self, api_client, unique_name, vlan_id)
+
+ +Expand source code + +
@pytest.mark.parametrize("vlan_id", [0, 4095])
+@pytest.mark.skip_version_after("v1.0.3")  # ref to harvester/issues/3151
+def test_create_with_invalid_id_103(self, api_client, unique_name, vlan_id):
+    code, data = api_client.networks.create(unique_name, vlan_id)
+
+    assert 422 == code, (vlan_id, code, data)
+    assert "Invalid" == data.get("reason"), (vlan_id, code, data)
+
def test_create_without_name(self, api_client)
+
+ +Expand source code + +
def test_create_without_name(self, api_client):
+    code, data = api_client.networks.create("", VLAN_ID)
+
+    assert 422 == code, (code, data)
+    assert "Invalid" == data.get("reason"), (code, data)
+
def test_delete_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_delete_not_exist(self, api_client, unique_name):
+    code, data = api_client.networks.delete(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get("reason"), (code, data)
+
def test_get_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_get_not_exist(self, api_client, unique_name):
+    code, data = api_client.networks.get(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('reason'), (code, data)
+
@@ -258,7 +383,7 @@

diff --git a/backend/apis/test_settings.html b/backend/apis/test_settings.html index e22eb5198..40a61470f 100644 --- a/backend/apis/test_settings.html +++ b/backend/apis/test_settings.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_settings API documentation - + @@ -37,24 +48,99 @@

Functions

def test_get_all_settings(api_client, expected_settings)
+
+ +Expand source code + +
@pytest.mark.p0
+@pytest.mark.settings
+def test_get_all_settings(api_client, expected_settings):
+    expected_settings = expected_settings['default']
+    code, data = api_client.settings.get()
+
+    available_settings = {m['metadata']['name'] for m in data['items']}
+
+    assert 200 == code, (code, data)
+    assert expected_settings <= available_settings, (
+        "Some setting missing:\n"
+        f"{expected_settings - available_settings}"
+    )
+
def test_get_all_settings_v110(api_client, expected_settings)
+
+ +Expand source code + +
@pytest.mark.p0
+@pytest.mark.settings
+@pytest.mark.skip_version_before('v1.1.0')
+def test_get_all_settings_v110(api_client, expected_settings):
+    expected_settings = expected_settings['default'] | expected_settings['1.1.0']
+    code, data = api_client.settings.get()
+
+    available_settings = {m['metadata']['name'] for m in data['items']}
+
+    assert 200 == code, (code, data)
+    assert expected_settings <= available_settings, (
+        "Some setting missing:\n"
+        f"{expected_settings - available_settings}"
+    )
+
+    removed = expected_settings - available_settings
+    added = available_settings - expected_settings
+
+    if removed:
+        warnings.warn(UserWarning(f"Few setting(s) been removed: {removed}."))
+    if added:
+        warnings.warn(UserWarning(f"New setting(s) added: {added}"))
+
def test_get_storage_network(api_client)
+
+ +Expand source code + +
@pytest.mark.p0
+@pytest.mark.settings
+def test_get_storage_network(api_client):
+    code, data = api_client.settings.get("storage-network")
+    assert 200 == code, (f"Failed to get storage-network setting with error: {code}, {data}")
+
def test_update_log_level(api_client)
+
+ +Expand source code + +
@pytest.mark.p0
+@pytest.mark.settings
+def test_update_log_level(api_client):
+    code, data = api_client.settings.get("log-level")
+    assert 200 == code, (f"Failed to get log-level setting with error: {code}, {data}")
+
+    original_value = data.get("value", data['default'])
+    updates = {"value": "Debug"}
+    code, data = api_client.settings.update("log-level", updates)
+
+    assert 200 == code, (f"Failed to update log-level setting with error: {code}, {data}")
+
+    # For teardown
+    updates = {"value": original_value}
+    api_client.settings.update("log-level", updates)
+

@@ -66,7 +152,6 @@

Classes

class TestUpdateInvalidBackupTarget
-
Expand source code @@ -116,6 +201,7 @@

Classes

f"API Status({code}): {data}" )
+

Class variables

var pytestmark
@@ -129,12 +215,61 @@

Methods

def test_invalid_S3(self, api_client)
+
+ +Expand source code + +
def test_invalid_S3(self, api_client):
+    S3Spec = api_client.settings.BackupTargetSpec.S3
+
+    spec = S3Spec('bogus_bucket', 'bogus_region', 'bogus_key', 'bogus_secret')
+    code, data = api_client.settings.update('backup-target', spec)
+    assert 422 == code, (
+        f"S3 backup-target should check key/secret/bucket/region"
+        f"API Status({code}): {data}"
+    )
+
+    spec = S3Spec('', '', '', '', endpoint="http://127.0.0.1")
+    code, data = api_client.settings.update('backup-target', spec)
+    assert 422 == code, (
+        f"S3 backup-target should check key/secret/bucket/region"
+        f"API Status({code}): {data}"
+    )
+
def test_invalid_nfs(self, api_client)
+
+ +Expand source code + +
def test_invalid_nfs(self, api_client):
+    NFSSpec = api_client.settings.BackupTargetSpec.NFS
+
+    spec = NFSSpec('not_starts_with_nfs://')
+    code, data = api_client.settings.update('backup-target', spec)
+    assert 422 == code, (
+        f"NFS backup-target should check endpoint starting with `nfs://`\n"
+        f"API Status({code}): {data}"
+    )
+
+    spec = NFSSpec('nfs://:/lack_server')
+    code, data = api_client.settings.update('backup-target', spec)
+    assert 422 == code, (
+        f"NFS backup-target should check endpoint had server path\n"
+        f"API Status({code}): {data}"
+    )
+
+    spec = NFSSpec('nfs://127.0.0.1:')
+    code, data = api_client.settings.update('backup-target', spec)
+    assert 422 == code, (
+        f"NFS backup-target should check endpoint had mount path\n"
+        f"API Status({code}): {data}"
+    )
+
@@ -143,7 +278,6 @@

Methods

class TestUpdateInvalidStorageNetwork
-
Expand source code @@ -179,6 +313,7 @@

Methods

f"API Status({code}): {data}" )
+

Class variables

var invalid_ip_range
@@ -204,12 +339,43 @@

Methods

def test_invalid_iprange(self, api_client)
+
+ +Expand source code + +
def test_invalid_iprange(self, api_client):
+    valid_vlan_id = 1
+    spec = api_client.settings.StorageNetworkSpec.enable_with(
+        valid_vlan_id, self.mgmt_network, self.invalid_ip_range
+    )
+    code, data = api_client.settings.update('storage-network', spec)
+
+    assert 422 == code, (
+        f"Storage Network should NOT able to create with IP Range: {self.invalid_ip_range}\n"
+        f"API Status({code}): {data}"
+    )
+
def test_invalid_vlanid(self, api_client)
+
+ +Expand source code + +
def test_invalid_vlanid(self, api_client):
+    spec = api_client.settings.StorageNetworkSpec.enable_with(
+        self.invalid_vlan_id, self.mgmt_network, "192.168.1.0/24"
+    )
+    code, data = api_client.settings.update('storage-network', spec)
+
+    assert 422 == code, (
+        f"Storage Network should NOT able to create with VLAN ID: {self.invalid_vlan_id}\n"
+        f"API Status({code}): {data}"
+    )
+
@@ -218,7 +384,6 @@

Methods

class TestUpdateKubeconfigDefaultToken
-
Expand source code @@ -246,6 +411,7 @@

Methods

f"API Status({code}): {data}" )
+

Class variables

var pytestmark
@@ -259,12 +425,40 @@

Methods

def test_invalid_kubeconfig_ttl_min(self, api_client)
+
+ +Expand source code + +
@pytest.mark.skip_version_before("v1.3.2", reason="Issue#5891 fixed after v1.3.2")
+def test_invalid_kubeconfig_ttl_min(self, api_client):
+    KubeconfigTTLMinSpec = api_client.settings.KubeconfigDefaultTokenTTLSpec.TTL
+    spec = KubeconfigTTLMinSpec(99999999999999)
+    code, data = api_client.settings.update('kubeconfig-default-token-ttl-minutes', spec)
+    assert 422 == code, (
+        f"Kubeconfig Default Token TTL Minutes should not exceed 100yrs\n"
+        f"API Status({code}): {data}"
+    )
+
def test_valid_kubeconfig_ttl_min(self, api_client)
+
+ +Expand source code + +
@pytest.mark.skip_version_before('v1.3.1')
+def test_valid_kubeconfig_ttl_min(self, api_client):
+    KubeconfigTTLMinSpec = api_client.settings.KubeconfigDefaultTokenTTLSpec.TTL
+    spec = KubeconfigTTLMinSpec(172800)
+    code, data = api_client.settings.update('kubeconfig-default-token-ttl-minutes', spec)
+    assert 200 == code, (
+        f"Kubeconfig Default Token TTL Minutes be allowed to be set for 120 days\n"
+        f"API Status({code}): {data}"
+    )
+
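For reference, both TTL values above are expressed in minutes: 172800 / (60 * 24) = 120 days, which is the 120-day period the assertion message refers to, while 99999999999999 minutes is far more than 100 years (roughly 52,560,000 minutes), so the invalid case is expected to be rejected.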
@@ -325,7 +519,7 @@

-

Generated by pdoc 0.11.1.

+

Generated by pdoc 0.11.5.

diff --git a/backend/apis/test_support_bundle.html b/backend/apis/test_support_bundle.html index bd5d51092..edcc4bff5 100644 --- a/backend/apis/test_support_bundle.html +++ b/backend/apis/test_support_bundle.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_support_bundle API documentation - + @@ -39,7 +50,6 @@

Classes

class TestSupportBundle
-
Expand source code @@ -110,6 +120,7 @@

Classes

# ???: Downloaded support bundle will be deleted automatically assert 404 == code, (code, data)
+

Class variables

var pytestmark
@@ -123,30 +134,113 @@

Methods

def test_create(self, api_client, unique_name, support_bundle_state)
+
+ +Expand source code + +
@pytest.mark.dependency(name="create support bundle")
+def test_create(self, api_client, unique_name, support_bundle_state):
+    code, data = api_client.supportbundle.create(unique_name)
+
+    assert 201 == code, (code, data)
+
+    support_bundle_state.uid = data['metadata']['name']
+
def test_delete(self, api_client, support_bundle_state)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get support bundle"])
+def test_delete(self, api_client, support_bundle_state):
+    code, data = api_client.supportbundle.delete(support_bundle_state.uid)
+
+    # ???: Downloaded support bundle will be deleted automatically
+    assert 404 == code, (code, data)
+
def test_download(self, api_client, support_bundle_state, wait_timeout)
+
+ +Expand source code + +
@pytest.mark.dependency(name="donwnload support bundle", depends=["get support bundle"])
+def test_download(self, api_client, support_bundle_state, wait_timeout):
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.supportbundle.get(support_bundle_state.uid)
+        if 100 == data.get('status', {}).get('progress', 0):
+            break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Failed to wait supportbundle ready with {wait_timeout} timed out\n"
+            f"Still got {code} in {data}"
+        )
+
+    code, ctx = api_client.supportbundle.download(support_bundle_state.uid)
+
+    assert 200 == code, (code, ctx)
+
+    with ZipFile(BytesIO(ctx), 'r') as zf:
+        files = zf.namelist()
+
+    assert 0 != len(files)
+
+    support_bundle_state.files = files
+    support_bundle_state.fio.write(ctx)
+    support_bundle_state.fio.seek(0)
+
def test_get(self, api_client, support_bundle_state)
+
+ +Expand source code + +
@pytest.mark.dependency(name="get support bundle", depends=["create support bundle"])
+def test_get(self, api_client, support_bundle_state):
+    code, data = api_client.supportbundle.get(support_bundle_state.uid)
+
+    assert 200 == code, (code, data)
+
def test_logfile_exists(self, support_bundle_state)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["donwnload support bundle"])
+def test_logfile_exists(self, support_bundle_state):
+    patterns = [r"^.*/logs/cattle-fleet-local-system/fleet-agent-.*/fleet-agent.log",
+                r"^.*/logs/cattle-fleet-system/fleet-controller-.*/fleet-controller.log",
+                r"^.*/logs/cattle-fleet-system/gitjob-.*/gitjob.log"]
+    matches = []
+    for f in support_bundle_state.files:
+        for pattern in patterns:
+            matches.extend([f] if re.match(pattern, f) else [])
+
+    assert len(matches) == len(patterns), (
+        f"Some file(s) not found, files: {matches}\npatterns: {patterns}"
+    )
+
@@ -155,7 +249,6 @@

Methods

class TestSupportBundleNegative
-
Expand source code @@ -176,6 +269,7 @@

Methods

assert 404 == code, (code, data) assert "NotFound" == data.get('reason'), (code, data)
+

Class variables

var pytestmark
@@ -189,12 +283,32 @@

Methods

def test_delete_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_delete_not_exist(self, api_client, unique_name):
+    code, data = api_client.supportbundle.delete(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('reason'), (code, data)
+
def test_get_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_get_not_exist(self, api_client, unique_name):
+    code, data = api_client.supportbundle.get(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('reason'), (code, data)
+
@@ -239,7 +353,7 @@

-

Generated by pdoc 0.11.1.

+

Generated by pdoc 0.11.5.

diff --git a/backend/apis/test_vm_templates.html b/backend/apis/test_vm_templates.html index 33b9392cb..d320492c5 100644 --- a/backend/apis/test_vm_templates.html +++ b/backend/apis/test_vm_templates.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_vm_templates API documentation - + @@ -39,7 +50,6 @@

Classes

class TestVMTemplate
-
Expand source code @@ -107,6 +117,7 @@

Classes

assert 200 == code, (code, data) assert DEFAULT_TEMPLATES == len(data['items']), (code, data)
+

Class variables

var pytestmark
@@ -120,36 +131,120 @@

Methods

def test_create(self, api_client, unique_name)
+
+ +Expand source code + +
def test_create(self, api_client, unique_name):
+    code, data = api_client.templates.create(unique_name)
+
+    assert 201 == code, (code, data)
+    assert unique_name == data['metadata']['name']
+
def test_delete(self, api_client, unique_name, wait_timeout)
+
+ +Expand source code + +
def test_delete(self, api_client, unique_name, wait_timeout):
+    code, data = api_client.templates.delete(unique_name)
+
+    assert 200 == code, (f"Failed to delete template with error: {code}, {data}")
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+
+    while endtime > datetime.now():
+        code, data = api_client.templates.get(unique_name)
+        if code == 404:
+            break
+        sleep(5)
+    else:
+        raise AssertionError(
+            f"Failed to delete template {unique_name} with {wait_timeout} timed out\n"
+            f"Still got {code} with {data}"
+        )
+
def test_get(self, api_client, unique_name)
+
+ +Expand source code + +
@pytest.mark.dependency(name="get_template")
+def test_get(self, api_client, unique_name):
+    # Case 1: get all templates
+    code, data = api_client.templates.get()
+
+    assert 200 == code, (code, data)
+    assert len(data['items']) > 0, (code, data)
+
+    # Case 2: get specific template by name
+    code, data = api_client.templates.get(unique_name)
+
+    assert 200 == code, (code, data)
+    assert unique_name == data['metadata']['name']
+
def test_get_system_default(self, api_client)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get_template"])
+def test_get_system_default(self, api_client):
+    code, data = api_client.templates.get(namespace=DEFAULT_TEMPLATES_NAMESPACE)
+
+    assert 200 == code, (code, data)
+    assert DEFAULT_TEMPLATES == len(data['items']), (code, data)
+
def test_get_system_default_versions(self, api_client)
+
+ +Expand source code + +
@pytest.mark.dependency(depends=["get_template"])
+def test_get_system_default_versions(self, api_client):
+    code, data = api_client.templates.get_version(namespace=DEFAULT_TEMPLATES_NAMESPACE)
+
+    assert 200 == code, (code, data)
+    assert DEFAULT_TEMPLATES == len(data['items']), (code, data)
+
def test_update(self, api_client, unique_name)
+
+ +Expand source code + +
def test_update(self, api_client, unique_name):
+    spec = api_client.templates.Spec(1, 2)
+
+    code, data = api_client.templates.create_version(unique_name, spec)
+
+    assert 201 == code, (code, data)
+    assert data['metadata']['name'].startswith(unique_name), (code, data)
+
@@ -158,7 +253,6 @@

Methods

class TestVMTemplateNegative
-
Expand source code @@ -185,6 +279,7 @@

Methods

assert 404 == code, (code, data) assert "NotFound" == data.get("reason"), (code, data)
+

Class variables

var pytestmark
@@ -198,18 +293,48 @@

Methods

def test_delete_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_delete_not_exist(self, api_client, unique_name):
+    code, data = api_client.templates.delete(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get("reason"), (code, data)
+
def test_get_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_get_not_exist(self, api_client, unique_name):
+    code, data = api_client.templates.get(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('reason'), (code, data)
+
def test_get_version_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_get_version_not_exist(self, api_client, unique_name):
+    code, data = api_client.templates.get_version(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('reason'), (code, data)
+
@@ -256,7 +381,7 @@

-

Generated by pdoc 0.11.1.

+

Generated by pdoc 0.11.5.

diff --git a/backend/apis/test_vms.html b/backend/apis/test_vms.html index 234188d16..a7f592263 100644 --- a/backend/apis/test_vms.html +++ b/backend/apis/test_vms.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_vms API documentation - + @@ -39,7 +50,6 @@

Classes

class TestVMNegative
-
Expand source code @@ -71,6 +81,7 @@

Classes

finally: api_client.set_retries()
+

Class variables

var pytestmark
@@ -84,6 +95,23 @@

Methods

def test_delete_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_delete_not_exist(self, api_client, unique_name):
+    """ ref: https://github.com/harvester/tests/issues/1215
+    1. Tries to delete a VM that doesn't exist
+    2. Checks that it gets a 404
+    """
+    try:
+        api_client.set_retries(1)
+        code, data = api_client.vms.delete(unique_name)
+        assert 404 == code, (code, data)
+        assert "NotFound" in data.get('code'), (code, data)
+    finally:
+        api_client.set_retries()
+

ref: https://github.com/harvester/tests/issues/1215 1. Tries to delete a VM that doesn't exist 2. Checks that it gets a 404

@@ -92,6 +120,20 @@

Methods

def test_get_not_exist(self, api_client, unique_name)
+
+ +Expand source code + +
def test_get_not_exist(self, api_client, unique_name):
+    """
+    1. Tries to get a VM that doesn't exist
+    2. Checks that the get command gets a 404
+    """
+    code, data = api_client.vms.get(unique_name)
+
+    assert 404 == code, (code, data)
+    assert "NotFound" == data.get('code'), (code, data)
+
  1. Tries to get a VM that doesn't exist
  2. Checks that the get command gets a 404
  3. @@ -128,7 +170,7 @@

    -

    Generated by pdoc 0.11.1.

    +

    Generated by pdoc 0.11.5.

    diff --git a/backend/apis/test_volumes.html b/backend/apis/test_volumes.html index 66075ff82..cc1aafabd 100644 --- a/backend/apis/test_volumes.html +++ b/backend/apis/test_volumes.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.apis.test_volumes API documentation - + @@ -39,7 +50,6 @@

    Classes

    class TestVolumes
    -
    Expand source code @@ -122,6 +132,7 @@

    Classes

    f"Still got {code} with {data}" )
    +

    Class variables

    var pytestmark
    @@ -135,6 +146,20 @@

    Methods

    def test_create(self, api_client, unique_name)
    +
    + +Expand source code + +
    def test_create(self, api_client, unique_name):
    +    """
+    1. Create a new volume from the default entries in the spec
    +    2. Check that the create command gets a 201 response
    +    """
    +    spec = api_client.volumes.Spec(1)
    +    code, data = api_client.volumes.create(unique_name, spec)
    +
    +    assert 201 == code, (code, data)
    +
1. Create a new volume from the default entries in the spec
    2. Check that the create command gets a 201 response
    3. @@ -144,6 +169,32 @@

      Methods

      def test_delete(self, api_client, unique_name, wait_timeout)
      +
      + +Expand source code + +
      def test_delete(self, api_client, unique_name, wait_timeout):
      +    """
      +    1. Deletes the volume created in the previous test
      +    note: this will fail if you run it by itself
      +    2. It will loop while waiting for it to delete and error out if it doesn't
      +    """
      +    code, data = api_client.volumes.delete(unique_name)
      +
      +    assert 200 == code, (f"Failed to delete volume with error: {code}, {data}")
      +
      +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
      +    while endtime > datetime.now():
      +        code, data = api_client.volumes.get(unique_name)
      +        if code == 404:
      +            break
      +        sleep(5)
      +    else:
      +        raise AssertionError(
      +            f"Failed to delete volume {unique_name} with {wait_timeout} timed out\n"
      +            f"Still got {code} with {data}"
      +        )
      +
      1. Deletes the volume created in the previous test note: this will fail if you run it by itself
      2. @@ -154,6 +205,28 @@

        Methods

        def test_get(self, api_client, unique_name)
        +
        + +Expand source code + +
        def test_get(self, api_client, unique_name):
        +    """
        +    1. Runs a get command on the volume created in the previous test
        +    note: this will fail if you run it by itself
        +    2. It will fail if it doesn't return a 200
        +    """
        +    # Case 1: get all volumes
        +    code, data = api_client.volumes.get()
        +
        +    assert 200 == code, (code, data)
        +    assert len(data['data']) > 0, (code, data)
        +
        +    # Case 2: get specific volume
        +    code, data = api_client.volumes.get(unique_name)
        +
        +    assert 200 == code, (code, data)
        +    assert unique_name == data['metadata']['name'], (code, data)
        +
        1. Runs a get command on the volume created in the previous test note: this will fail if you run it by itself
        2. @@ -164,6 +237,35 @@

          Methods

          def test_update_size(self, api_client, unique_name, wait_timeout)
          +
          + +Expand source code + +
          def test_update_size(self, api_client, unique_name, wait_timeout):
          +    """
          +    1. Updates the volume created in the previous test to 10GB
          +    note: this will fail if you run it by itself
          +    2. It will loop while waiting for it to update and error out if it doesn't
          +    """
          +    # Pre-condition: Volume is Ready
          +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
          +    while endtime > datetime.now():
          +        code, data = api_client.volumes.get(unique_name)
          +        if "Bound" == data['status']['phase']:
          +            break
          +        sleep(5)
          +    else:
          +        raise AssertionError(
          +            "Volume not changed to phase: _Bound_ with {wait_timeout} timed out\n"
          +            f"Got error: {code}, {data}"
          +        )
          +
          +    spec = api_client.volumes.Spec.from_dict(data)
          +    spec.size = "10Gi"
          +    code, data = api_client.volumes.update(unique_name, spec)
          +
          +    assert 200 == code, (f"Failed to update volume with error: {code}, {data}")
          +
          1. Updates the volume created in the previous test to 10GB note: this will fail if you run it by itself
          2. @@ -176,7 +278,6 @@

            Methods

            class TestVolumesNegative
            -
            Expand source code @@ -227,6 +328,7 @@

            Methods

            assert 422 == code, (code, data) assert "Invalid" == data.get("code"), (code, data)
            +

            Class variables

            var pytestmark
            @@ -240,6 +342,21 @@

            Methods

            def test_create_without_name(self, api_client)
            +
            + +Expand source code + +
            def test_create_without_name(self, api_client):
            +    """
            +    1. Tries to create a volume without a name
            +    2. Checks that it gets a  422
            +    """
            +    spec = api_client.volumes.Spec(1)
            +    code, data = api_client.volumes.create("", spec)
            +
            +    assert 422 == code, (code, data)
            +    assert "Invalid" == data.get("code"), (code, data)
            +
            1. Tries to create a volume without a name
            2. Checks that it gets a @@ -250,6 +367,21 @@

              Methods

              def test_create_without_size(self, api_client, unique_name)
              +
              + +Expand source code + +
              def test_create_without_size(self, api_client, unique_name):
              +    """
              +    1. Tries to create a volume without a size
              +    2. Checks that it gets a 422
              +    """
              +    spec = api_client.volumes.Spec(0)
              +    code, data = api_client.volumes.create(unique_name, spec)
              +
              +    assert 422 == code, (code, data)
              +    assert "Invalid" == data.get("code"), (code, data)
              +
              1. Tries to create a volume without a size
              2. Checks that it gets a 422
              3. @@ -259,6 +391,20 @@

                Methods

                def test_delete_not_exist(self, api_client, unique_name)
                +
                + +Expand source code + +
                def test_delete_not_exist(self, api_client, unique_name):
                +    """
                +    1. Tries to delete a volume that doesn't exist
                +    2. Checks that it gets a 404
                +    """
                +    code, data = api_client.volumes.delete(unique_name)
                +
                +    assert 404 == code, (code, data)
                +    assert "NotFound" == data.get("code"), (code, data)
                +
                1. Tries to delete a volume that doesn't exist
                2. Checks that it gets a 404
                3. @@ -268,6 +414,20 @@

                  Methods

                  def test_get_not_exist(self, api_client, unique_name)
                  +
                  + +Expand source code + +
                  def test_get_not_exist(self, api_client, unique_name):
                  +    """
                  +    1. Tries to get a volume that doesn't exist
                  +    2. Checks that the get command gets a 404
                  +    """
                  +    code, data = api_client.volumes.get(unique_name)
                  +
                  +    assert 404 == code, (code, data)
                  +    assert "NotFound" == data.get('code'), (code, data)
                  +
                  1. Tries to get a volume that doesn't exist
                  2. Checks that the get command gets a 404
                  3. @@ -316,7 +476,7 @@

                    diff --git a/backend/conftest.html b/backend/conftest.html index df0ca81f9..7d6f2e164 100644 --- a/backend/conftest.html +++ b/backend/conftest.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.conftest API documentation - + @@ -37,36 +48,477 @@

                    Functions

                    def check_depends(self, depends, item)
                    +
                    + +Expand source code + +
                    def check_depends(self, depends, item):
                    +    # monkey patch `DependencyManager.checkDepends`
                    +
                    +    marker = item.get_closest_marker("dependency")
                    +    if marker and marker.kwargs.get('param'):
                    +        try:
                    +            param_id = item.callspec.id
                    +            depends = [f"{d}[{param_id}]" for d in depends]
                    +        except AttributeError:
                    +            pass
                    +
                    +    # ref: https://github.com/RKrahl/pytest-dependency/issues/57#issuecomment-1000896418
                    +    if marker and marker.kwargs.get('any'):
                    +        for depend in depends:
                    +            try:
                    +                self._check_depend([depend], item)
                    +            except pytest.skip.Exception:
                    +                continue
                    +            else:
                    +                return
                    +        pytest.skip("%s depends on any of %s" % (item.name, ", ".join(depends)))
                    +    else:
                    +        self._check_depend(depends, item)
                    +
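For illustration only (the test names below are hypothetical): with this patched checker, `any=True` lets a test run as long as at least one of its dependencies passed, and `param=True` resolves dependencies per parametrized case. A minimal sketch:

import pytest

@pytest.mark.dependency(name="create_vm")
def test_create_vm():
    ...

@pytest.mark.dependency(name="import_vm")
def test_import_vm():
    ...

# Runs when either create_vm or import_vm succeeded; without any=True,
# both dependencies would have to pass.
@pytest.mark.dependency(depends=["create_vm", "import_vm"], any=True)
def test_start_vm():
    ...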
                    def pytest_addoption(parser)
                    +
                    + +Expand source code + +
                    def pytest_addoption(parser):
                    +    with open('config.yml') as f:
                    +        config_data = yaml.safe_load(f)
                    +    parser.addoption(
                    +        '--endpoint',
                    +        action='store',
                    +        default=config_data['endpoint'],
                    +        help='Harvester API endpoint'
                    +    )
                    +    parser.addoption(
                    +        '--username',
                    +        action='store',
                    +        default=config_data['username'],
                    +        help='Harvester username'
                    +    )
                    +    parser.addoption(
                    +        '--password',
                    +        action='store',
                    +        default=config_data['password'],
                    +        help='Harvester password'
                    +    )
                    +    parser.addoption(
                    +        '--host-password',
                    +        action='store',
                    +        default=config_data['host-password'],
+        help='Password to access Harvester node'
                    +    )
                    +    parser.addoption(
                    +        '--host-private-key',
                    +        action='store',
                    +        default=config_data['host-private-key'],
                    +        help='private key to access Harvester node'
                    +    )
                    +    parser.addoption(
                    +        '--vlan-id',
                    +        action='store',
                    +        type=int,
                    +        default=config_data['vlan-id'],
                    +        help=('VLAN ID, if specified, will invoke the tests depended on '
                    +              'external networking.')
                    +    )
                    +    parser.addoption(
                    +        '--vlan-nic',
                    +        action='store',
                    +        default=config_data['vlan-nic'],
                    +        help='Physical NIC for VLAN. Default is "eth0"'
                    +    )
                    +    parser.addoption(
                    +        '--ip-pool-subnet',
                    +        action='store',
                    +        default=config_data['ip-pool-subnet'],
                    +        help='Subnet of IP pool for load balancer'
                    +    )
                    +    parser.addoption(
                    +        '--ip-pool-start',
                    +        action='store',
                    +        default=config_data['ip-pool-start'],
                    +        help='Start IP of IP pool for load balancer'
                    +    )
                    +    parser.addoption(
                    +        '--ip-pool-end',
                    +        action='store',
                    +        default=config_data['ip-pool-end'],
                    +        help='End IP of IP pool for load balancer'
                    +    )
                    +    parser.addoption(
                    +        '--wait-timeout',
                    +        action='store',
                    +        type=int,
                    +        default=config_data['wait-timeout'],
                    +        help='Wait time for polling operations'
                    +    )
                    +    parser.addoption(
                    +        '--sleep-timeout',
                    +        action='store',
                    +        type=int,
                    +        default=config_data['sleep-timeout'],
+        help='Sleep time between polling attempts'
                    +    )
                    +    parser.addoption(
                    +        '--node-scripts-location',
                    +        action='store',
                    +        default=config_data['node-scripts-location'],
                    +        help=('External scripts to power-off, power-up, and reboot a given '
                    +              'Harvester node to facilitate the host-specific tests')
                    +    )
                    +    parser.addoption(
                    +        '--opensuse-image-url',
                    +        action='store',
                    +        default=config_data.get('opensuse-image-url'),
                    +        help=('OpenSUSE image URL')
                    +    )
                    +    parser.addoption(
                    +        '--ubuntu-image-url',
                    +        action='store',
                    +        default=config_data.get('ubuntu-image-url'),
                    +        help=('ubuntu image URL')
                    +    )
                    +    parser.addoption(
                    +        '--terraform-scripts-location',
                    +        action='store',
                    +        default=config_data['terraform-scripts-location'],
                    +        help=('External scripts to create resources using terraform')
                    +    )
                    +    parser.addoption(
                    +        '--image-cache-url',
                    +        action='store',
                    +        default=config_data['image-cache-url'],
                    +        help=('URL for the local images cache')
                    +    )
                    +    parser.addoption(
                    +        '--accessKeyId',
                    +        action='store',
                    +        default=config_data['accessKeyId'],
                    +        help=('A user-id that uniquely identifies your account. ')
                    +    )
                    +    parser.addoption(
                    +        '--secretAccessKey',
                    +        action='store',
                    +        default=config_data['secretAccessKey'],
                    +        help=('The password to your account')
                    +    )
                    +    parser.addoption(
                    +        '--bucketName',
                    +        action='store',
                    +        default=config_data['bucketName'],
                    +        help=('Name of the bucket')
                    +    )
                    +    parser.addoption(
                    +        '--region',
                    +        action='store',
                    +        default=config_data['region'],
                    +        help=('Region of the bucket')
                    +    )
                    +    parser.addoption(
                    +        '--s3-endpoint',
                    +        action='store',
                    +        default=config_data.get('s3-endpoint', ''),
                    +        help=('S3 endpoint')
                    +    )
                    +    parser.addoption(
                    +        '--nfs-endpoint',
                    +        action='store',
                    +        default=config_data['nfs-endpoint'],
                    +        help=('Endpoint for storing backup in nfs share')
                    +    )
                    +    parser.addoption(
                    +        '--rancher-endpoint',
                    +        action='store',
                    +        default=config_data.get('rancher-endpoint', None),
                    +        help='Rancher API endpoint'
                    +    )
                    +    parser.addoption(
                    +        '--rancher-admin-password',
                    +        action='store',
                    +        default=config_data.get('rancher-admin-password', None),
                    +        help='Rancher admin user password'
                    +    )
                    +    parser.addoption(
                    +        '--k8s-version',
                    +        action='store',
                    +        default=config_data.get('k8s-version'),
                    +        help='K8s version to use for downstream cluster in Rancher integration tests'
                    +    )
                    +    parser.addoption(
                    +        '--rancher-cluster-wait-timeout',
                    +        action='store',
                    +        type=int,
                    +        default=config_data['rancher-cluster-wait-timeout'],
                    +        help='Wait time for polling Rancher cluster ready status'
                    +    )
                    +    parser.addoption(
                    +        '--nfs-mount-dir',
                    +        action='store',
                    +        default=config_data['nfs-mount-dir'],
                    +        help=('mount directory for nfs share')
                    +    )
                    +    parser.addoption(
                    +        '--upgrade-prepare-dependence',
                    +        action='store',
                    +        default=config_data['upgrade-prepare-dependence'],
+        help=('If true, prepare dependencies for upgrade tests')
                    +    )
                    +    parser.addoption(
                    +        '--upgrade-sc-replicas',
                    +        action='store',
                    +        default=config_data['upgrade-sc-replicas'],
                    +        help=('default storage class replicas')
                    +    )
                    +    parser.addoption(
                    +        '--upgrade-target-version',
                    +        action='store',
                    +        default=config_data['upgrade-target-version'],
+        help=('Target Harvester version for upgrade tests')
                    +    )
                    +    parser.addoption(
                    +        '--upgrade-iso-url',
                    +        action='store',
                    +        default=config_data['upgrade-iso-url'],
                    +        help=('URL for specific iso')
                    +    )
                    +    parser.addoption(
                    +        '--upgrade-iso-checksum',
                    +        action='store',
                    +        default=config_data['upgrade-iso-checksum'],
+        help=('Checksum of the specific ISO')
                    +    )
                    +    parser.addoption(
                    +        '--upgrade-wait-timeout',
                    +        action='store',
                    +        default=config_data['upgrade-wait-timeout'],
+        help=('Wait time for polling the Harvester cluster upgrade to complete')
                    +    )
                    +    parser.addoption(
                    +        '--terraform-provider-harvester',
                    +        action='store',
                    +        default=config_data.get('terraform-provider-harvester'),
                    +        help=('Version of Terraform Harvester Provider')
                    +    )
                    +    parser.addoption(
                    +        '--terraform-provider-rancher',
                    +        action='store',
                    +        default=config_data.get('terraform-provider-rancher'),
                    +        help=('Version of Terraform Rancher Provider')
                    +    )
                    +    parser.addoption(
                    +        '--ubuntu-checksum',
                    +        action='store',
                    +        default=config_data.get('ubuntu-checksum'),
                    +        help=('Checksum for ubuntu_image')
                    +    )
                    +    parser.addoption(
                    +        '--opensuse-checksum',
                    +        action='store',
                    +        default=config_data.get('opensuse-checksum'),
                    +        help=('Checksum for opensuse_image')
                    +    )
                    +
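A minimal sketch of how these options are consumed downstream (the fixture name is hypothetical); the defaults ultimately come from config.yml via pytest_addoption above:

import pytest

@pytest.fixture(scope="session")
def image_cache_url(request):
    # Returns the CLI value when given, otherwise the config.yml default
    # registered for --image-cache-url in pytest_addoption.
    return request.config.getoption("--image-cache-url")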
                    def pytest_collection_modifyitems(session, config, items)
                    +
                    + +Expand source code + +
                    @pytest.hookimpl(hookwrapper=True)
                    +def pytest_collection_modifyitems(session, config, items):
                    +    if config.getoption('--vlan-id') == -1:
                    +        skip_public_network = pytest.mark.skip(reason=(
                    +            'VM not accessible because no VLAN setup with public routing'))
                    +        for item in items:
                    +            if 'public_network' in item.keywords:
                    +                item.add_marker(skip_public_network)
                    +
                    +    if (config.getoption('--nfs-endpoint') == '' and
                    +            config.getoption('--accessKeyId') == ''):
                    +        skip_backup = pytest.mark.skip(reason=(
                    +            'AWS credentials or NFS endpoint are not available'))
                    +        for item in items:
                    +            if 'backup' in item.keywords:
                    +                item.add_marker(skip_backup)
                    +
                    +    if config.getoption('--accessKeyId') == '':
                    +        skip_backup = pytest.mark.skip(reason=(
                    +            'AWS credentials are not available'))
                    +        for item in items:
                    +            if 'backups3' in item.keywords:
                    +                item.add_marker(skip_backup)
                    +
                    +    if config.getoption('--nfs-endpoint') == '':
                    +        skip_backup = pytest.mark.skip(reason=(
                    +            'NFS endpoint is not available'))
                    +        for item in items:
                    +            if 'backupnfs' in item.keywords:
                    +                item.add_marker(skip_backup)
                    +
                    +    if (not config.getoption('--rancher-endpoint') and
                    +            not config.getoption('--rancher-admin-password')):
                    +        skip_rancher_integration_external = pytest.mark.skip(reason=(
                    +            'Rancher endpoint and admin password are not specified'))
                    +        for item in items:
                    +            if 'rancher_integration_with_external_rancher' in item.keywords:
                    +                item.add_marker(skip_rancher_integration_external)
                    +
                    +    if not config.getoption("--delete-host", False):
                    +        for item in items:
                    +            if "delete_host" in item.keywords:
                    +                item.add_marker(pytest.mark.skip(reason="Not configured to test host deletion."))
                    +
                    +    # legacy code above
+    # ''' To enable test selection with the `and depends` keyword,
+    #     selecting test cases together with the test cases they depend on.
                    +    # '''
                    +
                    +    if "and depends" not in config.option.keyword:
                    +        # DO nothing
                    +        yield
                    +        return
                    +
                    +    all_items, old_keyword = items.copy(), config.option.keyword
                    +    config.option.keyword = old_keyword.replace('and depends', '')
                    +
                    +    yield
                    +
                    +    scope_cls = {
                    +        "session": pytest.Session,
                    +        "package": pytest.Package,
                    +        "module": pytest.Module,
                    +        "class": pytest.Class
                    +    }
                    +    # ref: https://github.com/RKrahl/pytest-dependency/blob/0.5.1/pytest_dependency.py#L77
                    +    # named_items : dict[('dep-scope', 'dep-name'), list[(idx, 'test-item')]]
                    +    # picked : list[(idx, 'test-item')]
                    +    named_items, picked = dict(), list()
                    +    for idx, item in enumerate(all_items):
                    +        if item in items:
                    +            picked.append((idx, item))
                    +        try:
                    +            marker = item.get_closest_marker('dependency')
                    +            scope = marker.kwargs.get('scope', 'module')
                    +            node = item.getparent(scope_cls[scope])
                    +            named_items.setdefault((node, marker.kwargs['name']), []).append((idx, item))
                    +        except AttributeError:
                    +            continue
                    +        except KeyError:
                    +            nodeid = item.nodeid.replace("::()::", "::")
                    +            if scope not in ("session", "package"):
                    +                shift = 2 if scope == "class" else 1
                    +                nodeid = nodeid.split("::", shift)[shift]
                    +            named_items.setdefault((node, nodeid), []).append((idx, item))
                    +
                    +    def pick_depends(idx, item, items):
                    +        picked = []
                    +        try:
                    +            marker = item.get_closest_marker('dependency')
                    +            scope = marker.kwargs.get('scope', 'module')
                    +            node = item.getparent(scope_cls[scope])
                    +            for name in marker.kwargs['depends']:
                    +                picked.extend(items.get((node, name), []))
                    +        except (AttributeError, KeyError):
                    +            pass
                    +        return picked
                    +
                    +    depends = []  # list[(idx, 'test-item')]
                    +    while picked:
                    +        depends.append(picked.pop())
                    +        picked.extend(pick_depends(*depends[-1], named_items))
                    +
                    +    session.items = items = [it for idx, it in sorted(set(depends), key=lambda it: it[0])]
                    +    # deselected.extend(t for ts in named_items.values() for i, t in ts if t not in items)
                    +    deselected = [t for t in all_items if t not in items]
                    +    # update to let the report shows correct counts
                    +    config.pluginmanager.get_plugin('terminalreporter').stats['deselected'] = deselected
                    +
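A hedged sketch of the selection behaviour (test names are hypothetical): running `pytest -k "test_connect and depends"` strips `and depends` from the keyword, collects the selected tests, then walks their dependency markers so that `test_deploy` below is collected as well:

import pytest

@pytest.mark.dependency(name="deploy")
def test_deploy():
    ...

@pytest.mark.dependency(depends=["deploy"])
def test_connect():
    ...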
                    def pytest_configure(config)
                    +
                    + +Expand source code + +
                    def pytest_configure(config):
                    +    # Register marker as the format (marker, (description))
                    +    markers = [
                    +        ("skip_version_if", "Mark test skipped when cluster version hit the condition"),
                    +        ("skip_version_before", (
                    +            "mark test skipped when cluster version < provided version")),
                    +        ("skip_version_after", (
                    +            "mark test skipped when cluster version >= provided version")),
                    +        ('p0', ("mark the test's priority is p0")),
                    +        ('p1', ("mark the test's priority is p1")),
                    +        ('p2', ("mark the test's priority is p2")),
                    +        ('hosts', ('{_r} host tests')),
                    +        ('delete_host', ('{_r} host and will delete one of hosts')),
                    +        ("negative", ("{_r} a negative tests")),
                    +        ('keypairs', ("{_r} SSH keypairs tests")),
                    +        ('images', ("{_r} image tests")),
                    +        ("networks", ("{_r} vlan network tests")),
                    +        ("volumes", ("{_r} volume tests")),
                    +        ("virtualmachines", ("{_r} VM tests")),
                    +        ("templates", ("{_r} VM template tests")),
                    +        ("support_bundle", ("{_r} Support Bundle tests")),
                    +        ("settings", ("{_r} settings tests")),
                    +        ("upgrade", ("{_r} upgrade tests")),
                    +        ("any_nodes", ("{_r} tests which could be ran on clushter with any nodes")),
                    +        ("single_node", ("{_r} tests which could only be ran on cluster with single node")),
                    +        ("three_nodes", ("{_r} tests which could only be ran on cluster with three nodes")),
                    +        ('rancher', ("{_r} rancher integration tests")),
                    +        ('rke1', ("{_r} rancher RKE1 tests")),
                    +        ('rke2', ("{_r} rancher RKE2 tests")),
                    +        ('terraform', ("{_r} terraform tests")),
                    +        ('virtualmachines', ('{_r} virtualmachines tests')),
                    +        ('backup_target', ('{_r} backup-target tests')),
                    +        ('S3', ('{_r} backup-target tests with S3')),
                    +        ('NFS', ('{_r} backup-target tests with NFS'))
                    +    ]
                    +
                    +    for m, msg in markers:
+        related = 'mark the test as related to'
                    +        config.addinivalue_line("markers", f"{m}:{msg.format(_r=related)}")
                    +
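For example (a sketch, test body omitted), the registered markers are applied to tests like this so that priority and category can be selected with `-m`:

import pytest

@pytest.mark.p0
@pytest.mark.negative
@pytest.mark.volumes
def test_create_volume_without_size():
    ...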
                    def pytest_html_results_table_header(cells)
                    +
                    + +Expand source code + +
                    def pytest_html_results_table_header(cells):
                    +    cells.insert(1, '<th class="sortable time" data-column-type="time">EndTime</th>')
                    +
                    def pytest_html_results_table_row(report, cells)
                    +
                    + +Expand source code + +
                    def pytest_html_results_table_row(report, cells):
                    +    cells.insert(1, f'<td class="col-time">{datetime.utcnow()}</td>')
                    +

            @@ -98,7 +550,7 @@

            Functions

            diff --git a/backend/fixtures/api_client.html b/backend/fixtures/api_client.html index 42f942ca4..71c05ed63 100644 --- a/backend/fixtures/api_client.html +++ b/backend/fixtures/api_client.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.api_client API documentation - + @@ -37,78 +48,415 @@

            Functions

            def api_client(request, harvester_metadata)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def api_client(request, harvester_metadata):
            +    endpoint = request.config.getoption("--endpoint")
            +    username = request.config.getoption("--username")
            +    password = request.config.getoption("--password")
            +    ssl_verify = request.config.getoption("--ssl_verify", False)
            +
            +    api = HarvesterAPI(endpoint)
            +    api.authenticate(username, password, verify=ssl_verify)
            +
            +    api.session.verify = ssl_verify
            +
            +    harvester_metadata['Cluster Endpoint'] = endpoint
            +    harvester_metadata['Cluster Version'] = api.cluster_version.raw
            +
            +    return api
            +
            def expected_settings()
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def expected_settings():
            +    return {
            +        "1.1.0": {'storage-network', 'containerd-registry', 'ui-plugin-index'},
            +        "default": {
            +            'additional-ca',
            +            'auto-disk-provision-paths',
            +            'backup-target',
            +            'cluster-registration-url',
            +            'http-proxy',
            +            'log-level',
            +            'overcommit-config',
            +            'release-download-url',
            +            'server-version',
            +            'ssl-certificates',
            +            'ssl-parameters',
            +            'support-bundle-image',
            +            'support-bundle-namespaces',
            +            'support-bundle-timeout',
            +            'ui-index',
            +            'ui-source',
            +            'upgrade-checker-enabled',
            +            'upgrade-checker-url',
            +            'vip-pools',
            +            'vm-force-reset-policy',
            +        }
            +    }
            +
            def fake_image_file()
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def fake_image_file():
            +    with NamedTemporaryFile("wb") as f:
            +        f.seek(10 * 1024 ** 2 - 1)  # 10MB
            +        f.write(b"\0")
            +        f.seek(0)
            +        yield Path(f.name)
            +
            def gen_unique_name()
            +
            + +Expand source code + +
            @pytest.fixture(scope='module')
            +def gen_unique_name():
            +    """Generate unique name on-demand"""
            +    return lambda: datetime.now().strftime("%Hh%Mm%Ss%f-%m-%d")
            +

            Generate unique name on-demand

            def harvester_metadata(pytestconfig)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def harvester_metadata(pytestconfig):
+    ''' Used to store Harvester's metadata and expose it in the HTML report. '''
            +    # ref: https://github.com/pytest-dev/pytest-html/blob/4.1.1/src/pytest_html/basereport.py#L71
            +    try:
            +        from pytest_metadata.plugin import metadata_key
            +        metadata = pytestconfig.stash[metadata_key]
            +    except ImportError:
            +        metadata = pytestconfig._metadata
            +
            +    harv = dict()
            +    metadata["Harvester"] = harv
            +    return harv
            +

Used to store Harvester's metadata and expose it in the HTML report.

            def host_shell(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def host_shell(request):
            +    password = request.config.getoption("--host-password") or None
            +    pkey = request.config.getoption('--host-private-key') or None
            +    if pkey:
            +        pkey = RSAKey.from_private_key(StringIO(pkey))
            +
            +    class HostShell:
            +        _client = _jump = None
            +
            +        def __init__(self, username, password=None, pkey=None):
            +            self.username = username
            +            self.password = password
            +            self.pkey = pkey
            +
            +        def __enter__(self):
            +            return self
            +
            +        def __exit__(self, exc_type, exc_value, exc_tb):
            +            self.logout()
            +
            +        @property
            +        def client(self):
            +            return self._client
            +
            +        def reconnect(self, ipaddr, port=22, **kwargs):
            +            if self.client:
            +                self.client.close()
            +                self._client = cli = SSHClient()
            +                cli.set_missing_host_key_policy(MissingHostKeyPolicy())
            +                kws = dict(username=self.username, password=self.password, pkey=self.pkey)
            +                kws.update(kwargs)
            +                cli.connect(ipaddr, port, **kws)
            +
            +        def login(self, ipaddr, port=22, jumphost=False, allow_agent=False,
            +                  look_for_keys=False, **kwargs):
            +            if not self.client:
            +                cli = SSHClient()
            +                cli.set_missing_host_key_policy(MissingHostKeyPolicy())
            +                kws = dict(username=self.username, password=self.password, pkey=self.pkey)
            +                kws.update(kwargs)
            +
            +                # in case we're using a password to log into the host, this
            +                # prevents paramiko from getting confused by ssh keys in the ssh
            +                # agent:
            +                if self.password and not self.pkey:
            +                    kws.update(dict(allow_agent=allow_agent,
            +                                    look_for_keys=look_for_keys))
            +
            +                cli.connect(ipaddr, port, **kws)
            +                self._client = cli
            +
            +                if jumphost:
            +                    self.jumphost_policy()
            +                    self._jump = True
            +                    self.reconnect(ipaddr, port, **kws)
            +
            +            return self
            +
            +        def logout(self):
            +            if self.client and self.client.get_transport():
            +                if self._jump:
            +                    self.jumphost_policy(False)
            +                    self._jump = None
            +                self.client.close()
            +                self._client = None
            +
            +        def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False, env=None,
            +                         splitlines=False):
            +            _, out, err = self.client.exec_command(command, bufsize, timeout, get_pty, env)
            +            out, err = out.read().decode(), err.read().decode()
            +            if splitlines:
            +                out = out.splitlines()
            +            return out, err
            +
            +        def jumphost_policy(self, allow=True):
            +            ctx, err = self.exec_command("sudo cat /etc/ssh/sshd_config")
            +            if allow:
            +                renew = re.sub(r'\n(Allow(?:Tcp|Agent)Forwarding no)',
+                               lambda m: f"\n#{m.group(1)}", ctx, flags=re.I | re.M)
            +            else:
            +                renew = re.sub(r'#(Allow(?:Tcp|Agent)Forwarding no)',
+                               lambda m: m.group(1), ctx, flags=re.I | re.M)
            +            self.exec_command(f'sudo cat<<"EOF">_config\n{renew}EOF')
            +            self.exec_command('sudo mv _config /etc/ssh/sshd_config'
            +                              ' && sudo systemctl restart sshd')
            +
            +    return HostShell('rancher', password, pkey)
            +
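A minimal usage sketch, assuming a reachable node at 10.0.0.11 (placeholder address); the fixture returns a HostShell bound to the `rancher` user:

def test_node_uptime(host_shell):
    with host_shell.login("10.0.0.11") as sh:
        out, err = sh.exec_command("uptime")
        assert not err, (out, err)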
            def host_state(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def host_state(request):
            +    class HostState:
            +        files = ("power_off.sh", "power_on.sh", "reboot.sh")  # [False, True, -1]
            +
            +        def __init__(self, script_path, delay=120):
            +            self.path = Path(script_path)
            +            self.delay = delay
            +
            +        def __repr__(self):
            +            return f"HostState({self.path}, {self.delay})"
            +
            +        def power(self, name, ip, on=True):
            +            proc = run([self.path / self.files[on], name, ip],
            +                       stdout=PIPE, stderr=PIPE)
            +            return proc.returncode, proc.stdout, proc.stderr
            +
            +        def reboot(self, name, ip):
            +            return self.power(name, ip, -1)
            +
            +    return HostState(request.config.getoption("--node-scripts-location"))
            +
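A usage sketch with a hypothetical node name and IP; the power scripts are expected under --node-scripts-location:

from time import sleep

def test_host_powercycle(host_state):
    rc, out, err = host_state.power("node-1", "10.0.0.11", on=False)
    assert rc == 0, (out, err)
    sleep(host_state.delay)  # give the node time to actually go down
    rc, out, err = host_state.power("node-1", "10.0.0.11", on=True)
    assert rc == 0, (out, err)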
            def opensuse_checksum(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def opensuse_checksum(request):
            +    """Returns openSUSE checksum from config"""
            +    return request.config.getoption("--opensuse-checksum")
            +

            Returns openSUSE checksum from config

            def polling_for(wait_timeout, sleep_timeout)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def polling_for(wait_timeout, sleep_timeout):
            +    # TODO: Try to redesign refer to multiprocessing package (e.g. apply_async and map_async)
            +    def _polling_for(subject: str,
            +                     checker: Callable[..., bool],
            +                     poller: Callable, *args,
            +                     timeout=wait_timeout):
            +        """ Polling expected confition for `timeout`s every `sleep_timeout`s
            +
            +        Arguments:
+          subject: str, what is being waited for
+          checker: Callable, checks `poller` output and returns bool
            +          args: list, [*poller_args, testee]
            +          poller: Callable, poller(*poller_args, testee) for each testee
            +
            +        Returns:
            +          Any: `poller` output if qualified by `checker`
            +
            +        Raises:
            +          AssertionError: if still NOT qualified within `timeout`s
            +        """
            +        *poller_args, testee = args
            +        testees = testee if isinstance(testee, list) else [testee]
            +        checker_args_len = len(getfullargspec(checker).args)
            +
            +        endtime = datetime.now() + timedelta(seconds=timeout)
            +        while endtime > datetime.now():
            +            for testee in testees[:]:
            +                output = poller(*poller_args, testee)
            +                # unpack poller output according to checker signature
            +                qualified = checker(*output) if checker_args_len > 1 else checker(output)
            +                if qualified:
            +                    testees.remove(testee)
            +            if not testees:
            +                return output
            +            sleep(sleep_timeout)
            +        else:
            +            raise AssertionError(
            +                f'Timeout {timeout}s waiting for {subject}\n'
            +                f'Got error: {output}'
            +            )
            +
            +    return _polling_for
            +
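A sketch of how tests call it, assuming a volume named by the `unique_name` fixture; the checker unpacks the (code, data) pair returned by the poller:

def test_volume_becomes_bound(api_client, polling_for, unique_name):
    polling_for(
        f"volume {unique_name} to reach phase Bound",
        lambda code, data: 200 == code and "Bound" == data.get('status', {}).get('phase'),
        api_client.volumes.get, unique_name,
    )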
            def rancher_wait_timeout(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def rancher_wait_timeout(request):
            +    return request.config.getoption("--rancher-cluster-wait-timeout", 1800)
            +
            def skip_version_after(request, api_client)
            +
            + +Expand source code + +
            @pytest.fixture(autouse=True)
            +def skip_version_after(request, api_client):
            +    mark = request.node.get_closest_marker("skip_version_after")
            +    if mark:
            +        cluster_ver = api_client.cluster_version
            +        for target_ver in mark.args:
            +            if not hasattr(cluster_ver, 'major') or parse_version(target_ver) <= cluster_ver:
            +                return pytest.skip(
            +                    f"Cluster Version `{api_client.cluster_version}` is not included"
            +                    f" in the supported version (most < `{target_ver}`)"
            +                )
            +
            def skip_version_before(request, api_client)
            +
            + +Expand source code + +
            @pytest.fixture(autouse=True)
            +def skip_version_before(request, api_client):
            +    mark = request.node.get_closest_marker("skip_version_before")
            +    if mark:
            +        cluster_ver = api_client.cluster_version
            +        for target_ver in mark.args:
            +            if '-head' not in cluster_ver.public and parse_version(target_ver) > cluster_ver:
            +                return pytest.skip(
            +                    f"Cluster Version `{api_client.cluster_version}` is not included"
            +                    f" in the supported version (most >= `{target_ver}`)"
            +                )
            +
            def skip_version_if(request, api_client)
            +
            + +Expand source code + +
            @pytest.fixture(autouse=True)
            +def skip_version_if(request, api_client):
+    ''' Mark the test case to be skipped when the condition string is hit.
            +
            +    Args:
            +        *args: Version string prefixing with one of operators: `!=`, `==`, `>=`, `<=`, `>`, `<`
            +    Keyword Args:
            +        reason: The reason string for `pytest.skip`, default is:
            +            "Cluster Version `{cluster_version}` is not included in versions: {versions}"
            +        condition: Condition callable function to check compare result(bool), default is `all`
            +    '''
            +    default_reason = (
            +        "Cluster Version `{cluster_version}` is not included in versions: {versions}"
            +    )
            +    mark = request.node.get_closest_marker("skip_version_if")
            +    if mark:
            +        cluster_ver = api_client.cluster_version
            +        checks = [version_check(vstr, cluster_ver) for vstr in mark.args]
            +        reason = mark.kwargs.get('reason', default_reason)
            +        if mark.kwargs.get('condition', all)(r for *_, r in checks):
            +            versions = [f"{op} {v}" for op, v, _ in checks]
            +            return pytest.skip(
            +                reason.format(cluster_version=cluster_ver, versions=versions)
            +            )
            +
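An illustrative marker usage (version strings are placeholders); with `condition=any` the test is skipped when the cluster is older than v1.1.0 or at/after v1.3.0, i.e. it only runs on v1.1.x and v1.2.x:

import pytest

@pytest.mark.skip_version_if("< v1.1.0", ">= v1.3.0", condition=any)
def test_feature_only_in_v11_and_v12():
    ...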

Mark the test case to be skipped when the condition string is hit.

            Args

            @@ -124,48 +472,145 @@

            Args

            def sleep_timeout(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def sleep_timeout(request):
            +    return request.config.getoption("--sleep-timeout", 4)
            +
            def ssh_keypair()
            +
            + +Expand source code + +
            @pytest.fixture(scope="module")
            +def ssh_keypair():
            +    private_key = asymmetric.rsa.generate_private_key(
            +        public_exponent=65537,
            +        key_size=1024,
            +        backend=backends.default_backend()
            +    )
            +    private_key_pem = private_key.private_bytes(
            +        serialization.Encoding.PEM,
            +        serialization.PrivateFormat.OpenSSH,
            +        serialization.NoEncryption()
            +    )
            +
            +    public_key = private_key.public_key()
            +    public_key_ssh = public_key.public_bytes(
            +        serialization.Encoding.OpenSSH,
            +        serialization.PublicFormat.OpenSSH
            +    )
            +
            +    return public_key_ssh.decode('utf-8'), private_key_pem.decode('utf-8')
            +
            def support_bundle_state()
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def support_bundle_state():
            +    class SupportBundle:
            +        def __init__(self, fio):
            +            self.uid = ""
            +            self.files = list()  # for checking file name
            +            self.fio = fio  # for checking file content
            +
            +    with NamedTemporaryFile() as f:
            +        yield SupportBundle(f)
            +
            def ubuntu_checksum(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def ubuntu_checksum(request):
            +    """Returns Ubuntu checksum from config"""
            +    return request.config.getoption("--ubuntu-checksum")
            +

            Returns Ubuntu checksum from config

            def unique_name()
            +
            + +Expand source code + +
            @pytest.fixture(scope='module')
            +def unique_name():
            +    """Default unique name"""
            +    return datetime.now().strftime("%Hh%Mm%Ss%f-%m-%d")
            +

            Default unique name

            def upgrade_timeout(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def upgrade_timeout(request):
            +    return request.config.getoption('--upgrade-wait-timeout') or 7200
            +
            def version_check(vstring, version)
            +
            + +Expand source code + +
            def version_check(vstring, version):
            +    from operator import le, lt, ge, gt, ne, eq
            +    ops = {"<=": le, "<": lt, ">=": ge, ">": gt, "!=": ne, "==": eq}
            +
            +    try:
            +        op = target_ver = None
            +        op, target_ver = re.search(r"([<>=!]+)\s?(.+)", vstring).groups()
            +        return op, target_ver, ops[op](version, parse_version(target_ver))
            +    except Exception:
            +        return op, target_ver, False
            +
            def wait_timeout(request)
            +
            + +Expand source code + +
            @pytest.fixture(scope="session")
            +def wait_timeout(request):
            +    return request.config.getoption("--wait-timeout", 300)
            +
            @@ -212,7 +657,7 @@

            Args

            diff --git a/backend/fixtures/base.html b/backend/fixtures/base.html index 91844bf32..f3ceae5c2 100644 --- a/backend/fixtures/base.html +++ b/backend/fixtures/base.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.base API documentation - + @@ -37,6 +48,25 @@

            Functions

            def wait_until(timeout, snooze=3)
            +
            + +Expand source code + +
            def wait_until(timeout, snooze=3):
            +    def wait_until_decorator(api_func):
            +        def wrapped(*args, **kwargs):
            +            endtime = datetime.now() + timedelta(seconds=timeout)
            +            while endtime > datetime.now():
            +                qualified, (code, data) = api_func(*args, **kwargs)
            +                if qualified:
            +                    break
            +                sleep(snooze)
            +            return qualified, (code, data)
            +
            +        return wrapped
            +
            +    return wait_until_decorator
            +
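A sketch of the decorator in use (checker name and timeouts are illustrative, and the import assumes the module path shown above); the wrapped callable keeps polling until its first return value is truthy or the timeout elapses:

from harvester_e2e_tests.fixtures.base import wait_until

@wait_until(300, snooze=5)
def volume_bound(api_client, name):
    code, data = api_client.volumes.get(name)
    return "Bound" == data.get('status', {}).get('phase'), (code, data)

# bound, (code, data) = volume_bound(api_client, "my-volume")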
    @@ -63,7 +93,7 @@

    Functions

    diff --git a/backend/fixtures/images.html b/backend/fixtures/images.html index 2dfb8f120..2b76e5312 100644 --- a/backend/fixtures/images.html +++ b/backend/fixtures/images.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.images API documentation - + @@ -37,24 +48,98 @@

    Functions

    def image_checker(api_client, wait_timeout, sleep_timeout)
    +
    + +Expand source code + +
    @pytest.fixture(scope="session")
    +def image_checker(api_client, wait_timeout, sleep_timeout):
    +    class ImageChecker:
    +        def __init__(self):
    +            self.images = api_client.images
    +
    +        @wait_until(wait_timeout, sleep_timeout)
    +        def wait_downloaded(self, image_name):
    +            code, data = self.images.get(image_name)
    +            if data.get('status', {}).get('progress') == 100:
    +                return True, (code, data)
    +            return False, (code, data)
    +
    +        @wait_until(wait_timeout, sleep_timeout)
    +        def wait_deleted(self, image_name):
    +            code, data = self.images.get(image_name)
    +            if code == 404:
    +                return True, (code, data)
    +            return False, (code, data)
    +
    +    return ImageChecker()
    +
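A usage sketch (the image name comes from the `unique_name` fixture here): tests block on the checker before consuming the image:

def test_image_downloaded(api_client, image_checker, unique_name):
    downloaded, (code, data) = image_checker.wait_downloaded(unique_name)
    assert downloaded, (code, data)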
    def image_k3s(request)
    +
    + +Expand source code + +
    @pytest.fixture(scope="session")
    +def image_k3s(request):
    +    external = "https://github.com/rancher/k3os/releases/download/v0.20.11-k3s2r1/"
    +    base_url = request.config.getoption("--image-cache-url") or external
    +    url = urlparse(urljoin(f"{base_url}/", "k3os-amd64.iso"))
    +
    +    return ImageInfo(url, ssh_user="k3s")
    +
    def image_opensuse(request, api_client)
    +
    + +Expand source code + +
    @pytest.fixture(scope="session")
    +def image_opensuse(request, api_client):
    +    image_server = request.config.getoption("--image-cache-url")
    +    image_checksum = request.config.getoption("--opensuse-checksum", default=None)
    +    url = urlparse(
    +        request.config.getoption("--opensuse-image-url")
    +    )
    +
    +    if image_server:
    +        *_, image_name = url.path.rsplit("/", 1)
    +        url = urlparse(urljoin(f"{image_server}/", image_name))
    +
    +    return ImageInfo(url, image_checksum, name="opensuse", ssh_user="opensuse")
    +
    def image_ubuntu(request)
    +
    + +Expand source code + +
    @pytest.fixture(scope="session")
    +def image_ubuntu(request):
    +    image_server = request.config.getoption("--image-cache-url")
    +    image_checksum = request.config.getoption("--ubuntu-checksum", default=None)
    +    url = urlparse(
    +        request.config.getoption("--ubuntu-image-url") or DEFAULT_UBUNTU_IMAGE_URL
    +    )
    +
    +    if image_server:
    +        *_, image_name = url.path.rsplit("/", 1)
    +        url = urlparse(urljoin(f"{image_server}/", image_name))
    +
    +    return ImageInfo(url, image_checksum, name="ubuntu", ssh_user="ubuntu")
    +

@@ -67,7 +152,6 @@

Classes

(url_result, image_checksum=None, name='', ssh_user=None)
-
Expand source code @@ -95,11 +179,11 @@

Classes

return self.url_result.geturl().split("file://", 1)[-1] return self.url_result.geturl()
+

Instance variables

prop is_file
-
Expand source code @@ -108,10 +192,10 @@

Instance variables

def is_file(self): return "file" == self.url_result.scheme
+
prop url
-
Expand source code @@ -122,6 +206,7 @@

Instance variables

return self.url_result.geturl().split("file://", 1)[-1] return self.url_result.geturl()
+
@@ -161,7 +246,7 @@

-

Generated by pdoc 0.11.1.

+

Generated by pdoc 0.11.5.

diff --git a/backend/fixtures/index.html b/backend/fixtures/index.html index cdae09b78..534a863a4 100644 --- a/backend/fixtures/index.html +++ b/backend/fixtures/index.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures API documentation - + @@ -101,7 +112,7 @@

Sub-modules

diff --git a/backend/fixtures/networks.html b/backend/fixtures/networks.html index 018daa15d..0e603b6a6 100644 --- a/backend/fixtures/networks.html +++ b/backend/fixtures/networks.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.networks API documentation - + @@ -37,18 +48,59 @@

Functions

def network_checker(api_client, wait_timeout, sleep_timeout)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def network_checker(api_client, wait_timeout, sleep_timeout):
+    class NetworkChecker:
+        def __init__(self):
+            self.networks = api_client.networks
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_routed(self, vnet_name):
+            code, data = self.networks.get(vnet_name)
+            annotations = data['metadata'].get('annotations', {})
+            route = json.loads(annotations.get('network.harvesterhci.io/route', '{}'))
+            if code == 200 and route.get('connectivity') == 'true':
+                return True, (code, data)
+            return False, (code, data)
+
+    return NetworkChecker()
+
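A usage sketch with a hypothetical VM network name; the checker waits for the route annotation to report connectivity:

def test_vlan_network_routed(network_checker):
    routed, (code, data) = network_checker.wait_routed("vlan-100")
    assert routed, (code, data)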
def vlan_id(request)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def vlan_id(request):
+    vlan_id = request.config.getoption('--vlan-id')
+    assert 0 < vlan_id < 4095, f"VLAN ID should be in range 1-4094, not {vlan_id}"
+    return vlan_id
+
def vlan_nic(request)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def vlan_nic(request):
+    vlan_nic = request.config.getoption('--vlan-nic')
+    assert vlan_nic, f"VLAN NIC {vlan_nic} not configured correctly."
+    return vlan_nic
+

@@ -77,7 +129,7 @@

Functions

diff --git a/backend/fixtures/rancher_api_client.html b/backend/fixtures/rancher_api_client.html index d0f305b4b..8a8287c96 100644 --- a/backend/fixtures/rancher_api_client.html +++ b/backend/fixtures/rancher_api_client.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.rancher_api_client API documentation - + @@ -37,24 +48,93 @@

Functions

def k3s_version(request, rancher_api_client, harvester_metadata)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def k3s_version(request, rancher_api_client, harvester_metadata):
+    target_ver = request.config.getoption("--k8s-version")
+
+    resp = rancher_api_client._get("v1-k3s-release/releases")
+    assert resp.ok
+    supported_vers = [r['id'] for r in resp.json()['data']]
+
+    ver = _pickup_k8s_version(supported_vers, target_ver)
+    harvester_metadata['K3S Version'] = ver
+    return ver
+
def rancher_api_client(request, harvester_metadata)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def rancher_api_client(request, harvester_metadata):
+    endpoint = request.config.getoption("--rancher-endpoint")
+    password = request.config.getoption("--rancher-admin-password")
+    ssl_verify = request.config.getoption("--ssl_verify", False)
+
+    api = RancherAPI(endpoint)
+    api.authenticate("admin", password, verify=ssl_verify)
+
+    api.session.verify = ssl_verify
+
+    harvester_metadata['Rancher Endpoint'] = endpoint
+    harvester_metadata['Rancher Version'] = api.cluster_version.raw
+
+    return api
+
def rke1_version(request, rancher_api_client, harvester_metadata)
+
+ +Expand source code + +
@pytest.fixture(scope='session')
+def rke1_version(request, rancher_api_client, harvester_metadata):
+    target_ver = request.config.getoption("--k8s-version")
+
+    code, data = rancher_api_client.settings.get("k8s-versions-current")
+    assert 200 == code, (code, data)
+    supported_vers = data["value"].split(",")
+
+    ver = _pickup_k8s_version(supported_vers, target_ver)
+    harvester_metadata['RKE1 Version'] = ver
+    return ver
+
def rke2_version(request, rancher_api_client, harvester_metadata)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def rke2_version(request, rancher_api_client, harvester_metadata):
+    target_ver = request.config.getoption("--k8s-version")
+
+    # Ref. https://github.com/rancher/dashboard/blob/master/shell/edit/provisioning.cattle.io.cluster/rke2.vue  # noqa
+    resp = rancher_api_client._get("v1-rke2-release/releases")
+    assert resp.ok
+    supported_vers = [r['id'] for r in resp.json()['data']]
+
+    ver = _pickup_k8s_version(supported_vers, target_ver)
+    harvester_metadata['RKE2 Version'] = ver
+    return ver
+
@@ -84,7 +164,7 @@

Functions

diff --git a/backend/fixtures/settings.html b/backend/fixtures/settings.html index 70698d80c..e178b836e 100644 --- a/backend/fixtures/settings.html +++ b/backend/fixtures/settings.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.settings API documentation - + @@ -37,6 +48,110 @@

Functions

def setting_checker(api_client, wait_timeout, sleep_timeout)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def setting_checker(api_client, wait_timeout, sleep_timeout):
+    class SettingChecker:
+        def __init__(self):
+            self.settings = api_client.settings
+            self.nets_annotation = 'k8s.v1.cni.cncf.io/networks'
+            self.net_status_annotation = 'k8s.v1.cni.cncf.io/network-status'
+
+        def _storage_net_configured(self):
+            code, data = self.settings.get('storage-network')
+
+            if (cs := data.get('status', {}).get('conditions')):
+                if 'True' == cs[-1].get('status') and 'Completed' == cs[-1].get('reason'):
+                    return True, (code, data)
+            return False, (code, data)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_enabled_on_harvester(self):
+            snet_configured, (code, data) = self._storage_net_configured()
+            if snet_configured and data.get('value'):
+                return True, (code, data)
+            return False, (code, data)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_disabled_on_harvester(self):
+            snet_configured, (code, data) = self._storage_net_configured()
+            if snet_configured and not data.get('value'):
+                return True, (code, data)
+            return False, (code, data)
+
+        def _lh_instance_mgrs_running(self):
+            code, data = api_client.get_pods(namespace='longhorn-system')
+            if not (code == 200):
+                return False, (code, data)
+
+            lh_instance_mgrs = [pod for pod in data['data'] if 'instance-manager' in pod['id']]
+            if not lh_instance_mgrs:
+                return False, ("No instance-manager pods", data)
+
+            for imgr in lh_instance_mgrs:
+                if 'Running' != imgr['status']['phase']:
+                    return False, (f"Pod {imgr['id']} is not Running", imgr)
+
+            return True, (None, lh_instance_mgrs)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_enabled_on_longhorn(self, snet_cidr):
+            imgrs_running, (code, data) = self._lh_instance_mgrs_running()
+            if not imgrs_running:
+                return False, (code, data)
+
+            for imgr in data:
+                annotations = imgr['metadata']['annotations']
+
+                for na in [self.nets_annotation, self.net_status_annotation]:
+                    if na not in annotations:
+                        return False, (f"Pod has no annotation {na}", imgr)
+
+                # Check k8s.v1.cni.cncf.io/networks
+                try:
+                    nets = json.loads(annotations[self.nets_annotation])
+                    snet = next(n for n in nets if 'lhnet1' == n.get('interface'))
+                except StopIteration:
+                    msg = f"Annotation {self.nets_annotation} has no interface 'lhnet1'"
+                    return False, (msg, imgr)
+
+                # Check k8s.v1.cni.cncf.io/network-status
+                try:
+                    net_statuses = json.loads(annotations[self.net_status_annotation])
+                    snet_status = next(s for s in net_statuses if 'lhnet1' == s.get('interface'))
+                except StopIteration:
+                    msg = f"Annotation {self.net_status_annotation} has no interface 'lhnet1'"
+                    return False, (msg, imgr)
+
+                snet_ips = snet_status.get('ips', ['::1'])
+                if not all(ip_address(sip) in ip_network(snet_cidr) for sip in snet_ips):
+                    return False, (f"Dedicated IPs {snet_ips} do NOT fit {snet_cidr}", imgr)
+
+                # Check network name identical in both annotations
+                if f"{snet.get('namespace')}/{snet.get('name')}" != snet_status.get('name'):
+                    msg = "Network name is not identical between annotations {} and {}".format(
+                        self.nets_annotation, self.net_status_annotation)
+                    return False, (msg, imgr)
+
+            return True, (None, None)
+
+        @wait_until(wait_timeout, sleep_timeout)
+        def wait_storage_net_disabled_on_longhorn(self):
+            imgrs_running, (code, data) = self._lh_instance_mgrs_running()
+            if not imgrs_running:
+                return False, (code, data)
+
+            for imgr in data:
+                if self.nets_annotation in imgr['metadata']['annotations']:
+                    return False, (f"Pod should not have annotation {self.nets_annotation}", imgr)
+
+            return True, (None, None)
+
+    return SettingChecker()
+
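A short usage sketch for setting_checker, assuming the wait_until decorator returns the (bool, payload) pair of the wrapped check; the CIDR and the preceding storage-network settings update (not shown) are illustrative.

def test_storage_network_applied(setting_checker):
    snet_cidr = "192.168.100.0/24"   # example storage-network IP range
    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
    assert snet_enabled, (code, data)
    snet_enabled, (msg, data) = setting_checker.wait_storage_net_enabled_on_longhorn(snet_cidr)
    assert snet_enabled, (msg, data)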
@@ -63,7 +178,7 @@

Functions

diff --git a/backend/fixtures/terraform.html b/backend/fixtures/terraform.html index 749002701..3f9dd8ac9 100644 --- a/backend/fixtures/terraform.html +++ b/backend/fixtures/terraform.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.terraform API documentation - + @@ -37,54 +48,159 @@

Functions

def remove_ansicode(ctx)
+
+ +Expand source code + +
def remove_ansicode(ctx):
+    if isinstance(ctx, bytes):
+        ctx = ctx.decode()
+    return re.sub(r"\x1b|\[\d+m", "", ctx)
+
def tf_executor(tf_script_dir)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_executor(tf_script_dir):
+    run(str(tf_script_dir / "terraform_install.sh"), stdout=PIPE, stderr=PIPE)
+    executor = tf_script_dir / "bin/terraform"
+    assert executor.is_file()
+
+    yield executor
+
def tf_harvester(api_client, tf_script_dir, tf_provider_version, tf_executor)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_harvester(api_client, tf_script_dir, tf_provider_version, tf_executor):
+    harv = TerraformHarvester(tf_executor, tf_script_dir / datetime.now().strftime("%Hh%Mm_%m-%d"))
+    kubeconfig = api_client.generate_kubeconfig()
+    out, err, exc_code = harv.initial_provider(kubeconfig, tf_provider_version)
+    assert not err and 0 == exc_code
+    return harv
+
def tf_provider_rancher_ver(request, harvester_metadata)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_provider_rancher_ver(request, harvester_metadata):
+    version = request.config.getoption('--terraform-provider-rancher')
+    if not version:
+        import requests
+        resp = requests.get("https://registry.terraform.io/v1/providers/rancher/rancher2")
+        version = max(resp.json()['versions'], key=parse_version)
+    harvester_metadata['Terraform Rancher Provider Version'] = version
+    return version
+
def tf_provider_version(request, harvester_metadata)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_provider_version(request, harvester_metadata):
+    version = request.config.getoption('--terraform-provider-harvester')
+    if not version:
+        import requests
+        resp = requests.get("https://registry.terraform.io/v1/providers/harvester/harvester")
+        version = max(resp.json()['versions'], key=parse_version)
+    harvester_metadata['Terraform Harvester Provider Version'] = f"{version}"
+    return version
+
-def tf_rancher(rancher_api_client, tf_script_dir, tf_provider_rancher_ver, tf_executor, harvester, rancher) +def tf_rancher(rancher_api_client,
tf_script_dir,
tf_provider_rancher_ver,
tf_executor,
harvester,
rancher)
+
+ +Expand source code + +
@pytest.fixture(scope="module")
+def tf_rancher(rancher_api_client, tf_script_dir, tf_provider_rancher_ver, tf_executor,
+               harvester, rancher):
+    tf_rancher = TerraformRancher(tf_executor,
+                                  tf_script_dir / datetime.now().strftime("%Hh%Mm_%m-%d"))
+    kubeconfig = rancher_api_client.generate_kubeconfig(harvester["id"], harvester["name"])
+
+    out, err, exc_code = \
+        tf_rancher.initial_provider(kubeconfig, tf_provider_rancher_ver, harvester, rancher)
+    assert not err and 0 == exc_code
+    return tf_rancher
+
def tf_rancher_resource(tf_provider_rancher_ver)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_rancher_resource(tf_provider_rancher_ver):
+    converter = Path("./terraform_test_artifacts/json2hcl")
+    return TerraformRancherResource.for_version(tf_provider_rancher_ver)(converter)
+
def tf_resource(tf_provider_version)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_resource(tf_provider_version):
+    converter = Path("./terraform_test_artifacts/json2hcl")
+    # map `0.0.0-dev` to a high version so the newest resource class is selected
+    version = "8.8.99" if tf_provider_version == '0.0.0-dev' else tf_provider_version
+    return TerraformResource.for_version(version)(converter)
+
def tf_script_dir(request)
+
+ +Expand source code + +
@pytest.fixture(scope="session")
+def tf_script_dir(request):
+    return Path(request.config.getoption('--terraform-scripts-location'))
+
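A hedged sketch of the typical flow through these Terraform fixtures: render HCL with tf_resource, write it into the working directory, then apply and destroy the single target via tf_harvester. The resource name and public key are examples.

def test_ssh_key_via_terraform(tf_harvester, tf_resource, unique_name):
    spec = tf_resource.ssh_key(unique_name, unique_name, "ssh-rsa AAAAB3NzaC1yc2E example@host")
    tf_harvester.save_as(spec.ctx, unique_name)

    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
    assert not err and 0 == code, (out, err)

    out, err, code = tf_harvester.destroy_resource(spec.type, spec.name)
    assert not err and 0 == code, (out, err)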
@@ -97,7 +213,6 @@

Classes

(converter)
-
Expand source code @@ -151,6 +266,7 @@

Classes

return ResourceContext(resource_type, resource_name, self.convert_to_hcl(rv), rv) return rv
+

Subclasses

  • TerraformRancherResource
  • @@ -185,12 +301,40 @@

    Methods

    def convert_to_hcl(self, json_spec, raw=False)
    +
    + +Expand source code + +
    def convert_to_hcl(self, json_spec, raw=False):
    +    rv = run(f"echo {json.dumps(json_spec)!r} | {self.executor!s}",
    +             shell=True, stdout=PIPE, stderr=PIPE)
    +    if raw:
    +        return rv
    +    if rv.stderr:
    +        raise TypeError(rv.stderr, rv.stdout, rv.returncode)
    +    out = rv.stdout.decode()
    +    out = re.sub(r'"resource"', "resource", out)    # resource should not quote
    +    out = re.sub(r"\"(.+?)\" =", r"\1 =", out)      # property should not quote
    +    out = re.sub(r'"(data\.\S+?)"', r"\1", out)     # data should not quote
    +    out = re.sub(r"(.[^ ]+) = {", r"\1 {", out)     # block should not have `=`
    +    return out
    +
    def make_resource(self, resource_type, resource_name, *, convert=True, **properties)
    +
    + +Expand source code + +
    def make_resource(self, resource_type, resource_name, *, convert=True, **properties):
    +    rv = dict(resource={resource_type: {resource_name: properties}})
    +    if convert:
    +        return ResourceContext(resource_type, resource_name, self.convert_to_hcl(rv), rv)
    +    return rv
    +
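A brief illustration of make_resource: it wraps the properties into the Terraform JSON layout (raw) and, with convert=True, also renders the HCL string (ctx) through the json2hcl converter and the clean-up regexes above; the volume spec is an example.

ctx = tf_resource.make_resource("harvester_volume", "demo", name="demo-vol", size="2Gi")
assert ctx.raw == {"resource": {"harvester_volume": {"demo": {"name": "demo-vol", "size": "2Gi"}}}}
tf_harvester.save_as(ctx.ctx, ctx.name)   # writes demo.tf into the Terraform working dir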
    @@ -200,7 +344,6 @@

    Methods

    (type: str, name: str, ctx: str, raw: dict = <factory>)
    -

    ResourceContext(type: str, name: str, ctx: str, raw: dict = <factory>)

    Expand source code @@ -212,6 +355,7 @@

    Methods

    ctx: str raw: dict = field(default_factory=dict, compare=False)
    +

    ResourceContext(type: str, name: str, ctx: str, raw: dict = <factory>)

    Class variables

    var ctx : str
    @@ -237,7 +381,6 @@

    Class variables

    (executor, workdir)
    -
    Expand source code @@ -302,6 +445,7 @@

    Class variables

    def destroy_resource(self, resource_type, resource_name): return self.execute(f"destroy -auto-approve -target {resource_type}.{resource_name}")
    +

    Subclasses

    • TerraformRancher
    • @@ -312,36 +456,114 @@

      Methods

      def apply_resource(self, resource_type, resource_name)
      +
      + +Expand source code + +
      def apply_resource(self, resource_type, resource_name):
      +    return self.execute(f"apply -auto-approve -target {resource_type}.{resource_name}")
      +
      def destroy_resource(self, resource_type, resource_name)
      +
      + +Expand source code + +
      def destroy_resource(self, resource_type, resource_name):
      +    return self.execute(f"destroy -auto-approve -target {resource_type}.{resource_name}")
      +
      def exec_command(self, cmd, raw=False, **kws)
      +
      + +Expand source code + +
      def exec_command(self, cmd, raw=False, **kws):
      +    rv = run(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.workdir, **kws)
      +
      +    if raw:
      +        return rv
      +    return remove_ansicode(rv.stdout), remove_ansicode(rv.stderr), rv.returncode
      +
      def execute(self, cmd, raw=False, **kws)
      +
      + +Expand source code + +
      def execute(self, cmd, raw=False, **kws):
      +    return self.exec_command(f"{self.executor} {cmd}", raw=raw, **kws)
      +
      def initial_provider(self, kubeconfig, provider_version)
      +
      + +Expand source code + +
      def initial_provider(self, kubeconfig, provider_version):
      +    kubefile = self.workdir / "kubeconfig"
      +    with open(kubefile, "w") as f:
      +        f.write(kubeconfig)
      +
      +    with open(self.workdir / "provider.tf", "w") as f:
      +        f.write(TF_PROVIDER % dict(
      +            tf_version=">=0.13", config_path=kubefile.resolve(),
      +            provider_source="harvester/harvester", provider_version=provider_version
      +        ))
      +    init_arg = ""
      +
      +    if provider_version == "0.0.0-dev":
      +        local_plugin_path = "./registry.terraform.io/harvester/harvester/0.0.0-dev"
      +        docker_plugin_path = (
      +            "/root/.terraform.d/plugins/terraform.local/local/"
      +            "harvester/0.0.0-dev/linux_amd64/terraform-provider-harvester_v0.0.0-dev"
      +        )
      +        rv = run(
      +            f"mkdir -p {local_plugin_path}"
      +            f"&& mkdir linux_amd64"
      +            f"&& docker run --pull=always -q --rm --name harv-tf-master-head"
      +            f" -v ./linux_amd64:/_tf"
      +            f" rancher/terraform-provider-harvester:master-head-amd64"
      +            f' bash -c "cp {docker_plugin_path} /_tf/"'
      +            f"&& mv linux_amd64 {local_plugin_path}",
      +            shell=True, stdout=PIPE, stderr=PIPE, cwd=self.workdir
      +        )
      +        assert not remove_ansicode(rv.stderr) and 0 == rv.returncode
      +        init_arg = " -plugin-dir ."
      +
      +    return self.execute(f"init {init_arg}")
      +
      def save_as(self, content, filename, ext='.tf')
      +
      + +Expand source code + +
      def save_as(self, content, filename, ext=".tf"):
      +    filepath = self.workdir / f"{filename}{ext}"
      +    with open(filepath, "w") as f:
      +        f.write(content)
      +
    @@ -351,7 +573,6 @@

    Methods

    (executor, workdir)
    -
    Expand source code @@ -373,6 +594,7 @@

    Methods

    return self.execute("init")
    +

    Ancestors

    • TerraformHarvester
    • @@ -383,6 +605,26 @@

      Methods

      def initial_provider(self, kubeconfig, provider_version, harvester, rancher)
      +
      + +Expand source code + +
      def initial_provider(self, kubeconfig, provider_version, harvester, rancher):
      +    kubefile = self.workdir / "kubeconfig"
      +    with open(kubefile, "w") as f:
      +        f.write(kubeconfig)
      +
      +    with open(self.workdir / "provider.tf", "w") as f:
      +        f.write(TF_PROVIDER_RANCHER % {
      +            "provider_source": "rancher/rancher2",
      +            "provider_version": provider_version,
      +            "rancher_endpoint": rancher["endpoint"],
      +            "rancher_token": rancher["token"],
      +            "harvester_name": harvester["name"]
      +        })
      +
      +    return self.execute("init")
      +
      @@ -392,7 +634,6 @@

      Methods

      (converter)
      -
      Expand source code @@ -430,6 +671,7 @@

      Methods

      } return ResourceContext("rancher2_cluster_v2", rke_cluster_name, hcl_str, "")
      +

      Ancestors

      • BaseTerraformResource
      • @@ -444,12 +686,48 @@

        Methods

        def cluster_config(self, rke_cluster_name, k8s_version, harvester_name, cloud_credential_name)
        +
        + +Expand source code + +
        def cluster_config(self, rke_cluster_name, k8s_version, harvester_name, cloud_credential_name):
        +    machine_pools = TF_MACHINE_POOLS % {
        +        "cloud_credential_name": cloud_credential_name,
        +        "machine_config_name": rke_cluster_name
        +    }
        +    rke_config = TF_RKE_CONFIG % {
        +        "machine_pools": machine_pools,
        +        "harvester_name": harvester_name
        +    }
        +    hcl_str = TF_CLUSTER_CONFIG % {
        +        "name": rke_cluster_name,
        +        "rke2_version": k8s_version,
        +        "rke_config": rke_config
        +    }
        +    return ResourceContext("rancher2_cluster_v2", rke_cluster_name, hcl_str, "")
        +
        def machine_config(self, rke_cluster_name, network_id, image_id, ssh_user)
        +
        + +Expand source code + +
        def machine_config(self, rke_cluster_name, network_id, image_id, ssh_user):
        +    hcl_str = TF_MACHINE_CONFIG % {
        +        "name": rke_cluster_name,
        +        "harvester_config": TF_HARVESTER_CONFIG % {
        +            "ssh_user": ssh_user,
        +            "disk_info": TF_DISK_INFO % {"image_name": image_id},
        +            "network_info": TF_NETWORK_INFO % {"network_name": network_id},
        +            "user_data": TF_USER_DATA
        +        }
        +    }
        +    return ResourceContext("rancher2_machine_config_v2", rke_cluster_name, hcl_str, "")
        +
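A hedged sketch of composing the Rancher-side pieces of an RKE2 guest cluster with this resource class; all names and IDs below are illustrative, and the cloud credential is assumed to have been applied beforehand.

machine = tf_rancher_resource.machine_config("demo-rke2", "default/vlan1", "default/ubuntu-image", "ubuntu")
cluster = tf_rancher_resource.cluster_config("demo-rke2", rke2_version, "harvester", "demo-credential")
tf_rancher.save_as("\n".join([machine.ctx, cluster.ctx]), "demo-rke2")
out, err, code = tf_rancher.apply_resource(cluster.type, cluster.name)
assert not err and 0 == code, (out, err)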
        @@ -467,7 +745,6 @@

        Inherited members

        (converter)
        -
        Expand source code @@ -488,6 +765,7 @@

        Inherited members

        harvester_credential_config=harvester_credential_config, convert=convert, **properties)
        +

        Ancestors

        • TerraformRancherResource
        • @@ -499,6 +777,21 @@

          Methods

          def cloud_credential(self, name, harvester_name, *, convert=True, **properties)
          +
          + +Expand source code + +
          def cloud_credential(self, name, harvester_name, *, convert=True, **properties):
          +    harvester_credential_config = {
          +        "cluster_id": f"data.rancher2_cluster_v2.{harvester_name}.cluster_v1_id",
          +        "cluster_type": "imported",
          +        "kubeconfig_content": f"data.rancher2_cluster_v2.{harvester_name}.kube_config"
          +    }
          +    return self.make_resource("rancher2_cloud_credential", name,
          +                              name=name,
          +                              harvester_credential_config=harvester_credential_config,
          +                              convert=convert, **properties)
          +
          @@ -516,7 +809,6 @@

          Inherited members

          (converter)
          -
          Expand source code @@ -572,6 +864,7 @@

          Inherited members

          convert=convert, **properties )
          +

          Ancestors

          • BaseTerraformResource
          • @@ -586,42 +879,123 @@

            Methods

            def image_download(self, resource_name, name, display_name, url, *, convert=True, **properties)
            +
            + +Expand source code + +
            def image_download(
            +    self, resource_name, name, display_name, url, *, convert=True, **properties
            +):
            +    return self.make_resource(
            +        "harvester_image", resource_name, name=name, display_name=display_name, url=url,
            +        source_type="download", convert=convert, **properties
            +    )
            +
            -def image_export_from_volume(self, resource_name, name, display_name, pvc_name, pvc_namespace, *, convert=True, **properties) +def image_export_from_volume(self,
            resource_name,
            name,
            display_name,
            pvc_name,
            pvc_namespace,
            *,
            convert=True,
            **properties)
            +
            + +Expand source code + +
            def image_export_from_volume(
            +    self, resource_name, name, display_name, pvc_name, pvc_namespace,
            +    *, convert=True, **properties
            +):
            +    return self.make_resource(
            +        "harvester_image", resource_name, name=name, display_name=display_name,
            +        pvc_name=pvc_name, pvc_namespace=pvc_namespace, source_type="export-from-volume",
            +        convert=convert, **properties
            +    )
            +
            def network(self, resource_name, name, vlan_id, *, convert=True, **properties)
            +
            + +Expand source code + +
            def network(self, resource_name, name, vlan_id, *, convert=True, **properties):
            +    return self.make_resource(
            +        "harvester_network", resource_name, name=name, vlan_id=vlan_id,
            +        convert=convert, **properties
            +    )
            +
            def ssh_key(self, resource_name, name, public_key, *, convert=True, **properties)
            +
            + +Expand source code + +
            def ssh_key(self, resource_name, name, public_key, *, convert=True, **properties):
            +    return self.make_resource(
            +        "harvester_ssh_key", resource_name, name=name, public_key=public_key,
            +        convert=convert, **properties
            +    )
            +
            def virtual_machine(self, resource_name, name, disks, nics, *, convert=True, **properties)
            +
            + +Expand source code + +
            def virtual_machine(self, resource_name, name, disks, nics, *, convert=True, **properties):
            +    disks.extend(properties.pop("disk", []))
            +    nics.extend(properties.pop("network_interface", []))
            +    return self.make_resource(
            +        "harvester_virtualmachine", resource_name, name=name, disk=disks,
            +        network_interface=nics, convert=convert, **properties
            +    )
            +
            def vm(self, resource_name, name, disks, nics, *, convert=True, **properties)
            +
            + +Expand source code + +
            def virtual_machine(self, resource_name, name, disks, nics, *, convert=True, **properties):
            +    disks.extend(properties.pop("disk", []))
            +    nics.extend(properties.pop("network_interface", []))
            +    return self.make_resource(
            +        "harvester_virtualmachine", resource_name, name=name, disk=disks,
            +        network_interface=nics, convert=convert, **properties
            +    )
            +
            def volume(self, resource_name, name, size=1, *, convert=True, **properties)
            +
            + +Expand source code + +
            def volume(self, resource_name, name, size=1, *, convert=True, **properties):
            +    size = size if isinstance(size, str) else f"{size}Gi"
            +    return self.make_resource(
            +        "harvester_volume", resource_name, name=name, size=size,
            +        convert=convert, **properties
            +    )
            +
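A hedged sketch of building a VM with this resource class. Only the methods come from the source above; the disk and NIC dictionaries follow the harvester provider schema and, like the image URL and extra properties, are assumptions.

img = tf_resource.image_download("ubuntu", "ubuntu", "ubuntu-focal",
                                 "https://cloud-images.example.com/focal.img")
disks = [dict(name="rootdisk", type="disk", size="10Gi", bus="virtio",
              image="${harvester_image.ubuntu.id}")]
nics = [dict(name="default", network_name="default/vlan1")]
vm = tf_resource.virtual_machine("demo-vm", "demo-vm", disks, nics, cpu=2, memory="2Gi")
tf_harvester.save_as("\n".join([img.ctx, vm.ctx]), "demo-vm")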
            @@ -639,7 +1013,6 @@

            Inherited members

            (converter)
            -
            Expand source code @@ -687,6 +1060,7 @@

            Inherited members

            convert=convert, **properties )
            +

            Ancestors

            • TerraformResource
            • @@ -702,24 +1076,79 @@

              Methods

              def cluster_network(self, resource_name, name, *, convert=True, **properties)
              +
              + +Expand source code + +
              def cluster_network(self, resource_name, name, *, convert=True, **properties):
              +    return self.make_resource(
              +        "harvester_clusternetwork", resource_name, name=name, convert=convert, **properties
              +    )
              +
              def network(self, resource_name, name, vlan_id, cluster_network_name, *, convert=True, **properties)
              +
              + +Expand source code + +
              def network(
              +    self, resource_name, name, vlan_id, cluster_network_name, *, convert=True, **properties
              +):
              +    return super().network(
              +        resource_name, name, vlan_id, cluster_network_name=cluster_network_name,
              +        convert=convert, **properties
              +    )
              +
              -def storage_class(self, resource_name, name, replicas=1, stale_timeout=30, migratable='true', *, convert=True, **properties) +def storage_class(self,
              resource_name,
              name,
              replicas=1,
              stale_timeout=30,
              migratable='true',
              *,
              convert=True,
              **properties)
              +
              + +Expand source code + +
              def storage_class(
              +    self, resource_name, name, replicas=1, stale_timeout=30, migratable="true",
              +    *, convert=True, **properties
              +):
              +    params = {
              +            "migratable": migratable,
              +            "numberOfReplicas": str(replicas),
              +            "staleReplicaTimeout": str(stale_timeout)
              +    }
              +    params.update(properties.pop('parameters', {}))
              +    return self.make_resource(
              +        "harvester_storageclass", resource_name, name=name, parameters=params,
              +        convert=convert, **properties
              +    )
              +
              def vlanconfig(self, resource_name, name, cluster_network_name, nics, *, convert=True, **properties)
              +
              + +Expand source code + +
              def vlanconfig(
              +    self, resource_name, name, cluster_network_name, nics, *, convert=True, **properties
              +):
              +    uplink = properties.pop('uplink', dict())
              +    uplink['nics'] = nics
              +
              +    return self.make_resource(
              +        "harvester_vlanconfig", resource_name, name=name, uplink=uplink,
              +        cluster_network_name=cluster_network_name, convert=convert, **properties
              +    )
              +
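A hedged sketch of the networking chain added in this resource class (cluster network, VLAN config, then VM network), assuming the provider version selects this v0.6.0+ class; the NIC name and VLAN ID are examples.

cnet = tf_resource.cluster_network("cnet", "cnet-demo")
vlancfg = tf_resource.vlanconfig("vlancfg", "cnet-demo-all-nodes", "cnet-demo", ["ens5"])
vnet = tf_resource.network("vnet", "vlan100", 100, "cnet-demo")
tf_harvester.save_as("\n".join([cnet.ctx, vlancfg.ctx, vnet.ctx]), "networking")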
              @@ -737,7 +1166,6 @@

              Inherited members

              (converter)
              -
              Expand source code @@ -755,6 +1183,7 @@

              Inherited members

              user_data=user_data, network_data=network_data, convert=convert, **properties )
              +

              Ancestors

              • TerraformResource_060
              • @@ -767,6 +1196,18 @@

                Methods

                def cloudinit_secret(self, resource_name, name, user_data='', network_data='', *, convert=True, **properties)
                +
                + +Expand source code + +
                def cloudinit_secret(
                +    self, resource_name, name, user_data="", network_data="", *, convert=True, **properties
                +):
                +    return self.make_resource(
                +        "harvester_cloudinit_secret", resource_name,
                +        user_data=user_data, network_data=network_data, convert=convert, **properties
                +    )
                +
                @@ -889,7 +1330,7 @@

                -

                Generated by pdoc 0.11.1.

                +

                Generated by pdoc 0.11.5.

                diff --git a/backend/fixtures/virtualmachines.html b/backend/fixtures/virtualmachines.html index 84c405334..b79e202a3 100644 --- a/backend/fixtures/virtualmachines.html +++ b/backend/fixtures/virtualmachines.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.virtualmachines API documentation - + @@ -37,30 +48,502 @@

                Functions

                def vm_calc()
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def vm_calc():
                +    from re import match
                +    from json import loads
                +
                +    class VMResourceCalc:
                +        UNITS = ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
                +        FRACTIONAL = ('', 'm', 'u', 'n', 'p', 'f', 'a', 'z', 'y')
                +
                +        @classmethod
                +        def node_resources(cls, node, *, res_types=('cpu', 'memory')):
                +            reserved = loads(node['metadata']['annotations']["management.cattle.io/pod-requests"])
                +            reserved = {k: cls.parse_unit(v) for k, v in reserved.items() if k in res_types}
                +            available = node['status']['allocatable']
                +            available = {k: cls.parse_unit(v) for k, v in available.items() if k in res_types}
                +            schedulable = {k: v - reserved[k] for k, v in available.items()}
                +            return {
                +                'schedulable': schedulable,
                +                'available': available,
                +                'reserved': reserved
                +            }
                +
                +        @classmethod
                +        def format_unit(
                +            cls, value, *, increment=1000, start_exp=0, min_exp=0, max_exp=99, max_precision=2,
                +            suffix='', add_suffix=True, suffix_space=True, first_suffix=None, can_round_0=True
                +        ):
                +            # type: (int, int, int, int, int, int) -> str
                +            # https://github.com/harvester/dashboard/blob/master/shell/utils/units.js#L4
                +
                +            val, exp, divide = value, start_exp, max_exp >= 0
                +
                +            if divide:
                +                while exp < min_exp or (val >= increment and exp + 1 < len(cls.UNITS)
                +                                        and exp < max_exp):
                +                    val = val / increment
                +                    exp += 1
                +            else:
                +                while exp < (min_exp * -1) or (val < increment and exp + 1 < len(cls.FRACTIONAL)
                +                                               and exp < (max_exp * -1)):
                +                    val = val * increment
                +                    exp += 1
                +
                +            if val < 10 and max_precision >= 1:
+                rv = f"{round(val * (10 ** max_precision)) / (10 ** max_precision):g}"
                +            else:
                +                rv = f"{round(val)}"
                +
                +            if rv == '0' and not can_round_0 and value != 0:
                +                val, exp = value, 0
                +                while val >= increment:
                +                    val /= increment
                +                    exp += 1
                +                return cls.format_unit(
                +                    val, increment=increment, start_exp=start_exp, min_exp=exp, max_exp=exp,
                +                    max_precision=max_precision, suffix=suffix, add_suffix=add_suffix,
                +                    suffix_space=suffix_space, first_suffix=first_suffix
                +                )
                +
                +            if add_suffix:
                +                rv = f"{rv} " if suffix_space else rv
                +                if exp == 0 and first_suffix is not None:
                +                    rv += f"{first_suffix}"
                +                else:
                +                    rv += f"{cls.UNITS[exp] if divide else cls.FRACTIONAL[exp]}{suffix}"
                +
                +            return rv
                +
                +        @classmethod
                +        def parse_unit(cls, value):
                +            # https://github.com/harvester/dashboard/blob/master/shell/utils/units.js#L83
                +            try:
                +                pattern = r"^([0-9.-]+)\s*([^0-9.-]?)([^0-9.-]?)"
                +                val, unit, inc = match(pattern, value).groups()
                +                val = float(val)
                +                assert unit != ""
                +            except AttributeError:
                +                raise ValueError("Could not parse the value", value)
                +            except (AssertionError, ValueError):
                +                return val
                +
                +            # µ (mu) symbol -> u
                +            unit = 'u' if ord(unit[0]) == 181 else unit
                +
                +            divide = unit in cls.FRACTIONAL
                +            multiply = unit.upper() in cls.UNITS
                +            inc_base = 1024 if inc == 'i' and (divide or multiply) else 1000
                +
                +            if divide:
                +                exp = cls.FRACTIONAL.index(unit)
                +                return val / (inc_base ** exp)
                +
                +            if multiply:
                +                exp = cls.UNITS.index(unit.upper())
                +                return val * (inc_base ** exp)
                +
                +    return VMResourceCalc
                +
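A worked example of the calculator's unit handling, derived from the code above; the exact values follow from parse_unit expanding suffixes and format_unit condensing raw numbers.

def test_vm_calc_units(vm_calc):
    assert vm_calc.parse_unit("4Gi") == 4 * 1024 ** 3   # binary increment ('i' suffix)
    assert vm_calc.parse_unit("500m") == 0.5            # fractional suffix
    # format_unit condenses a raw byte count back into a human readable string
    assert "4 GiB" == vm_calc.format_unit(4 * 1024 ** 3, increment=1024, suffix="iB")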
                def vm_checker(api_client, wait_timeout, sleep_timeout, vm_shell)
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def vm_checker(api_client, wait_timeout, sleep_timeout, vm_shell):
                +    from dataclasses import dataclass, field
                +
                +    @dataclass
                +    class ResponseContext:
                +        callee: str
                +        code: int
                +        data: dict
                +        options: dict = field(default_factory=dict, compare=False)
                +
                +        def __iter__(self):
+            ''' handy method used to unpack '''
                +            return iter([self.code, self.data])
                +
                +    @dataclass
                +    class ShellContext:
                +        command: str
                +        stdout: str
                +        stderr: str
                +        options: dict = field(default_factory=dict, compare=False)
                +
                +        def __iter__(self):
+            ''' handy method used to unpack '''
                +            return iter([self.stdout, self.stderr])
                +
                +    def default_cb(ctx):
+        ''' identity callback function for adjusting the checking condition.
                +
                +        :rtype: boolean
+        :return: True when the additional check is satisfied
                +        '''
                +
                +        return True
                +
                +    class VMChecker:
                +        def __init__(self, vm_api, wait_timeout, snooze=3):
                +            self.vms = vm_api
                +            self.wait_timeout = wait_timeout
                +            self.snooze = snooze
                +
                +        def _endtime(self):
                +            return datetime.now() + timedelta(seconds=self.wait_timeout)
                +
                +        @contextmanager
                +        def configure(self, snooze=None, wait_timeout=None):
                +            ''' context manager to temporarily change snooze or wait_timeout '''
                +            s, t = self.snooze, self.wait_timeout
                +            try:
                +                self.snooze, self.wait_timeout = snooze or s, wait_timeout or t
                +                yield self
                +            finally:
                +                self.snooze, self.wait_timeout = s, t
                +
                +        def wait_getable(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('vm.get', *self.vms.get(vm_name, **kws))
                +                if 200 == ctx.code and callback(ctx):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return True, ctx
                +
                +        def wait_stopped(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            ctx = ResponseContext('vm.stop', *self.vms.stop(vm_name, **kws))
                +            if 404 == ctx.code and callback(ctx):
                +                return False, ctx
                +
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('get_status', *self.vms.get_status(vm_name, **kws))
                +                if 404 == ctx.code and callback(ctx):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return True, ctx
                +
                +        def wait_status_stopped(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            def cb(ctx):
                +                if ctx.callee == 'vm.stop':
                +                    return callback(ctx)
                +                ctx.code, ctx.data = self.vms.get(vm_name, **kws)
                +                ctx.callee = 'vm.get'
                +                return (
                +                    200 == ctx.code
                +                    and "Stopped" == ctx.data.get('status', {}).get('printableStatus')
                +                    and callback(ctx)
                +                )
                +            return self.wait_stopped(vm_name, endtime, cb, **kws)
                +
                +        def wait_status_running(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('vm.get', *self.vms.get(vm_name, **kws))
                +                status = ctx.data.get('status', {}).get('printableStatus')
                +                if 200 == ctx.code and "Running" == status and callback(ctx):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return True, ctx
                +
                +        def wait_deleted(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            ctx = ResponseContext('vm.delete', *self.vms.delete(vm_name, **kws))
                +            if 404 == ctx.code and callback(ctx):
                +                return False, ctx
                +
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('vm.get_status', *self.vms.get_status(vm_name, **kws))
                +                if 404 == ctx.code and callback(ctx):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return True, ctx
                +
                +        def wait_restarted(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            ctx = ResponseContext('vm.get_status', *self.vms.get_status(vm_name, **kws))
                +            if 404 == ctx.code and callback(ctx):
                +                return False, ctx
                +
                +            options = dict(old_pods=set(ctx.data['status']['activePods'].items()))
                +            ctx = ResponseContext('vm.restart', *self.vms.restart(vm_name, **kws), options)
                +            if 404 == ctx.code and callback(ctx):
                +                return False, ctx
                +
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('vm.get_status', *self.vms.get_status(vm_name, **kws),
                +                                      ctx.options)
                +                if 404 != ctx.code:
                +                    old_pods = ctx.options['old_pods']
                +                    cur_pods = ctx.data['status'].get('activePods', {}).items()
                +                    if old_pods.difference(cur_pods or old_pods) and callback(ctx):
                +                        break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return self.wait_started(vm_name, endtime, callback, **kws)
                +
                +        def wait_started(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            ctx = ResponseContext('vm.start', *self.vms.start(vm_name, **kws))
                +            if 404 == ctx.code and callback(ctx):
                +                return False, ctx
                +
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('vm.get_status', *self.vms.get_status(vm_name, **kws))
                +                if (
                +                    200 == ctx.code
                +                    and "Running" == ctx.data.get('status', {}).get('phase')
                +                    and callback(ctx)
                +                ):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return True, ctx
                +
                +        def wait_agent_connected(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            def cb(ctx):
                +                if ctx.callee == 'vm.start':
                +                    return callback(ctx)
                +
                +                conds = ctx.data.get('status', {}).get('conditions', [{}])
                +                return (
                +                    "AgentConnected" == conds[-1].get('type')
                +                    and callback(ctx)
                +                )
                +
                +            return self.wait_started(vm_name, endtime, cb, **kws)
                +
                +        def wait_interfaces(self, vm_name, endtime=None, callback=default_cb, **kws):
                +            def cb(ctx):
                +                if ctx.callee == 'vm.start':
                +                    return callback(ctx)
                +
                +                return (
                +                    ctx.data.get('status', {}).get('interfaces')
                +                    and callback(ctx)
                +                )
                +            return self.wait_agent_connected(vm_name, endtime, cb, **kws)
                +
                +        def wait_ip_addresses(self, vm_name, ifnames, endtime=None, callback=default_cb, **kws):
                +            def cb(ctx):
                +                if ctx.callee == 'vm.start':
                +                    return callback(ctx)
                +                ifaces = {d['name']: d for d in ctx.data.get('status', {}).get('interfaces', {})}
                +                return (
                +                    all(ifaces.get(name, {}).get('ipAddress') for name in ifnames)
                +                    and callback(ctx)
                +                )
                +
                +            ifnames = list(ifnames)
                +            return self.wait_interfaces(vm_name, endtime, cb, **kws)
                +
                +        def wait_cloudinit_done(self, shell, endtime=None, callback=default_cb, **kws):
                +            cmd = 'cloud-init status'
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ShellContext(cmd, *shell.exec_command(cmd))
                +                if 'done' in ctx.stdout and callback(ctx):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, (ctx.stdout, ctx.stderr)
                +            return True, (ctx.stdout, ctx.stderr)
                +
                +        def wait_migrated(self, vm_name, new_host, endtime=None, callback=default_cb, **kws):
                +            ctx = ResponseContext('vm.migrate', *self.vms.migrate(vm_name, new_host, **kws))
                +            if 404 == ctx.code and callback(ctx):
                +                return False, ctx
                +
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                ctx = ResponseContext('vm.get_status', *self.vms.get_status(vm_name, **kws))
                +                if (
                +                    not ctx.data['metadata']['annotations'].get("harvesterhci.io/migrationState")
                +                    and new_host == ctx.data['status']['nodeName']
                +                    and callback(ctx)
                +                ):
                +                    break
                +                sleep(self.snooze)
                +            else:
                +                return False, ctx
                +            return True, ctx
                +
                +        def wait_ssh_connected(
                +            self, vm_ip, username, password=None, pkey=None, endtime=None, **kws
                +        ):
                +            vm_sh = vm_shell(username, password, pkey)
                +            endtime = endtime or self._endtime()
                +            while endtime > datetime.now():
                +                try:
                +                    vm_sh.connect(vm_ip, **kws)
                +                except (ChannelException, NoValidConnectionsError) as e:
                +                    login_ex = e
                +                    sleep(self.snooze)
                +                else:
                +                    break
                +            else:
                +                raise AssertionError(f"Unable to login to VM {vm_ip}") from login_ex
                +
                +            return vm_sh
                +
                +    return VMChecker(api_client.vms, wait_timeout, sleep_timeout)
                +
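A hedged usage sketch of vm_checker; the VM is assumed to exist already (the chained wait helpers call vms.start first), and the interface name, credentials and command are illustrative.

def test_vm_boots_and_reachable(vm_checker, unique_name):
    vm_started, (code, data) = vm_checker.wait_ip_addresses(unique_name, ["default"])
    assert vm_started, (code, data)
    vm_ip = next(i['ipAddress'] for i in data['status']['interfaces'] if i['name'] == 'default')

    vm_sh = vm_checker.wait_ssh_connected(vm_ip, "ubuntu", password="password")
    out, err = vm_sh.exec_command("hostname")
    assert unique_name in out, (out, err)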
                def vm_mgmt_static(api_client)
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def vm_mgmt_static(api_client):
                +    code, data = api_client.hosts.get()
                +    assert 200 == code, (code, data)
                +
                +    for node in data['data']:
                +        rke2_args = node['metadata']['annotations']['rke2.io/node-args']
                +        match = re.search(r'cluster-cidr[\",]+((?:\d+\.?)+\/\d+)\"', rke2_args)
                +        if match:
                +            cluster_cidr = match.group(1)
                +            break
                +    else:
                +        raise AssertionError("cluster-cidr is not available")
                +
                +    mgmt_network = ip_network(cluster_cidr)
                +    mgmt_route = {
                +        "gateway": "10.0.2.1",
                +        "netmask": f"{mgmt_network.netmask}",
                +        "network": f"{mgmt_network.network_address}"
                +    }
                +    return dict(type="static", address="10.0.2.2/24", routes=[mgmt_route])
                +
                def vm_shell()
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def vm_shell():
                +    class VMShell:
                +        _client = _jump = None
                +
                +        def __init__(self, username, password=None, pkey=None):
                +            self.username = username
                +            self.password = password
                +            self.pkey = pkey
                +
                +        @classmethod
                +        def login(cls, ipaddr, username, password=None, pkey=None, port=22, jumphost=None, **kws):
                +            obj = cls(username, password, pkey)
                +            obj.connect(ipaddr, port, jumphost, **kws)
                +            return obj
                +
                +        def __enter__(self):
                +            return self
                +
                +        def __exit__(self, exc_type, exc_value, exc_tb):
                +            self.close()
                +
                +        @property
                +        def client(self):
                +            return self._client
                +
                +        def connect(self, ipaddr, port=22, jumphost=None, **kwargs):
                +            if not self.client:
                +                if jumphost:
                +                    tp = jumphost.get_transport()
                +                    ch = tp.open_channel('direct-tcpip', (ipaddr, port), tp.sock.getpeername())
                +                else:
                +                    ch = None
                +
                +                pkey = RSAKey.from_private_key(StringIO(self.pkey)) if self.pkey else None
                +
                +                cli = SSHClient()
                +                cli.set_missing_host_key_policy(MissingHostKeyPolicy())
                +                kws = dict(username=self.username, password=self.password, pkey=pkey, sock=ch)
                +                kws.update(kwargs)
                +                cli.connect(ipaddr, port, **kws)
                +
                +                self._client = cli
                +
                +            return self
                +
                +        def close(self):
                +            if self.client and self.client.get_transport():
                +                self.client.close()
                +                self._client = None
                +
                +        def exec_command(self, command, bufsize=-1, timeout=None, get_pty=False, env=None):
                +            _, out, err = self.client.exec_command(command, bufsize, timeout, get_pty, env)
                +            return out.read().decode(), err.read().decode()
                +
                +    return VMShell
                +
                def vm_shell_from_host(vm_shell, host_shell, wait_timeout)
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def vm_shell_from_host(vm_shell, host_shell, wait_timeout):
                +    @contextmanager
                +    def vm_login_from_host(
                +        host_ip, vm_ip, username, password=None, pkey=None, wait_timeout=wait_timeout
                +    ):
                +        with host_shell.login(host_ip, jumphost=True) as h:
                +            vm_sh = vm_shell(username, password, pkey)
                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +            while endtime > datetime.now():
                +                try:
                +                    vm_sh.connect(vm_ip, jumphost=h.client)
                +                except (ChannelException, NoValidConnectionsError) as e:
                +                    login_ex = e
                +                    sleep(3)
                +                else:
                +                    break
                +            else:
                +                raise AssertionError(f"Unable to login to VM {vm_ip}") from login_ex
                +
                +            yield vm_sh
                +
                +    return vm_login_from_host
                +
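A hedged sketch of running a command inside a guest through a Harvester node acting as SSH jump host; the addresses and credentials are illustrative.

def test_cloudinit_done_in_guest(vm_shell_from_host):
    host_ip, vm_ip = "192.0.2.10", "10.52.0.5"    # example node and guest addresses
    with vm_shell_from_host(host_ip, vm_ip, "ubuntu", password="password") as sh:
        out, err = sh.exec_command("cloud-init status")
        assert "done" in out, (out, err)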
                @@ -91,7 +574,7 @@

                Functions

                diff --git a/backend/fixtures/volumes.html b/backend/fixtures/volumes.html index 8af91f503..f689820c6 100644 --- a/backend/fixtures/volumes.html +++ b/backend/fixtures/volumes.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.fixtures.volumes API documentation - + @@ -37,6 +48,33 @@

                Functions

                def volume_checker(api_client, wait_timeout, sleep_timeout)
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def volume_checker(api_client, wait_timeout, sleep_timeout):
                +    class VolumeChecker:
                +        def __init__(self):
                +            self.volumes = api_client.volumes
                +            self.lhvolumes = api_client.lhvolumes
                +
                +        @wait_until(wait_timeout, sleep_timeout)
                +        def wait_volumes_detached(self, vol_names):
                +            for vol_name in vol_names:
                +                code, data = self.volumes.get(name=vol_name)
                +                if not (code == 200):
                +                    return False, (code, data)
                +
                +                pvc_name = data["spec"]["volumeName"]
                +                code, data = self.lhvolumes.get(pvc_name)
                +                if not (200 == code and "detached" == data['status']['state']):
                +                    return False, (code, data)
                +
                +            return True, (code, data)
                +
                +    return VolumeChecker()
                +
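A short usage sketch for volume_checker: after the VM owning the volumes is stopped (not shown), wait for the backing Longhorn volumes to reach the detached state; the volume names are examples.

def test_volumes_detached(volume_checker):
    vol_names = ["demo-vm-disk-0", "demo-vm-disk-1"]
    vols_detached, (code, data) = volume_checker.wait_volumes_detached(vol_names)
    assert vols_detached, (code, data)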
                @@ -63,7 +101,7 @@

                Functions

                diff --git a/backend/index.html b/backend/index.html index 5c4a0252c..c70ac7993 100644 --- a/backend/index.html +++ b/backend/index.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests API documentation - + @@ -71,7 +82,7 @@

                Sub-modules

                diff --git a/backend/integrations/index.html b/backend/integrations/index.html index 12907eab0..54ca7b8cb 100644 --- a/backend/integrations/index.html +++ b/backend/integrations/index.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations API documentation - + @@ -136,7 +147,7 @@

                Sub-modules

                diff --git a/backend/integrations/test_0_storage_network.html b/backend/integrations/test_0_storage_network.html index 13a38f8ec..fff7063ee 100644 --- a/backend/integrations/test_0_storage_network.html +++ b/backend/integrations/test_0_storage_network.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_0_storage_network API documentation - + @@ -37,12 +48,156 @@

                Functions

                def cluster_network(request, api_client, unique_name)
                +
                + +Expand source code + +
                @pytest.fixture(scope='module')
                +def cluster_network(request, api_client, unique_name):
                +    vlan_nic = request.config.getoption('--vlan-nic')
                +    assert vlan_nic, f"VLAN NIC {vlan_nic} not configured correctly."
                +
                +    code, data = api_client.clusternetworks.get_config()
                +    assert 200 == code, (code, data)
                +
                +    node_key = 'network.harvesterhci.io/matched-nodes'
                +    cnet_nodes = dict()  # cluster_network: items
                +    for cfg in data['items']:
                +        if vlan_nic in cfg['spec']['uplink']['nics']:
                +            nodes = json.loads(cfg['metadata']['annotations'][node_key])
                +            cnet_nodes.setdefault(cfg['spec']['clusterNetwork'], []).extend(nodes)
                +
                +    code, data = api_client.hosts.get()
                +    assert 200 == code, (code, data)
                +    all_nodes = set(n['id'] for n in data['data'])
                +    try:
+        # vlan_nic configured on specific cluster network, reuse it
                +        yield next(cnet for cnet, nodes in cnet_nodes.items() if all_nodes == set(nodes))
                +        return None
                +    except StopIteration:
                +        configured_nodes = reduce(add, cnet_nodes.values(), [])
                +        if any(n in configured_nodes for n in all_nodes):
                +            raise AssertionError(
+                f"Not all nodes' VLAN NIC {vlan_nic} are available.\n"
                +                f"VLAN NIC configured nodes: {configured_nodes}\n"
                +                f"All nodes: {all_nodes}\n"
                +            )
                +
                +    # Create cluster network
                +    cnet = f"cnet-{datetime.strptime(unique_name, '%Hh%Mm%Ss%f-%m-%d').strftime('%H%M%S')}"
                +    created = []
                +    code, data = api_client.clusternetworks.create(cnet)
                +    assert 201 == code, (code, data)
                +    while all_nodes:
                +        node = all_nodes.pop()
                +        code, data = api_client.clusternetworks.create_config(node, cnet, vlan_nic, hostname=node)
                +        assert 201 == code, (
                +            f"Failed to create cluster config for {node}\n"
                +            f"Created: {created}\t Remaining: {all_nodes}\n"
                +            f"API Status({code}): {data}"
                +        )
                +        created.append(node)
                +
                +    yield cnet
                +
                +    # Teardown
                +    deleted = {name: api_client.clusternetworks.delete_config(name) for name in created}
                +    failed = [(name, code, data) for name, (code, data) in deleted.items() if 200 != code]
                +    if failed:
                +        fmt = "Unable to delete VLAN Config {} with error ({}): {}"
                +        raise AssertionError(
                +            "\n".join(fmt.format(name, code, data) for (name, code, data) in failed)
                +        )
                +
                +    code, data = api_client.clusternetworks.delete(cnet)
                +    assert 200 == code, (code, data)
                +
                def test_storage_network(api_client, cluster_network, vlan_id, unique_name, wait_timeout, setting_checker)
                +
                + +Expand source code + +
                @pytest.mark.p0
                +@pytest.mark.settings
                +@pytest.mark.networks
                +@pytest.mark.skip_version_before('v1.0.3')
                +def test_storage_network(
                +    api_client, cluster_network, vlan_id, unique_name, wait_timeout, setting_checker
                +):
                +    '''
                +    To cover test:
                +    - https://harvester.github.io/tests/manual/_incoming/1055_dedicated_storage_network/
                +
                +    Prerequisites:
                +        - All VMs should be halted
                +        - All nodes should be selected in cluster network
                +    Steps:
                +        1. Create VM Network with the VLAN ID to get CIDR
                +        2. Delete the VM Network
                +        3. Create Storage Network with the cluster network, VLAN ID and IP Range(CIDR)
                +        4. Verify Storage Network be configured
                +    Expected Result:
                +        - Status of Storage Network should be `reason: Completed` and `status:True`
                +        - Pods of Longhorn's instance manager should be `status.phase: Running`
                +        - And should have value `metadata.annotations: k8s.v1.cni.cncf.io/network-status`
                +        - And one of the value should contains `interface:lhnet1`
                +            - And the value of `ips` should be in the IP Range
                +    '''
                +    # Prerequisite: VMs should be shutting down
                +    code, data = api_client.vms.get_status()
                +    assert 200 == code, (code, data)
                +    assert not data['data'], (
                +        "\n".join(
                +            f"VM({d['id']}) still in phase: {d['status']['phase']}"
                +            for d in data['data']
                +        )
                +    )
                +
                +    # Get CIDR from VM Network
                +    code, data = api_client.networks.create(unique_name, vlan_id, cluster_network=cluster_network)
                +    assert 201 == code, (code, data)
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +    while endtime > datetime.now():
                +        code, data = api_client.networks.get(unique_name)
                +        annotations = data['metadata'].get('annotations', {})
                +        if 200 == code and annotations.get('network.harvesterhci.io/route'):
                +            route = json.loads(annotations['network.harvesterhci.io/route'])
                +            if route['cidr']:
                +                break
                +        sleep(3)
                +    else:
                +        raise AssertionError(
                +            "VM network created but route info not available\n"
                +            f"API Status({code}): {data}"
                +        )
                +    _ = api_client.networks.delete(unique_name)
                +    vlan_cidr = route['cidr']
                +
                +    # Create storage-network
                +    enable_spec = api_client.settings.StorageNetworkSpec.enable_with(
                +        vlan_id, cluster_network, vlan_cidr
                +    )
                +    code, data = api_client.settings.update('storage-network', enable_spec)
                +    assert 200 == code, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
                +    assert snet_enabled, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(vlan_cidr)
                +    assert snet_enabled, (code, data)
                +
                +    # teardown
                +    disable_spec = api_client.settings.StorageNetworkSpec.disable()
                +    code, data = api_client.settings.update('storage-network', disable_spec)
                +    assert 200 == code, (code, data)
                +    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_harvester()
                +    assert snet_disabled, (code, data)
                +    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_longhorn()
                +    assert snet_disabled, (code, data)
                +
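The `setting_checker` fixture is defined in the shared fixtures module, not here. Based on the expected results listed in this test's docstring (the storage-network setting should report `reason: Completed` and `status: True`), one plausible shape of the Harvester-side check is sketched below; the function name and the exact status layout are assumptions for illustration only:

def storage_net_completed(api_client):
    # Illustrative only: read the storage-network setting and look for a
    # condition with reason "Completed" and status "True".
    code, data = api_client.settings.get('storage-network')
    conditions = data.get('status', {}).get('conditions', [])
    ok = 200 == code and any(
        "Completed" == c.get('reason') and "True" == c.get('status')
        for c in conditions
    )
    return ok, (code, data)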

                To cover test: - https://harvester.github.io/tests/manual/_incoming/1055_dedicated_storage_network/

                Prerequisites

                @@ -93,7 +248,7 @@

                Steps

                diff --git a/backend/integrations/test_1_images.html b/backend/integrations/test_1_images.html index a34262d5b..c9024ca3c 100644 --- a/backend/integrations/test_1_images.html +++ b/backend/integrations/test_1_images.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_1_images API documentation - + @@ -37,60 +48,261 @@

                Functions

                def cluster_network(api_client, vlan_nic)
                +
                + +Expand source code + +
                @pytest.fixture(scope="class")
                +def cluster_network(api_client, vlan_nic):
                +    # We should change this at some point. It fails if the total cnet name is over 12 chars
                +    cnet = f"cnet-{vlan_nic}"
                +    code, data = api_client.clusternetworks.get(cnet)
                +    if code != 200:
                +        code, data = api_client.clusternetworks.create(cnet)
                +        assert 201 == code, (code, data)
                +
                +    code, data = api_client.clusternetworks.get_config(cnet)
                +    if code != 200:
                +        code, data = api_client.clusternetworks.create_config(cnet, cnet, vlan_nic)
                +        assert 201 == code, (code, data)
                +
                +    yield cnet
                +
                +    # Teardown
                +    code, data = api_client.clusternetworks.delete_config(cnet)
                +    assert 200 == code, (code, data)
                +    code, data = api_client.clusternetworks.delete(cnet)
                +    assert 200 == code, (code, data)
                +
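The in-code comment warns that this fixture fails once the cluster network name exceeds 12 characters. That limit is consistent with Harvester deriving a bridge interface name from the cluster network name (Linux caps interface names at 15 characters), although the source does not state the cause; a fixture could guard for it explicitly, for example:

cnet = f"cnet-{vlan_nic}"
# Keep the cluster network name short: a bridge interface is derived from it,
# and Linux interface names are limited to 15 characters.
assert len(cnet) <= 12, f"Cluster network name {cnet!r} exceeds 12 characters"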
                def create_image_url(api_client, name, image_url, image_checksum, wait_timeout)
                +
                + +Expand source code + +
                def create_image_url(api_client, name, image_url, image_checksum, wait_timeout):
                +    code, data = api_client.images.create_by_url(name, image_url, image_checksum)
                +
                +    assert 201 == code, (code, data)
                +    image_spec = data.get("spec")
                +
                +    assert name == image_spec.get("displayName")
                +    assert "download" == image_spec.get("sourceType")
                +
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +
                +    while endtime > datetime.now():
                +        code, data = api_client.images.get(name)
                +        image_status = data.get("status", {})
                +
                +        assert 200 == code, (code, data)
                +        if image_status.get("progress") == 100:
                +            break
                +        sleep(5)
                +    else:
                +        raise AssertionError(
+            f"Failed to download image {name} within {wait_timeout}s\n"
                +            f"Still got {code} with {data}"
                +        )
                +
                def delete_image(api_client, image_name, wait_timeout)
                +
                + +Expand source code + +
                def delete_image(api_client, image_name, wait_timeout):
                +    code, data = api_client.images.delete(image_name)
                +
                +    assert 200 == code, f"Failed to delete image with error: {code}, {data}"
                +
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +
                +    while endtime > datetime.now():
                +        code, data = api_client.images.get(image_name)
                +        if code == 404:
                +            break
                +        sleep(5)
                +    else:
                +        raise AssertionError(
+            f"Failed to delete image {image_name} within {wait_timeout}s\n"
                +            f"Still got {code} with {data}"
                +        )
                +
                def delete_volume(api_client, volume_name, wait_timeout)
                +
                + +Expand source code + +
                def delete_volume(api_client, volume_name, wait_timeout):
                +    code, data = api_client.volumes.delete(volume_name)
                +
                +    assert 200 == code, f"Failed to delete volume with error: {code}, {data}"
                +
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +    while endtime > datetime.now():
                +        code, data = api_client.volumes.get(volume_name)
                +        if code == 404:
                +            break
                +        sleep(5)
                +    else:
                +        raise AssertionError(
+            f"Failed to delete volume {volume_name} within {wait_timeout}s\n"
                +            f"Still got {code} with {data}"
                +        )
                +
                def export_storage_class()
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def export_storage_class():
                +    storage_class = "harvester-longhorn"
                +    return storage_class
                +
                def fake_invalid_image_file()
                +
                + +Expand source code + +
                @pytest.fixture(scope="session")
                +def fake_invalid_image_file():
                +    with NamedTemporaryFile("wb") as f:
                +        f.seek(5)  # less than 10MB
                +        f.write(b"\0")
                +        f.seek(0)
                +        yield Path(f.name)
                +
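This fixture deliberately produces a 6-byte file, which is not a multiple of 512 bytes and is therefore expected to be rejected by the upload API (see `test_create_invalid_file`). The companion `fake_image_file` fixture used by the upload tests is defined elsewhere and not shown in this module; a plausible sketch, assuming it simply pads a temporary file to a 512-byte-aligned size, could look like:

from pathlib import Path
from tempfile import NamedTemporaryFile

import pytest


@pytest.fixture(scope="session")
def fake_image_file():
    # Hypothetical sketch: pad a sparse temp file to exactly 10 MiB,
    # a multiple of 512 bytes, so the upload endpoint accepts it.
    with NamedTemporaryFile("wb") as f:
        f.seek(10 * 1024 ** 2 - 1)
        f.write(b"\0")
        f.seek(0)
        yield Path(f.name)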
                def get_image(api_client, image_name)
                +
                + +Expand source code + +
                def get_image(api_client, image_name):
                +    code, data = api_client.images.get()
                +
                +    assert len(data["items"]) > 0, (code, data)
                +
                +    code, data = api_client.images.get(image_name)
                +    assert 200 == code, (code, data)
                +    assert image_name == data["metadata"]["name"]
                +
                def image_info(request)
                +
                + +Expand source code + +
                @pytest.fixture(params=["image_opensuse", "image_ubuntu"])
                +def image_info(request):
                +    return request.getfixturevalue(request.param)
                +
                def storage_network(api_client, cluster_network, vlan_id, vlan_cidr, setting_checker)
                +
                + +Expand source code + +
                @pytest.fixture(scope="class")
                +def storage_network(api_client, cluster_network, vlan_id, vlan_cidr, setting_checker):
                +    ''' Ref. https://docs.harvesterhci.io/v1.3/advanced/storagenetwork/#configuration-example
                +    '''
                +    enable_spec = api_client.settings.StorageNetworkSpec.enable_with(
                +        vlan_id, cluster_network, vlan_cidr
                +    )
                +    code, data = api_client.settings.update('storage-network', enable_spec)
                +    assert 200 == code, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
                +    assert snet_enabled, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(vlan_cidr)
                +    assert snet_enabled, (code, data)
                +
                +    yield
                +
                +    # Teardown
                +    disable_spec = api_client.settings.StorageNetworkSpec.disable()
                +    code, data = api_client.settings.update('storage-network', disable_spec)
                +    assert 200 == code, (code, data)
                +    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_harvester()
                +    assert snet_disabled, (code, data)
                +    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_longhorn()
                +    assert snet_disabled, (code, data)
                +
                def vlan_cidr(api_client, cluster_network, vlan_id, wait_timeout, sleep_timeout)
                +
                + +Expand source code + +
                @pytest.fixture(scope="class")
                +def vlan_cidr(api_client, cluster_network, vlan_id, wait_timeout, sleep_timeout):
                +    vnet = f'{cluster_network}-vlan{vlan_id}'
                +    code, data = api_client.networks.get(vnet)
                +    if code != 200:
                +        code, data = api_client.networks.create(vnet, vlan_id, cluster_network=cluster_network)
                +        assert 201 == code, (code, data)
                +
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +    while endtime > datetime.now():
                +        code, data = api_client.networks.get(vnet)
                +        annotations = data['metadata'].get('annotations', {})
                +        if 200 == code and annotations.get('network.harvesterhci.io/route'):
                +            route = json.loads(annotations['network.harvesterhci.io/route'])
                +            if route['cidr']:
                +                break
                +        sleep(sleep_timeout)
                +    else:
                +        raise AssertionError(
                +            f"Fail to get route info of VM network {vnet} with error: {code}, {data}"
                +        )
                +
                +    yield route['cidr']
                +
                +    # Teardown
                +    code, data = api_client.networks.delete(vnet)
                +    assert 200 == code, (code, data)
                +
                @@ -102,7 +314,6 @@

                Classes

                class TestBackendImages
                -
                Expand source code @@ -322,6 +533,7 @@

                Classes

                delete_volume(api_client, volume_name, wait_timeout) delete_image(api_client, image_name, wait_timeout)
                +

                Class variables

                var pytestmark
                @@ -335,6 +547,72 @@

                Methods

                def test_create_image_from_volume(self, api_client, unique_name, export_storage_class, wait_timeout)
                +
                + +Expand source code + +
                @pytest.mark.p0
                +@pytest.mark.dependency(name="create_image_from_volume")
                +def test_create_image_from_volume(
                +    self, api_client, unique_name, export_storage_class, wait_timeout
                +):
                +    """
                +    Test create image from volume
                +
                +    Steps:
                +        1. Create a volume "test-volume" in Volumes page
                +        2. Export the volume to image "export-image"
                +        3. Check the image "export-image" exists
                +        4. Cleanup image "export-image" on Images page
                +        5. Cleanup volume "test-volume" on Volumes page
                +    """
                +
                +    volume_name = f"volume-{unique_name}"
                +    image_name = f"image-{unique_name}"
                +
                +    spec = api_client.volumes.Spec(1)
                +    code, data = api_client.volumes.create(volume_name, spec)
                +
                +    assert 201 == code, (code, data)
                +
                +    # Check volume ready
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +    while endtime > datetime.now():
                +        code, data = api_client.volumes.get(volume_name)
                +        if data["status"]["phase"] == "Bound":
                +            break
                +        sleep(5)
                +    else:
                +        raise AssertionError(
+            f"Volume {volume_name} not Bound within {wait_timeout}s\n"
                +            f"Still got {code} with {data}"
                +        )
                +
                +    api_client.volumes.export(volume_name, image_name, export_storage_class)
                +
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +
                +    image_id = ""
                +    while endtime > datetime.now():
                +        code, data = api_client.images.get()
                +        assert 200 == code, (code, data)
                +
                +        for image in data["items"]:
                +            if image["spec"]["displayName"] == image_name:
                +                if 100 == image.get("status", {}).get("progress", 0):
                +                    image_id = image["metadata"]["name"]
                +                break
                +        else:
                +            raise AssertionError(f"Failed to find image {image_name}")
                +
                +        if image_id != "":
                +            break
                +
                +        sleep(3)  # snooze
                +
                +    delete_volume(api_client, volume_name, wait_timeout)
                +    delete_image(api_client, image_id, wait_timeout)
                +

                Test create image from volume

                Steps

                  @@ -349,6 +627,28 @@

                  Steps

                  def test_create_image_url(self, image_info, unique_name, api_client, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.dependency(name="create_image_url")
                  +def test_create_image_url(self, image_info, unique_name, api_client, wait_timeout):
                  +    """
                  +    Test create raw and iso type image from url
                  +
                  +    Steps:
                  +    1. Open image page and select default URL
                  +    2. Input qcow2 image file download URL, wait for download complete
                  +    3. Check the qcow2 image exists
                  +    4. Input iso image file download URL, wait for download complete
                  +    5. Check the iso image exists
                  +    """
                  +    image_name = f"{image_info.name}-{unique_name}"
                  +    image_url = image_info.url
                  +    create_image_url(api_client, image_name, image_url,
                  +                     image_info.image_checksum, wait_timeout)
                  +

                  Test create raw and iso type image from url

                  Steps: 1. Open image page and select default URL @@ -361,6 +661,30 @@

                  Steps

                  def test_create_invalid_file(self, api_client, gen_unique_name, fake_invalid_image_file, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +def test_create_invalid_file(
                  +    self, api_client, gen_unique_name, fake_invalid_image_file, wait_timeout
                  +):
                  +    """
                  +    Test create upload image from invalid file type
                  +
                  +    Steps:
+    1. Prepare an invalid file whose size is not a multiple of 512 bytes
+    2. Try to upload the invalid image file on the Images page
+    3. Check that an error is returned
                  +    """
                  +    unique_name = gen_unique_name()
                  +    resp = api_client.images.create_by_file(unique_name, fake_invalid_image_file)
                  +
                  +    assert (
                  +        500 == resp.status_code
+    ), f"Expected upload of a file that is not a multiple of 512 bytes to fail: {resp.status_code}, {resp.content}"
                  +    delete_image(api_client, unique_name, wait_timeout)
                  +

                  Test create upload image from invalid file type

                  Steps: 1. Prepare an invalid file that is not in a multiple of 512 bytes @@ -371,6 +695,60 @@

                  Steps

                  def test_delete_image_recreate(self, api_client, image_info, unique_name, fake_image_file, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.skip_version_if("> v1.2.0", "<= v1.4.0", reason="Issue#4293 fix after `v1.4.0`")
                  +@pytest.mark.p0
                  +@pytest.mark.dependency(name="delete_image_recreate", depends=["create_image_url"])
                  +def test_delete_image_recreate(
                  +    self,
                  +    api_client,
                  +    image_info,
                  +    unique_name,
                  +    fake_image_file,
                  +    wait_timeout,
                  +):
                  +    """
                  +    Test create raw and iso type image from file
                  +
                  +    Steps:
                  +    1. Check the image created by URL exists
                  +    2. Delete the newly created image
                  +    3. Create an iso file type image from URL
                  +    4. Check the iso image exists
+    5. Upload a qcow2 file type image
+    6. Delete the newly uploaded image
+    7. Upload a new qcow2 file type image
                  +    """
                  +    image_name = f"{image_info.name}-{unique_name}"
                  +    image_url = image_info.url
                  +
                  +    get_image(api_client, image_name)
                  +    delete_image(api_client, image_name, wait_timeout)
                  +
+    create_image_url(api_client, image_name, image_url, image_info.image_checksum, wait_timeout)
                  +    get_image(api_client, image_name)
                  +
                  +    resp = api_client.images.create_by_file(unique_name, fake_image_file)
                  +
                  +    assert (
                  +        200 == resp.status_code
                  +    ), f"Failed to upload fake image with error:{resp.status_code}, {resp.content}"
                  +
                  +    get_image(api_client, unique_name)
                  +    delete_image(api_client, unique_name, wait_timeout)
                  +
                  +    resp = api_client.images.create_by_file(unique_name, fake_image_file)
                  +
                  +    assert (
                  +        200 == resp.status_code
                  +    ), f"Failed to upload fake image with error:{resp.status_code}, {resp.content}"
                  +
                  +    get_image(api_client, unique_name)
                  +    delete_image(api_client, unique_name, wait_timeout)
                  +

                  Test create raw and iso type image from file

                  Steps: 1. Check the image created by URL exists @@ -385,6 +763,73 @@

                  Steps

                  def test_edit_image_in_use(self, api_client, unique_name, image_info, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.dependency(name="edit_image_in_use", depends=["create_image_url"])
                  +def test_edit_image_in_use(self, api_client, unique_name, image_info, wait_timeout):
                  +    """
+    Test can edit an image which is already in use
                  +
                  +    Steps:
                  +    1. Check the image created from URL exists
                  +    2. Create a volume from existing image
                  +    3. Update the image labels and description
                  +    4. Check can change the image content
                  +    """
                  +
                  +    image_name = f"{image_info.name}-{unique_name}"
                  +    volume_name = f"volume-{image_info.name}-{unique_name}"
                  +
                  +    code, data = api_client.images.get(name=image_name)
                  +    assert 200 == code, (code, data)
                  +
                  +    image_id = f"{data['metadata']['namespace']}/{image_name}"
                  +
                  +    # Create volume from image_id
                  +    spec = api_client.volumes.Spec(1)
                  +    code, data = api_client.volumes.create(volume_name, spec, image_id=image_id)
                  +    assert 201 == code, (code, data)
                  +
                  +    # Check volume ready
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.volumes.get(volume_name)
                  +        if data["status"]["phase"] == "Bound":
                  +            break
                  +        sleep(5)
                  +    else:
                  +        raise AssertionError(
+            f"Volume {volume_name} not Bound within {wait_timeout}s\n"
                  +            f"Still got {code} with {data}"
                  +        )
                  +
                  +    # Update image content
                  +    updates = {
                  +        "labels": {"usage-label": "yes"},
                  +        "annotations": {"field.cattle.io/description": "edit image in use"},
                  +    }
                  +
                  +    # Update image by input
                  +    code, data = api_client.images.update(image_name, dict(metadata=updates))
                  +    assert 200 == code, f"Failed to update image with error: {code}, {data}"
                  +
                  +    unexpected = list()
                  +    for field, pairs in updates.items():
                  +        for k, val in pairs.items():
                  +            if data["metadata"][field].get(k) != val:
                  +                unexpected.append((field, k, val, data["metadata"][field].get(k)))
                  +
                  +    assert not unexpected, "\n".join(
                  +        f"Update {f} failed, set key {k} as {v} but got {n}"
                  +        for f, k, v, n in unexpected
                  +    )
                  +
                  +    delete_volume(api_client, volume_name, wait_timeout)
                  +    delete_image(api_client, image_name, wait_timeout)
                  +

Test can edit an image which is already in use

                  Steps: 1. Check the image created from URL exists @@ -398,7 +843,6 @@

                  Steps

                  class TestImageWithStorageNetwork
                  -
                  Expand source code @@ -449,6 +893,7 @@

                  Steps

                  f"Fail to delete image {unique_name} with error: {code}, {data}" )
                  +

                  Class variables

                  var pytestmark
                  @@ -462,18 +907,72 @@

                  Methods

                  def test_create_image_by_file(self, api_client, fake_image_file, unique_name)
                  +
                  + +Expand source code + +
                  @pytest.mark.dependency(name="create_image_by_file")
                  +def test_create_image_by_file(self, api_client, fake_image_file, unique_name):
                  +    resp = api_client.images.create_by_file(unique_name, fake_image_file)
                  +    assert resp.ok, f"Fail to upload fake image with error: {resp.status_code}, {resp.text}"
                  +
                  +    code, data = api_client.images.get(unique_name)
                  +    assert 200 == code, (code, data)
                  +    assert unique_name == data["metadata"]["name"], (code, data)
                  +
                  def test_delete_image(self, api_client, unique_name, wait_timeout, sleep_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.dependency(depends=["create_image_by_file"])
                  +def test_delete_image(self, api_client, unique_name, wait_timeout, sleep_timeout):
                  +    code, data = api_client.images.delete(unique_name)
                  +    assert 200 == code, (code, data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.images.get(unique_name)
                  +        if code == 404:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Fail to delete image {unique_name} with error: {code}, {data}"
                  +        )
                  +
                  def test_download_image(self, api_client, fake_image_file, tmp_path, unique_name)
                  +
                  + +Expand source code + +
                  @pytest.mark.dependency(depends=["create_image_by_file"])
                  +def test_download_image(self, api_client, fake_image_file, tmp_path, unique_name):
                  +    resp = api_client.images.download(unique_name)
                  +    assert resp.ok, f"Fail to download fake image with error: {resp.status_code}, {resp.text}"
                  +
                  +    filename = re.search(r'filename=(\S+)', resp.headers.get("Content-Disposition"))
                  +    assert filename, f"No filename info in the response header: {resp.headers}"
                  +    filename = filename.groups()[0]
                  +
                  +    tmp_image_file = tmp_path / filename
                  +    tmp_image_file.write_bytes(
                  +        zlib.decompress(resp.content, 32+15) if ".gz" in filename else resp.content
                  +    )
                  +    assert filecmp.cmp(fake_image_file, tmp_image_file), (
                  +        "Contents of downloaded image is NOT identical to the fake image"
                  +    )
                  +
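The `32 + 15` passed to `zlib.decompress` above is wbits=47, which makes zlib auto-detect a gzip or zlib header before inflating, so the comparison works when the download comes back as a `.gz` file. A standalone illustration of that behaviour:

import gzip
import zlib

payload = b"fake image bytes"
# wbits = 32 + 15 auto-detects the gzip (or zlib) header before inflating
assert zlib.decompress(gzip.compress(payload), 32 + 15) == payload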
                  @@ -533,7 +1032,7 @@

                  -

                  Generated by pdoc 0.11.1.

                  +

                  Generated by pdoc 0.11.5.

                  diff --git a/backend/integrations/test_1_volumes.html b/backend/integrations/test_1_volumes.html index 33edf7efe..6d49e7202 100644 --- a/backend/integrations/test_1_volumes.html +++ b/backend/integrations/test_1_volumes.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_1_volumes API documentation - + @@ -37,6 +48,66 @@

                  Functions

                  def test_create_volume(api_client, unique_name, ubuntu_image, create_as, source_type, polling_for)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.volumes
                  +@pytest.mark.parametrize("create_as", ["json", "yaml"])
                  +@pytest.mark.parametrize("source_type", ["New", "VM Image"])
                  +def test_create_volume(api_client, unique_name, ubuntu_image, create_as, source_type, polling_for):
                  +    """
                  +    1. Create a volume from image
                  +    2. Create should respond with 201
                  +    3. Wait for volume to create
+    4. Image download failures should not exceed the retry limit (3)
                  +    5. Get volume metadata
                  +    6. Volume should not be in error or transitioning state
                  +    7. ImageId should match what was used in create
                  +    8. Delete volume
                  +    9. Delete volume should reply 404 after delete
                  +    Ref.
                  +    """
                  +    image_id, storage_cls = None, None
                  +    if source_type == "VM Image":
                  +        image_id, storage_cls = ubuntu_image['id'], f"longhorn-{ubuntu_image['display_name']}"
                  +
                  +    spec = api_client.volumes.Spec("10Gi", storage_cls)
                  +    if create_as == 'yaml':
                  +        kws = dict(headers={'Content-Type': 'application/yaml'}, json=None,
                  +                   data=yaml.dump(spec.to_dict(unique_name, 'default', image_id=image_id)))
                  +    else:
                  +        kws = dict()
                  +    code, data = api_client.volumes.create(unique_name, spec, image_id=image_id, **kws)
                  +    assert 201 == code, (code, unique_name, data, image_id)
                  +
                  +    polling_for("volume do created",
                  +                lambda code, data: 200 == code and data['status']['phase'] == "Bound",
                  +                api_client.volumes.get, unique_name)
                  +    code2, data2 = api_client.images.get(ubuntu_image['display_name'])
                  +    # This grabs the failed count for the image
                  +    failed: int = data2['status']['failed']
+    # The image download should not have failed more than 3 times
                  +    assert failed <= 3, 'Image failed more than 3 times'
                  +
                  +    code, data = api_client.volumes.get(unique_name)
                  +    mdata, annotations = data['metadata'], data['metadata']['annotations']
                  +    assert 200 == code, (code, data)
                  +    assert unique_name == mdata['name'], (code, data)
                  +    # status
                  +    assert not mdata['state']['error'], (code, data)
                  +    assert not mdata['state']['transitioning'], (code, data)
                  +    assert data['status']['phase'] == "Bound", (code, data)
                  +    # source
                  +    if source_type == "VM Image":
                  +        assert image_id == annotations['harvesterhci.io/imageId'], (code, data)
                  +    else:
                  +        assert not annotations.get('harvesterhci.io/imageId'), (code, data)
                  +    # teardown
                  +    polling_for("volume do deleted", lambda code, _: 404 == code,
                  +                api_client.volumes.delete, unique_name)
                  +
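`polling_for` is a fixture defined in conftest rather than in this module. From the calls above it takes a description, a predicate over `(code, data)`, a callable, and that callable's arguments, retrying until the predicate passes or a timeout is reached and returning the final `(code, data)`. A hedged sketch of a plausible implementation (names and the timeout fixtures it depends on are assumptions):

from datetime import datetime, timedelta
from time import sleep

import pytest


@pytest.fixture(scope="session")
def polling_for(wait_timeout, sleep_timeout):
    # Illustrative shape only; the real fixture lives in conftest.py.
    def _poll(subject, checker, func, *args, **kwargs):
        code, data = None, None
        endtime = datetime.now() + timedelta(seconds=wait_timeout)
        while endtime > datetime.now():
            code, data = func(*args, **kwargs)
            if checker(code, data):
                return code, data
            sleep(sleep_timeout)
        raise AssertionError(f"Timed out polling for {subject}: ({code}) {data}")
    return _poll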
                  1. Create a volume from image
                  2. Create should respond with 201
                  3. @@ -51,9 +122,62 @@

                    Functions

                  -def test_create_volume_bad_checksum(api_client, unique_name, ubuntu_image_bad_checksum, create_as, source_type, polling_for) +def test_create_volume_bad_checksum(api_client,
                  unique_name,
                  ubuntu_image_bad_checksum,
                  create_as,
                  source_type,
                  polling_for)
                  +
                  + +Expand source code + +
                  @pytest.mark.p1
                  +@pytest.mark.volumes
                  +@pytest.mark.negative
                  +@pytest.mark.parametrize("create_as", ["json", "yaml"])
                  +@pytest.mark.parametrize("source_type", ["New", "VM Image"])
                  +def test_create_volume_bad_checksum(api_client, unique_name, ubuntu_image_bad_checksum,
                  +                                    create_as, source_type, polling_for):
                  +    """
                  +    1. Create a volume from image with a bad checksum
                  +    2. Create should respond with 201
                  +    3. Wait for volume to create
+    4. Wait for 4 failures in the image's failed status
+    5. The failed count should be exactly 4
                  +    6. Delete volume
                  +    7. Delete volume should reply 404 after delete
                  +    Ref. https://github.com/harvester/tests/issues/1121
                  +    """
                  +    image_id, storage_cls = None, None
                  +    if source_type == "VM Image":
                  +        image_id, storage_cls = ubuntu_image_bad_checksum['id'], \
                  +            f"longhorn-{ubuntu_image_bad_checksum['display_name']}"
                  +
                  +    spec = api_client.volumes.Spec("10Gi", storage_cls)
                  +    if create_as == 'yaml':
                  +        kws = dict(headers={'Content-Type': 'application/yaml'}, json=None,
                  +                   data=yaml.dump(spec.to_dict(unique_name, 'default', image_id=image_id)))
                  +    else:
                  +        kws = dict()
                  +    code, data = api_client.volumes.create(unique_name, spec, image_id=image_id, **kws)
                  +    assert 201 == code, (code, unique_name, data, image_id)
                  +
                  +    polling_for("volume do created",
                  +                lambda code, data: 200 == code and data['status']['phase'] == "Bound",
                  +                api_client.volumes.get, unique_name)
                  +    code2, data2 = api_client.images.get(ubuntu_image_bad_checksum['display_name'])
                  +    polling_for("failed to process sync file",
                  +                lambda code2, data2: 200 == code2 and data2['status']['failed'] == 4,
                  +                api_client.images.get, ubuntu_image_bad_checksum['display_name'])
                  +
                  +    # This grabs the failed count for the image
                  +    code2, data2 = api_client.images.get(ubuntu_image_bad_checksum['display_name'])
                  +    failed: int = data2['status']['failed']
+    # The download should have failed exactly 4 times because of the bad checksum
+    assert failed == 4, 'Expected image download to fail 4 times with the bad checksum'
                  +
                  +    # teardown
                  +    polling_for("volume do deleted", lambda code, _: 404 == code,
                  +                api_client.volumes.delete, unique_name)
                  +
                  1. Create a volume from image with a bad checksum
                  2. Create should respond with 201
                  3. @@ -69,6 +193,43 @@

                    Functions

                    def ubuntu_image(api_client, unique_name, image_ubuntu, polling_for)
                    +
                    + +Expand source code + +
                    @pytest.fixture(scope="module")
                    +def ubuntu_image(api_client, unique_name, image_ubuntu, polling_for):
                    +    """
                    +    Generates a Ubuntu image
                    +
                    +    1. Creates an image name based on unique_name
                    +    2. Create the image based on URL
                    +    3. Response for creation should be 201
                    +    4. Loop while waiting for image to be created
                    +    5. Yield the image with the namespace and name
                    +    6. Delete the image
                    +    7. The response for getting the image name should be 404 after deletion
                    +    """
                    +    image_name = f"img-{unique_name}"
                    +    code, data = api_client.images.create_by_url(image_name, image_ubuntu.url,
                    +                                                 image_ubuntu.image_checksum)
                    +    assert 201 == code, f"Fail to create image\n{code}, {data}"
                    +    code, data = polling_for("image do created",
                    +                             lambda c, d: c == 200 and d.get('status', {}).get('progress') == 100,
                    +                             api_client.images.get, image_name)
                    +
                    +    namespace = data['metadata']['namespace']
                    +    name = data['metadata']['name']
                    +    yield dict(ssh_user=image_ubuntu.ssh_user, id=f"{namespace}/{name}", display_name=image_name)
                    +
                    +    code, data = api_client.images.get(image_name)
                    +    if 200 == code:
                    +        code, data = api_client.images.delete(image_name)
                    +        assert 200 == code, f"Fail to cleanup image\n{code}, {data}"
                    +        polling_for("image do deleted",
                    +                    lambda c, d: 404 == c,
                    +                    api_client.images.get, image_name)
                    +

                    Generates a Ubuntu image

                    1. Creates an image name based on unique_name
                    2. @@ -84,6 +245,43 @@

                      Functions

                      def ubuntu_image_bad_checksum(api_client, unique_name, image_ubuntu, polling_for)
                      +
                      + +Expand source code + +
                      @pytest.fixture(scope="module")
                      +def ubuntu_image_bad_checksum(api_client, unique_name, image_ubuntu, polling_for):
                      +    """
                      +    Generates a Ubuntu image with a bad sha512 checksum
                      +
                      +    1. Creates an image name based on unique_name
                      +    2. Create the image based on URL with a bad statically assigned checksum
                      +    3. Response for creation should be 201
                      +    4. Loop while waiting for image to be created
                      +    5. Yield the image with the namespace and name
                      +    6. Delete the image
                      +    7. The response for getting the image name should be 404 after deletion
                      +    """
                      +
+    image_name = f"img-{unique_name}-badchecksum"
                      +    # Random fake checksum to use in test
                      +    fake_checksum = sha512(b'not_a_valid_checksum').hexdigest()
                      +    code, data = api_client.images.create_by_url(image_name, image_ubuntu.url, fake_checksum)
                      +    assert 201 == code, f"Fail to create image\n{code}, {data}"
                      +    code, data = polling_for("image do created",
                      +                             lambda c, d: c == 200 and d.get('status', {}).get('progress') == 100,
                      +                             api_client.images.get, image_name)
                      +    namespace = data['metadata']['namespace']
                      +    name = data['metadata']['name']
                      +    yield dict(ssh_user=image_ubuntu.ssh_user, id=f"{namespace}/{name}", display_name=image_name)
                      +    code, data = api_client.images.get(image_name)
                      +    if 200 == code:
                      +        code, data = api_client.images.delete(image_name)
                      +        assert 200 == code, f"Fail to cleanup image\n{code}, {data}"
                      +        polling_for("image do deleted",
                      +                    lambda c, d: 404 == c,
                      +                    api_client.images.get, image_name)
                      +

                      Generates a Ubuntu image with a bad sha512 checksum

                      1. Creates an image name based on unique_name
                      2. @@ -99,6 +297,46 @@

                        Functions

                        def ubuntu_vm(api_client, unique_name, ubuntu_image, polling_for)
                        +
                        + +Expand source code + +
                        @pytest.fixture(scope="class")
                        +def ubuntu_vm(api_client, unique_name, ubuntu_image, polling_for):
                        +    vm_name = f"vm-{unique_name}"
                        +
                        +    vm_spec = api_client.vms.Spec(1, 2)
                        +    vm_spec.add_image(vm_name, ubuntu_image["id"])
                        +    code, data = api_client.vms.create(vm_name, vm_spec)
                        +    assert 201 == code, f"Fail to create VM\n{code}, {data}"
                        +    code, data = polling_for(
                        +        "VM do created",
                        +        lambda c, d: 200 == c and d.get('status', {}).get('printableStatus') == "Running",
                        +        api_client.vms.get, vm_name
                        +    )
                        +
                        +    volumes = list(filter(lambda vol: "persistentVolumeClaim" in vol,
                        +                          data["spec"]["template"]["spec"]["volumes"]))
                        +    assert len(volumes) == 1
                        +    yield data
                        +
                        +    code, data = api_client.vms.get(vm_name)
                        +    if 200 == code:
                        +        code, data = api_client.vms.delete(vm_name)
                        +        assert 200 == code, f"Fail to cleanup VM\n{code}, {data}"
                        +        polling_for("VM do deleted",
                        +                    lambda c, d: 404 == c,
                        +                    api_client.vms.get, vm_name)
                        +
                        +    vol_name = volumes[0]['persistentVolumeClaim']['claimName']
                        +    code, data = api_client.volumes.get(vol_name)
                        +    if 200 == code:
+        code, data = api_client.volumes.delete(vol_name)
                        +        assert 200 == code, f"Fail to cleanup volume\n{code}, {data}"
                        +        polling_for("volume do deleted",
                        +                    lambda c, d: 404 == c,
                        +                    api_client.volumes.get, vol_name)
                        +

                @@ -110,7 +348,6 @@

                Classes

                class TestVolumeWithVM
                -
                Expand source code @@ -205,6 +442,7 @@

                Classes

                lambda c, d: 404 == c, api_client.volumes.get, vol_name)
                +

                Class variables

                var pytestmark
                @@ -218,24 +456,91 @@

                Methods

                def delete_vm(self, api_client, ubuntu_vm, polling_for)
                +
                + +Expand source code + +
                def delete_vm(self, api_client, ubuntu_vm, polling_for):
                +    vm_name = ubuntu_vm['metadata']['name']
                +    code, data = api_client.vms.delete(vm_name)
                +    assert 200 == code, f"Fail to delete VM\n{code}, {data}"
                +    polling_for("VM do deleted",
                +                lambda c, d: 404 == c,
                +                api_client.vms.get, vm_name)
                +
                def pause_vm(self, api_client, ubuntu_vm, polling_for)
                +
                + +Expand source code + +
                def pause_vm(self, api_client, ubuntu_vm, polling_for):
                +    vm_name = ubuntu_vm['metadata']['name']
                +    code, data = api_client.vms.pause(vm_name)
                +    assert 204 == code, f"Fail to pause VM\n{code}, {data}"
                +    polling_for("VM do paused",
                +                lambda c, d: d.get('status', {}).get('printableStatus') == "Paused",
                +                api_client.vms.get, vm_name)
                +
                def stop_vm(self, api_client, ubuntu_vm, polling_for)
                +
                + +Expand source code + +
                def stop_vm(self, api_client, ubuntu_vm, polling_for):
                +    vm_name = ubuntu_vm['metadata']['name']
                +    code, data = api_client.vms.stop(vm_name)
                +    assert 204 == code, f"Fail to stop VM\n{code}, {data}"
                +    polling_for("VM do stopped",
                +                lambda c, d: 404 == c,
                +                api_client.vms.get_status, vm_name)
                +
                def test_delete_volume_on_deleted_vm(self, api_client, ubuntu_image, ubuntu_vm, polling_for)
                +
                + +Expand source code + +
                def test_delete_volume_on_deleted_vm(self, api_client, ubuntu_image, ubuntu_vm, polling_for):
                +    """
                +    1. Create a VM with volume
                +    2. Delete VM but not volume
                +    3. Delete volume concurrently with VM
                +    4. VM should be deleted
                +    5. Volume should be deleted
                +    Ref. https://github.com/harvester/tests/issues/652
                +    """
                +    vm_name = ubuntu_vm['metadata']['name']
                +    vol_name = (ubuntu_vm["spec"]["template"]["spec"]["volumes"][0]
                +                         ['persistentVolumeClaim']['claimName'])
                +
                +    api_client.vms.delete(vm_name)
                +
                +    polling_for("Delete volume",
                +                lambda c, d: 200 == c,
                +                api_client.volumes.delete, vol_name)
                +
                +    # Retry since VM is deleting
                +    polling_for("VM do deleted",
                +                lambda c, d: 404 == c,
                +                api_client.vms.get, vm_name)
                +    polling_for("Volume do deleted",
                +                lambda c, d: 404 == c,
                +                api_client.volumes.get, vol_name)
                +
                1. Create a VM with volume
                2. Delete VM but not volume
                3. @@ -249,6 +554,46 @@

                  Methods

                  def test_delete_volume_on_existing_vm(self, api_client, ubuntu_image, ubuntu_vm, polling_for)
                  +
                  + +Expand source code + +
                  def test_delete_volume_on_existing_vm(self, api_client, ubuntu_image, ubuntu_vm, polling_for):
                  +    """
                  +    1. Create a VM with volume
                  +    2. Delete volume should reply 422
                  +    3. Pause VM
                  +    4. Delete volume should reply 422 too
                  +    5. Stop VM
                  +    6. Delete volume should reply 422 too
                  +    Ref. https://github.com/harvester/tests/issues/905
                  +    """
                  +    vol_name = (ubuntu_vm["spec"]["template"]["spec"]["volumes"][0]
                  +                         ['persistentVolumeClaim']['claimName'])
                  +
                  +    code, data = api_client.volumes.delete(vol_name)
                  +    assert 422 == code, f"Should fail to delete volume\n{code}, {data}"
                  +
                  +    self.pause_vm(api_client, ubuntu_vm, polling_for)
                  +    code, data = api_client.volumes.delete(vol_name)
                  +    assert 422 == code, f"Should fail to delete volume\n{code}, {data}"
                  +
                  +    self.stop_vm(api_client, ubuntu_vm, polling_for)
                  +    code, data = api_client.volumes.delete(vol_name)
                  +    assert 422 == code, f"Should fail to delete volume\n{code}, {data}"
                  +
                  +    # Check Volume
                  +    code, data = api_client.volumes.get(vol_name)
                  +    mdata, annotations = data['metadata'], data['metadata']['annotations']
                  +    assert 200 == code, (code, data)
                  +    assert mdata['name'] == vol_name, (code, data)
                  +    # status
                  +    assert not mdata['state']['error'], (code, data)
                  +    assert not mdata['state']['transitioning'], (code, data)
                  +    assert data['status']['phase'] == "Bound", (code, data)
                  +    # source
                  +    assert ubuntu_image["id"] == annotations['harvesterhci.io/imageId'], (code, data)
                  +
                  1. Create a VM with volume
                  2. Delete volume should reply 422
                  3. @@ -302,7 +647,7 @@

                    -

                    Generated by pdoc 0.11.1.

                    +

                    Generated by pdoc 0.11.5.

                    diff --git a/backend/integrations/test_3_vm.html b/backend/integrations/test_3_vm.html index 94a9b2dae..eb7686899 100644 --- a/backend/integrations/test_3_vm.html +++ b/backend/integrations/test_3_vm.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_3_vm API documentation - + @@ -37,54 +48,463 @@

                    Functions

                    def available_node_names(api_client)
                    +
                    + +Expand source code + +
                    @pytest.fixture(scope="class")
                    +def available_node_names(api_client):
                    +    status_code, nodes_info = api_client.hosts.get()
                    +    assert status_code == 200, f"Failed to list nodes with error: {nodes_info}"
                    +
                    +    node_names = []
                    +    for node_info in nodes_info['data']:
                    +        is_ready = False
                    +        for condition in node_info.get('status', {}).get('conditions', []):
                    +            if condition.get('type', "") == "Ready" and \
                    +                    condition.get('status', "") == "True":
                    +                is_ready = True
                    +                break
                    +
                    +        if is_ready and not node_info.get('spec', {}).get('unschedulable', False):
                    +            node_names.append(node_info['metadata']['name'])
                    +
                    +    assert 2 <= len(node_names), (
                    +        f"The cluster only have {len(node_names)} available node. It's not enough."
                    +    )
                    +    yield node_names
                    +
                    def cluster_network(api_client, vlan_nic)
                    +
                    + +Expand source code + +
                    @pytest.fixture(scope="class")
                    +def cluster_network(api_client, vlan_nic):
                    +    name = f"cnet-{vlan_nic}"
                    +    code, data = api_client.clusternetworks.create(name)
                    +    assert 201 == code, (code, data)
                    +    code, data = api_client.clusternetworks.create_config(name, name, vlan_nic)
                    +    assert 201 == code, (code, data)
                    +
                    +    yield name
                    +
                    +    # teardown
                    +    code, data = api_client.clusternetworks.delete_config(name)
                    +    assert 200 == code, (code, data)
                    +    code, data = api_client.clusternetworks.delete(name)
                    +    assert 200 == code, (code, data)
                    +
                    def minimal_vm(api_client, unique_name, ubuntu_image, ssh_keypair, vm_checker)
                    +
                    + +Expand source code + +
                    @pytest.fixture
                    +def minimal_vm(api_client, unique_name, ubuntu_image, ssh_keypair, vm_checker):
                    +    unique_vm_name = f"vm-{unique_name}"
                    +    cpu, mem = 1, 2
                    +    pub_key, pri_key = ssh_keypair
                    +    vm_spec = api_client.vms.Spec(cpu, mem)
                    +    vm_spec.add_image("disk-0", ubuntu_image.id)
                    +
                    +    userdata = yaml.safe_load(vm_spec.user_data)
                    +    userdata['ssh_authorized_keys'] = [pub_key]
                    +    userdata['password'] = 'password'
                    +    userdata['chpasswd'] = dict(expire=False)
                     +    userdata['ssh_pwauth'] = True
                    +    vm_spec.user_data = yaml.dump(userdata)
                    +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                    +
                    +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                    +    assert vm_got_ips, (
                    +        f"Fail to start VM and get IP with error: {code}, {data}"
                    +    )
                    +    vm_ip = next(i['ipAddress'] for i in data['status']['interfaces'] if i['name'] == 'default')
                    +
                    +    code, data = api_client.hosts.get(data['status']['nodeName'])
                    +    host_ip = next(a['address'] for a in data['status']['addresses'] if a['type'] == 'InternalIP')
                    +
                    +    yield SimpleNamespace(**{
                    +        "name": unique_vm_name,
                    +        "host_ip": host_ip,
                    +        "vm_ip": vm_ip,
                    +        "ssh_user": ubuntu_image.ssh_user
                    +    })
                    +
                    +    # teardown
                    +    code, data = api_client.vms.get(unique_vm_name)
                    +    vm_spec = api_client.vms.Spec.from_dict(data)
                    +    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_vm_name)
                    +    assert vm_deleted, (code, data)
                    +
                    +    for vol in vm_spec.volumes:
                    +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                    +        api_client.volumes.delete(vol_name)
                    +
                    def storage_network(api_client, cluster_network, vm_network, setting_checker)
                    +
                    + +Expand source code + +
                    @pytest.fixture
                    +def storage_network(api_client, cluster_network, vm_network, setting_checker):
                    +    ''' Ref. https://docs.harvesterhci.io/v1.3/advanced/storagenetwork/#configuration-example
                    +    '''
                    +    yield SimpleNamespace(**{
                    +        "vlan_id": vm_network.vlan_id,
                    +        "cluster_network": cluster_network,
                    +        "cidr": vm_network.cidr,
                    +        "enable_spec": api_client.settings.StorageNetworkSpec.enable_with(
                    +            vm_network.vlan_id, cluster_network, vm_network.cidr
                    +        )
                    +    })
                    +
                    +    # teardown
                    +    disable_spec = api_client.settings.StorageNetworkSpec.disable()
                    +    code, data = api_client.settings.update('storage-network', disable_spec)
                    +    assert 200 == code, (code, data)
                    +    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_harvester()
                    +    assert snet_disabled, (code, data)
                    +    snet_disabled, (code, data) = setting_checker.wait_storage_net_disabled_on_longhorn()
                    +    assert snet_disabled, (code, data)
                    +
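                     For reference, the storage-network setting value assembled by `StorageNetworkSpec.enable_with(...)` should, per the Harvester storage-network documentation linked in the fixture, resemble the dict below; the concrete numbers are placeholders and the exact serialization performed by the spec helper is an assumption:

                     # Illustrative storage-network setting value (field names per the
                     # Harvester docs referenced above; values are placeholders only):
                     enable_value = {
                         "vlan": 100,
                         "clusterNetwork": "cnet-ens5",
                         "range": "192.168.100.0/24",
                     }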
                    def test_migrate_vm_with_multiple_volumes(api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker)
                    +
                    + +Expand source code + +
                    @pytest.mark.p0
                    +@pytest.mark.virtualmachines
                    +def test_migrate_vm_with_multiple_volumes(
                    +    api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker
                    +):
                    +    vm_spec = api_client.vms.Spec(1, 1)
                    +    vm_spec.add_image('disk-0', ubuntu_image.id)
                    +    vm_spec.add_volume('disk-1', 1)
                    +    code, vm_data = api_client.vms.create(unique_name, vm_spec)
                    +    assert code == 201, (
                    +        f"Failed to create VM {unique_name} with error: {code}, {vm_data}"
                    +    )
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    data, vmi_data = None, None
                    +    while endtime > datetime.now():
                    +        code, data = api_client.vms.get(unique_name)
                    +        if data.get('status', {}).get('ready', False):
                    +            code, data = api_client.vms.get_status(unique_name)
                    +            if data['status']['conditions'][-1]['status'] == 'True':
                    +                vmi_data = data
                    +                break
                    +        sleep(5)
                    +    else:
                    +        vm_checker.wait_deleted(unique_name)
                    +        raise AssertionError(
                    +            f"Can't find VM {unique_name} with {wait_timeout} timed out\n"
                    +            f"Got error: {code}, {data}"
                    +        )
                    +
                    +    src_host = vmi_data['status']['nodeName']
                    +    dst_host = next(n for n in available_node_names if n != src_host)
                    +
                    +    code, data = api_client.vms.migrate(unique_name, dst_host)
                    +    assert code == 204, (
                    +        f"Can't migrate VM {unique_name} from host {src_host} to {dst_host}",
                    +        f"Got error: {code}, {data}"
                    +    )
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    while endtime > datetime.now():
                    +        code, data = api_client.vms.get_status(unique_name)
                    +        if data.get('status', {}).get('migrationState', {}).get('completed', False):
                    +            assert dst_host == data['status']['nodeName'], (
                    +                f"Failed to migrate VM {unique_name} from {src_host} to {dst_host}"
                    +            )
                    +            break
                    +        sleep(5)
                    +    else:
                    +        vm_checker.wait_deleted(unique_name)
                    +        raise AssertionError(
                    +            f"The migration of VM {unique_name} is not completed with {wait_timeout} timed out"
                    +            f"Got error: {code}, {data}"
                    +        )
                    +
                    +    # teardown
                    +    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_name)
                    +    assert vm_deleted, (code, data)
                    +
                    +    for vol in api_client.vms.Spec.from_dict(vm_data).volumes:
                    +        if vol['volume'].get('persistentVolumeClaim', {}).get('claimName', "") != "":
                    +            api_client.volumes.delete(vol['volume']['persistentVolumeClaim']['claimName'])
                    +
                    def test_migrate_vm_with_user_data(api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker)
                    +
                    + +Expand source code + +
                    @pytest.mark.p0
                    +@pytest.mark.virtualmachines
                    +def test_migrate_vm_with_user_data(
                    +    api_client, unique_name, ubuntu_image, wait_timeout, available_node_names, vm_checker
                    +):
                    +    vm_spec = api_client.vms.Spec(1, 1)
                    +    vm_spec.add_image('disk-0', ubuntu_image.id)
                    +    vm_spec.user_data += (
                    +        "password: test\n"
                    +        "chpasswd:\n"
                    +        "  expire: false\n"
                    +        "ssh_pwauth: true\n"
                    +    )
                    +    code, vm_data = api_client.vms.create(unique_name, vm_spec)
                    +    assert code == 201, (
                    +        f"Failed to create VM {unique_name} with error: {code}, {vm_data}"
                    +    )
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    data, vmi_data = None, None
                    +    while endtime > datetime.now():
                    +        code, data = api_client.vms.get(unique_name)
                    +        if data.get('status', {}).get('ready', False):
                    +            code, data = api_client.vms.get_status(unique_name)
                    +            if data['status']['conditions'][-1]['status'] == 'True':
                    +                vmi_data = data
                    +                break
                    +        sleep(5)
                    +    else:
                    +        vm_checker.wait_deleted(unique_name)
                    +        raise AssertionError(
                    +            f"Can't find VM {unique_name} with {wait_timeout} timed out\n"
                    +            f"Got error: {code}, {data}"
                    +        )
                    +
                    +    src_host = vmi_data['status']['nodeName']
                    +    dst_host = next(n for n in available_node_names if n != src_host)
                    +
                    +    code, data = api_client.vms.migrate(unique_name, dst_host)
                    +    assert code == 204, (
                    +        f"Can't migrate VM {unique_name} from host {src_host} to {dst_host}",
                    +        f"Got error: {code}, {data}"
                    +    )
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    while endtime > datetime.now():
                    +        code, data = api_client.vms.get_status(unique_name)
                    +        if data.get('status', {}).get('migrationState', {}).get('completed', False):
                    +            assert dst_host == data['status']['nodeName'], (
                    +                f"Failed to migrate VM {unique_name} from {src_host} to {dst_host}"
                    +            )
                    +            break
                    +        sleep(5)
                    +    else:
                    +        vm_checker.wait_deleted(unique_name)
                    +        raise AssertionError(
                    +            f"The migration of VM {unique_name} is not completed with {wait_timeout} timed out"
                    +            f"Got error: {code}, {data}"
                    +        )
                    +
                    +    # teardown
                    +    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_name)
                    +    assert vm_deleted, (code, data)
                    +
                    +    for vol in api_client.vms.Spec.from_dict(vm_data).volumes:
                    +        if vol['volume'].get('persistentVolumeClaim', {}).get('claimName', "") != "":
                    +            api_client.volumes.delete(vol['volume']['persistentVolumeClaim']['claimName'])
                    +
                    def test_multiple_migrations(api_client, unique_name, ubuntu_image, wait_timeout, available_node_names)
                    +
                    + +Expand source code + +
                    @pytest.mark.p0
                    +@pytest.mark.virtualmachines
                    +def test_multiple_migrations(
                    +    api_client, unique_name, ubuntu_image, wait_timeout, available_node_names
                    +):
                    +    vm_spec = api_client.vms.Spec(1, 1)
                    +    vm_spec.add_image('disk-0', ubuntu_image.id)
                    +    vm_names = [f"migrate-1-{unique_name}", f"migrate-2-{unique_name}"]
                    +    volumes = []
                    +    for vm_name in vm_names:
                    +        code, data = api_client.vms.create(vm_name, vm_spec)
                    +        assert 201 == code, (
                    +            f"Failed to create VM {vm_name} with error: {code}, {data}"
                    +        )
                    +        volumes.extend(api_client.vms.Spec.from_dict(data).volumes)
                    +
                    +    vmi_data = []
                    +    vm_name = code = data = None
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    while endtime > datetime.now():
                    +        vmi_data.clear()
                    +        for vm_name in vm_names:
                    +            code, data = api_client.vms.get_status(vm_name)
                    +            if not (code == 200 and "Running" == data.get('status', {}).get('phase')):
                    +                break
                    +            vmi_data.append(data)
                    +        else:
                    +            break
                    +        sleep(5)
                    +    else:
                    +        for vm_name in vm_names:
                    +            api_client.vms.delete(vm_name)
                    +        raise AssertionError(
                    +            f"Can't find VM {vm_name} with {wait_timeout} timed out\n"
                    +            f"Got error: {code}, {data}"
                    +        )
                    +
                    +    vm_src_dst_hosts = {}  # {vm_name: [src_host, dst_host]}
                    +    for vm_name, vm_datum in zip(vm_names, vmi_data):
                    +        src_host = vm_datum['status']['nodeName']
                    +        dst_host = next(n for n in available_node_names if n != src_host)
                    +        vm_src_dst_hosts[vm_name] = [src_host, dst_host]
                    +
                    +        code, data = api_client.vms.migrate(vm_name, dst_host)
                    +        assert code == 204, (
                    +            f"Can't migrate VM {vm_name} from host {src_host} to {dst_host}",
                    +            f"Got error: {code}, {data}"
                    +        )
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    while endtime > datetime.now():
                    +        fails = []
                    +        for vm_name, (src, dst) in vm_src_dst_hosts.items():
                    +            code, data = api_client.vms.get_status(vm_name)
                    +            if not data.get('status', {}).get('migrationState', {}).get('completed'):
                    +                break
                    +            else:
                    +                if dst != data['status']['nodeName']:
                    +                    fails.append(
                    +                        f"Failed to migrate VM {vm_name} from {src} to {dst}\n"
                    +                        f"API Status({code}): {data}"
                    +                    )
                    +        else:
                    +            break
                    +        sleep(5)
                    +    else:
                    +        for vm_name in vm_names:
                    +            api_client.vms.delete(vm_name)
                    +        raise AssertionError("\n".join(fails))
                    +
                    +    # teardown
                    +    for vm_name in vm_names:
                    +        api_client.vms.delete(vm_name)
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    code, data = None, None
                    +    while endtime > datetime.now():
                    +        fails = []
                    +        for vm_name in vm_names:
                    +            code, data = api_client.vms.get_status(vm_name)
                    +            if code != 404:
                    +                fails.append(
                    +                    f"VM {vm_name} can't be deleted with {wait_timeout} timed out\n"
                    +                    f"API Status({code}): {data}"
                    +                )
                    +                break
                    +        else:
                    +            break
                    +        sleep(5)
                    +    else:
                    +        raise AssertionError("\n".join(fails))
                    +
                    +    for vol in volumes:
                    +        if vol['volume'].get('persistentVolumeClaim', {}).get('claimName'):
                    +            api_client.volumes.delete(vol['volume']['persistentVolumeClaim']['claimName'])
                    +
                    def ubuntu_image(api_client, unique_name, image_ubuntu, image_checker)
                    +
                    + +Expand source code + +
                    @pytest.fixture(scope="module")
                    +def ubuntu_image(api_client, unique_name, image_ubuntu, image_checker):
                    +    name = f"{image_ubuntu.name}-{unique_name}"
                    +    code, data = api_client.images.create_by_url(name, image_ubuntu.url)
                    +    assert 201 == code, (code, data)
                    +
                    +    image_downloaded, (code, data) = image_checker.wait_downloaded(name)
                    +    assert image_downloaded, (code, data)
                    +
                    +    namespace = data['metadata']['namespace']
                    +    assert name == data['metadata']['name'], data
                    +
                    +    yield SimpleNamespace(
                    +        name=name,
                    +        id=f"{namespace}/{name}",
                    +        ssh_user=image_ubuntu.ssh_user
                    +    )
                    +
                    +    # teardown
                    +    code, data = api_client.images.delete(name, namespace)
                    +    assert 200 == code, (code, data)
                    +    image_deleted, (code, data) = image_checker.wait_deleted(name)
                    +    assert image_deleted, (code, data)
                    +
                    def vm_network(api_client, unique_name, cluster_network, vlan_id, network_checker)
                    +
                    + +Expand source code + +
                    @pytest.fixture(scope="class")
                    +def vm_network(api_client, unique_name, cluster_network, vlan_id, network_checker):
                    +    name = f"vnet-{unique_name}"
                    +    code, data = api_client.networks.create(name, vlan_id, cluster_network=cluster_network)
                    +    assert 201 == code, (code, data)
                    +
                    +    vnet_routed, (code, data) = network_checker.wait_routed(name)
                    +    assert vnet_routed, (code, data)
                    +    route = json.loads(data['metadata'].get('annotations').get('network.harvesterhci.io/route'))
                    +
                    +    yield SimpleNamespace(
                    +        name=name,
                    +        vlan_id=vlan_id,
                    +        cidr=route['cidr']
                    +    )
                    +
                    +    # teardown
                    +    code, data = api_client.networks.delete(name)
                    +    assert 200 == code, (code, data)
                    +
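                     The fixture above only consumes the `cidr` field of the `network.harvesterhci.io/route` annotation. As an illustration, that annotation carries a small JSON document along these lines; every field other than `cidr` is an assumption about the controller's output:

                     import json

                     # Hypothetical annotation value; only 'cidr' is relied on above.
                     route_annotation = '{"mode": "auto", "cidr": "192.168.100.0/24", "gateway": "192.168.100.1"}'
                     assert json.loads(route_annotation)['cidr'] == "192.168.100.0/24"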

                @@ -96,7 +516,6 @@

                Classes

                class TestVMWithStorageNetwork
                -
                Expand source code @@ -191,6 +610,7 @@

                Classes

                ) assert snet_enabled, (code, data)
                +

                Class variables

                var pytestmark
                @@ -201,9 +621,48 @@

                Class variables

                Methods

                -def test_enable_storage_network_with_api_stopped_vm(self, api_client, minimal_vm, storage_network, setting_checker, vm_checker, volume_checker) +def test_enable_storage_network_with_api_stopped_vm(self,
                api_client,
                minimal_vm,
                storage_network,
                setting_checker,
                vm_checker,
                volume_checker)
                +
                + +Expand source code + +
                def test_enable_storage_network_with_api_stopped_vm(
                +    self, api_client, minimal_vm, storage_network, setting_checker, vm_checker, volume_checker
                +):
                +    '''
                +    Steps:
                +      1. Have at least one Running VM
                +      2. Enable storage-network (should fail)
                +      3. Stop all VMs via API
                +      4. Enable storage-network
                +    '''
                +    code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
                +    assert 422 == code, (
                +        f"storage-network should NOT be enabled with running VM: {code}, {data}"
                +    )
                +
                +    # stop VM by API
                +    vm_stopped, (code, data) = vm_checker.wait_status_stopped(minimal_vm.name)
                +    assert vm_stopped, (code, data)
                +
                +    code, data = api_client.vms.get(minimal_vm.name)
                +    spec = api_client.vms.Spec.from_dict(data)
                +    vol_names = [vol['volume']['persistentVolumeClaim']['claimName'] for vol in spec.volumes]
                +    vm_volumes_detached, (code, data) = volume_checker.wait_volumes_detached(vol_names)
                +    assert vm_volumes_detached, (code, data)
                +
                +    # enable storage-network
                +    code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
                +    assert 200 == code, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
                +    assert snet_enabled, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(
                +        storage_network.cidr
                +    )
                +    assert snet_enabled, (code, data)
                +

                Steps

                1. Have at least one Running VM
                2. @@ -213,9 +672,62 @@

                  Methods

                -def test_enable_storage_network_with_cli_stopped_vm(self, api_client, ssh_keypair, minimal_vm, storage_network, setting_checker, vm_shell_from_host, wait_timeout, volume_checker) +def test_enable_storage_network_with_cli_stopped_vm(self,
                api_client,
                ssh_keypair,
                minimal_vm,
                storage_network,
                setting_checker,
                vm_shell_from_host,
                wait_timeout,
                volume_checker)
                +
                + +Expand source code + +
                def test_enable_storage_network_with_cli_stopped_vm(
                +    self, api_client, ssh_keypair, minimal_vm, storage_network, setting_checker,
                +    vm_shell_from_host, wait_timeout, volume_checker
                +):
                +    ''' Refer to https://github.com/harvester/tests/issues/1022
                +    Steps:
                +      1. Have at least one Running VM
                +      2. Enable storage-network (should fail)
                +      3. Stop all VMs via VM CLI
                +      4. Enable storage-network
                +    '''
                +    code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
                +    assert 422 == code, (
                +        f"storage-network should NOT be enabled with running VM: {code}, {data}"
                +    )
                +
                +    # stop VM by CLI
                +    with vm_shell_from_host(
                +        minimal_vm.host_ip, minimal_vm.vm_ip, minimal_vm.ssh_user, pkey=ssh_keypair[1]
                +    ) as sh:
                +        sh.exec_command('sudo shutdown now')
                +
                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                +    while endtime > datetime.now():
                +        code, data = api_client.vms.get(minimal_vm.name)
                +        if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
                +            break
                +        sleep(3)
                +    else:
                +        raise AssertionError(
                +            f"Fail to shutdown VM {minimal_vm.name} with error: {code}, {data}"
                +        )
                +
                +    code, data = api_client.vms.get(minimal_vm.name)
                +    spec = api_client.vms.Spec.from_dict(data)
                +    vol_names = [vol['volume']['persistentVolumeClaim']['claimName'] for vol in spec.volumes]
                +    vm_volumes_detached, (code, data) = volume_checker.wait_volumes_detached(vol_names)
                +    assert vm_volumes_detached, (code, data)
                +
                +    # enable storage-network
                +    code, data = api_client.settings.update('storage-network', storage_network.enable_spec)
                +    assert 200 == code, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_harvester()
                +    assert snet_enabled, (code, data)
                +    snet_enabled, (code, data) = setting_checker.wait_storage_net_enabled_on_longhorn(
                +        storage_network.cidr
                +    )
                +    assert snet_enabled, (code, data)
                +

                Refer to https://github.com/harvester/tests/issues/1022

                Steps

                  @@ -269,7 +781,7 @@

                  -

                  Generated by pdoc 0.11.1.

                  +

                  Generated by pdoc 0.11.5.

                  diff --git a/backend/integrations/test_3_vm_functions.html b/backend/integrations/test_3_vm_functions.html index eb4fc8994..14ed6f113 100644 --- a/backend/integrations/test_3_vm_functions.html +++ b/backend/integrations/test_3_vm_functions.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_3_vm_functions API documentation - + @@ -37,6 +48,63 @@

                  Functions

                  def bogus_vlan_net(request, api_client)
                  +
                  + +Expand source code + +
                  @pytest.fixture(scope='module')
                  +def bogus_vlan_net(request, api_client):
                  +    """bogus vlan network fixture (no dhcp) on mgmt network
                  +
                  +    Args:
                  +        request (FixtureRequest): https://docs.pytest.org/en/7.1.x/_modules/_pytest/fixtures.html#FixtureRequest # noqa
                  +        api_client (HarvesterAPI): HarvesterAPI client
                  +
                  +    Yields:
                  +        dict: created bogus network attachment definition dictionary
                  +    """
                  +    original_vlan_id = request.config.getoption('--vlan-id')
                  +
                  +    existing_vm_net_code, existing_vm_net_data = api_client.networks.get()
                  +    assert existing_vm_net_code == 200, 'we should be able to fetch vm networks from harvester'
                  +    existing_vm_net_list = existing_vm_net_data.get('items', [])
                  +    vlans_to_exclude = set()
                  +    vlans_to_exclude.add(1)
                  +    for existing_vm_net in existing_vm_net_list:
                  +        existing_vm_net_config = existing_vm_net.get('spec', {}).get('config', '{}')
                  +        assert existing_vm_net_config != '{}', 'existing vm net should exist'
                  +        existing_vm_net_config_dict = json.loads(existing_vm_net_config)
                  +        assert existing_vm_net_config_dict.get(
                  +            'vlan', 0) != 0, 'we should be able to get the vlan off the config'
                  +        existing_vm_net_vlan = existing_vm_net_config_dict.get('vlan')
                  +        vlans_to_exclude.add(existing_vm_net_vlan)
                  +
                  +    if original_vlan_id != -1:
                  +        vlans_to_exclude.add(original_vlan_id)
                  +
                  +    vlan_ids = set(range(2, 4095))  # 4094 is the last, 1 should always be excluded.
                  +    code, data = api_client.networks.get()
                  +    for net in data['items']:
                  +        config = json.loads(net['spec'].get('config', '{}'))
                  +        if config.get('vlan'):
                  +            try:
                  +                # try to remove the key, but VLAN may be used in both 'mgmt'
                  +                # and other cluster network(s) so it might have already been removed
                  +                vlan_ids.remove(config['vlan'])
                  +            except KeyError:
                  +                print(f"key, {config['vlan']} was already removed by another cluster network")
                  +
                  +    vlan_id = vlan_ids.pop()  # Remove and return an arbitrary set element.
                  +    vm_network_name = f'bogus-net-{vlan_id}'
                  +    code, data = api_client.networks.create(vm_network_name, vlan_id)
                  +    assert 201 == code, (
                  +        f"Failed to create N.A.D. {vm_network_name} with error {code}, {data}"
                  +    )
                  +
                  +    yield data
                  +
                  +    api_client.networks.delete(vm_network_name)
                  +
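                   A hypothetical consumer of the fixture above, shown only to illustrate that it yields the created NetworkAttachmentDefinition dict; this test does not exist in the module and the `networks.get(name)` call signature is assumed:

                   def test_bogus_net_created(api_client, bogus_vlan_net):
                       # The fixture yields the NAD as returned by the create call, so its
                       # metadata.name can be reused to attach a NIC that gets no DHCP lease.
                       code, data = api_client.networks.get(bogus_vlan_net['metadata']['name'])
                       assert 200 == code, (code, data)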

                  bogus vlan network fixture (no dhcp) on mgmt network

                  Args

                  @@ -55,24 +123,149 @@

                  Yields

                  def image(api_client, image_opensuse, unique_name, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.fixture(scope="module")
                  +def image(api_client, image_opensuse, unique_name, wait_timeout):
                  +    unique_image_id = f'image-{unique_name}'
                  +    code, data = api_client.images.create_by_url(
                  +        unique_image_id, image_opensuse.url, display_name=f"{unique_name}-{image_opensuse.name}"
                  +    )
                  +
                  +    assert 201 == code, (code, data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.images.get(unique_image_id)
                  +        if 100 == data.get('status', {}).get('progress', 0):
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            "Failed to create Image with error:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +
                  +    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
                  +               user=image_opensuse.ssh_user)
                  +
                  +    code, data = api_client.images.delete(unique_image_id)
                  +
                  def small_volume(api_client, unique_name)
                  +
                  + +Expand source code + +
                  @pytest.fixture(scope="class")
                  +def small_volume(api_client, unique_name):
                  +    vol_name, size = f"sv-{unique_name}", 3
                  +    vol_spec = api_client.volumes.Spec(size)
                  +    code, data = api_client.volumes.create(vol_name, vol_spec)
                  +
                  +    assert 201 == code, (code, data)
                  +
                  +    yield vol_name, size
                  +
                  +    code, data = api_client.volumes.delete(vol_name)
                  +
                  def stopped_vm(api_client, ssh_keypair, wait_timeout, image, unique_vm_name)
                  +
                  + +Expand source code + +
                  @pytest.fixture(scope="class")
                  +def stopped_vm(api_client, ssh_keypair, wait_timeout, image, unique_vm_name):
                  +    unique_vm_name = f"stopped-{datetime.now().strftime('%m%S%f')}-{unique_vm_name}"
                  +    cpu, mem = 1, 2
                  +    pub_key, pri_key = ssh_keypair
                  +    vm_spec = api_client.vms.Spec(cpu, mem)
                  +    vm_spec.add_image("disk-0", image['id'])
                  +    vm_spec.run_strategy = "Halted"
                  +
                  +    userdata = yaml.safe_load(vm_spec.user_data)
                  +    userdata['ssh_authorized_keys'] = [pub_key]
                  +    vm_spec.user_data = yaml.dump(userdata)
                  +
                  +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_vm_name)
                  +        if "Stopped" == data.get('status', {}).get('printableStatus'):
                  +            break
                  +        sleep(1)
                  +
                  +    yield unique_vm_name, image['user']
                  +
                  +    code, data = api_client.vms.get(unique_vm_name)
                  +    vm_spec = api_client.vms.Spec.from_dict(data)
                  +
                  +    api_client.vms.delete(unique_vm_name)
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_vm_name)
                  +        if 404 == code:
                  +            break
                  +        sleep(3)
                  +
                  +    for vol in vm_spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        api_client.volumes.delete(vol_name)
                  +
                  def test_create_stopped_vm(api_client, stopped_vm, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.virtualmachines
                  +def test_create_stopped_vm(api_client, stopped_vm, wait_timeout):
                  +    """
                  +    To cover test:
                  +    - https://harvester.github.io/tests/manual/virtual-machines/create-a-vm-with-start-vm-on-creation-unchecked/ # noqa
                  +
                  +    Steps:
                  +        1. Create a VM with 1 CPU 2 Memory and runStrategy is `Halted`
                  +        2. Save
                   +    Expected Result:
                   +        - VM should be created
                   +        - VM should be Stopped
                  +        - VMI should not exist
                  +    """
                  +    unique_vm_name, _ = stopped_vm
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_vm_name)
                  +        if (code == 200
                  +                and 'Halted' == data['spec']['runStrategy']
                  +                and 'Stopped' == data.get('status', {}).get('printableStatus')):
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Create a Stopped VM({unique_vm_name}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +
                  +    code, data = api_client.vms.get_status(unique_vm_name)
                  +    assert 404 == code, (code, data)
                  +

                  To cover test: - https://harvester.github.io/tests/manual/virtual-machines/create-a-vm-with-start-vm-on-creation-unchecked/ # noqa

                  Steps

                  @@ -91,6 +284,129 @@

                  Steps

                  def test_create_vm_no_available_resources(resource, api_client, image, wait_timeout, unique_vm_name, sleep_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.virtualmachines
                  +@pytest.mark.negative
                  +@pytest.mark.parametrize("resource", [dict(cpu=MAX), dict(mem=MAX), dict(disk=MAX),
                  +                                      dict(mem=MAX, cpu=MAX), dict(mem=MAX, cpu=MAX, disk=MAX)],
                  +                         ids=['cpu', 'mem', 'disk', 'mem-and-cpu', 'mem-cpu-and-disk'])
                  +def test_create_vm_no_available_resources(resource, api_client, image,
                  +                                          wait_timeout, unique_vm_name, sleep_timeout):
                  +    """Creates a VM with outlandish resources for varying elements (purposefully negative test)
                  +
                  +    Prerequisite:
                  +        Setting opensuse-image-url set to a valid URL for
                  +        an opensuse image.
                  +
                  +    Manual Test Doc(s):
                  +        - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-cpu-not-in-cluster/ # noqa
                  +        - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-memory-not-in-cluster/ # noqa
                  +        - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-both-cpu-and-memory-not-in-cluster/ # noqa
                  +
                  +
                  +    Args:
                  +        request (FixtureRequest): https://docs.pytest.org/en/7.1.x/_modules/_pytest/fixtures.html#FixtureRequest # noqa
                  +        resource (dict): dict of name(s) & value that can be deconstructed
                  +        api_client (HarvesterAPI): HarvesterAPI client
                  +        image (str): corresponding image from fixture
                  +        wait_timeout (int): seconds for wait timeout from fixture
                  +        unique_vm_name (str): string of unique vm name
                  +
                  +    Raises:
                  +        AssertionError: when vm can not be created, all vms should be allowed to be created
                  +
                  +    Steps:
                  +    1. build vm object specs for outlandish resource(s) under test
                  +    2. request to build the vm, assert that succeeds
                  +    3. check for conditions of guest not running and vm being unschedulable
                  +    4. delete vm and volumes
                  +
                  +    Expected Result:
                  +    - building vm with outlandish resource requests to be successful
                  +    - asserting that the status condition of the vm that is built to not be running
                  +    - asserting that the status condition of the vm that is built to be unschedulable
                  +    - assert deleting vm and volumes to be successful
                  +    """
                  +    unique_name_for_vm = f"{''.join(resource.keys())}-{unique_vm_name}"
                  +    overall_vm_obj = dict(cpu=1, mem=2, disk=10)
                  +    overall_vm_obj.update(resource)
                  +
                  +    vm = api_client.vms.Spec(overall_vm_obj['cpu'], overall_vm_obj['mem'])
                  +    vm.add_image("disk-0", image['id'], size=overall_vm_obj.get('disk'))
                  +    code, data = api_client.vms.create(unique_name_for_vm, vm)
                  +    assert 201 == code, (code, data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_name_for_vm)
                  +        if 200 == code and len(data.get('status', {}).get('conditions', [])) > 1:
                  +            checks = dict(GuestNotRunning=False, Unschedulable=False)
                  +            for condition in data['status']['conditions']:
                  +                if condition.get('reason') in checks:
                  +                    checks[condition['reason']] = True
                  +
                  +            assert all(checks.values()), (
                  +                "The VM miss condition:\n"
                  +                " and ".join(k for k, v in checks.items() if not v)
                  +            )
                  +            code, data = api_client.vms.delete(unique_name_for_vm)
                  +            assert 200 == code, (code, data)
                  +
                  +            spec = api_client.vms.Spec.from_dict(data)
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to create VM({overall_vm_obj.get('cpu')} core, \n"
                  +            f"{overall_vm_obj.get('mem')} RAM, \n"
                  +            f"{overall_vm_obj.get('disk')} DISK) with errors:\n"
                  +            f"Phase: {data.get('status', {}).get('phase')}\t"
                  +            f"Status: {data.get('status')}\n"
                  +            f"API Status({code}): {data}"
                  +        )
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_name_for_vm)
                  +        if 404 == code:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Delete VM({unique_name_for_vm}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +    fails, check = [], dict()
                  +    for vol in spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        check[vol_name] = api_client.volumes.delete(vol_name)
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        l_check = dict()
                  +        for vol_name, (code, data) in check.items():
                  +            if code not in (200, 204):
                  +                fails.append(
                  +                    (vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +            else:
                  +                code, data = api_client.volumes.get(vol_name)
                  +                if 404 != code:
                  +                    l_check[vol_name] = (code, data)
                  +        check = l_check
                  +        if not check:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        for vol_name, (code, data) in check.items():
                  +            fails.append(
                  +                (vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +    assert not fails, (
                  +        f"Failed to delete VM({unique_vm_name})'s volumes with errors:\n"
                  +        "\n".join(f"Volume({n}): {r}" for n, r in fails)
                  +    )
                  +
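                   `MAX` is a module-level constant defined outside this hunk; it is presumably just an intentionally unsatisfiable resource request. A purely hypothetical stand-in, only to make the parametrization above readable:

                   # Hypothetical stand-in for the module-level MAX used in the parametrize
                   # above; the real constant may differ, the point is only that no node
                   # can satisfy the request.
                   MAX = 2 ** 16   # e.g. 65536 cores / GiB of RAM / GiB of disk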

                  Creates a VM with outlandish resources for varying elements (purposefully negative test)

                  Prerequisite

                  Setting opensuse-image-url set to a valid URL for @@ -134,6 +450,46 @@

                  Raises

                  def test_minimal_vm(api_client, image, unique_vm_name, wait_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.virtualmachines
                  +@pytest.mark.dependency(name="minimal_vm")
                  +def test_minimal_vm(api_client, image, unique_vm_name, wait_timeout):
                  +    """
                  +    To cover test:
                  +    - https://harvester.github.io/tests/manual/virtual-machines/create-a-vm-with-all-the-default-values/ # noqa
                  +
                  +    Steps:
                  +        1. Create a VM with 1 CPU 2 Memory and other default values
                  +        2. Save
                   +    Expected Result:
                   +        - VM should be created
                   +        - VM should be Started
                  +    """
                  +    cpu, mem = 1, 2
                  +    vm = api_client.vms.Spec(cpu, mem)
                  +    vm.add_image("disk-0", image['id'])
                  +
                  +    code, data = api_client.vms.create(unique_vm_name, vm)
                  +
                  +    assert 201 == code, (code, data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_vm_name)
                  +        if 200 == code and "Running" == data.get('status', {}).get('phase'):
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to create Minimal VM({cpu} core, {mem} RAM) with errors:\n"
                  +            f"Status: {data.get('status')}\n"
                  +            f"API Status({code}): {data}"
                  +        )
                  +

                  To cover test: - https://harvester.github.io/tests/manual/virtual-machines/create-a-vm-with-all-the-default-values/ # noqa

                  Steps

                  @@ -151,6 +507,168 @@

                  Steps

                  def test_update_vm_machine_type(api_client, image, unique_vm_name, wait_timeout, machine_types, sleep_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.virtualmachines
                  +@pytest.mark.skip_version_if(
                  +    "> v1.2.1", "> v1.3.0",
                  +    reason="`pc type removed, ref: https://github.com/harvester/harvester/issues/5437"
                  +)
                  +@pytest.mark.parametrize(
                  +    "machine_types", [("q35", "pc"), ("pc", "q35")], ids=['q35_to_pc', 'pc_to_q35']
                  +)
                  +def test_update_vm_machine_type(
                  +    api_client, image, unique_vm_name, wait_timeout, machine_types, sleep_timeout
                  +):
                  +    """Create a VM with machine type then update to another
                  +
                  +    Prerequisite:
                  +        Setting opensuse-image-url set to a valid URL for
                  +        an opensuse image.
                  +
                  +    Manual Test Doc(s):
                  +        - https://harvester.github.io/tests/manual/virtual-machines/create-new-vm-with-a-machine-type-pc/ # noqa
                  +        - https://harvester.github.io/tests/manual/virtual-machines/create-new-vm-with-a-machine-type-q35/ # noqa
                  +
                  +    Args:
                  +        api_client (HarvesterAPI): HarvesterAPI client
                  +        image (str): corresponding image from fixture
                  +        wait_timeout (int): seconds for wait timeout from fixture
                  +        unique_vm_name (str): fixture at module level based unique vm name
                  +        machine_types (tuple)(str): deconstructed to provide starting type and desired end type
                  +
                  +    Raises:
                  +        AssertionError: failure to create, stop, update, or start
                  +
                  +    Steps:
                  +    1. build vm with starting machine type
                  +    2. power down vm with starting machine type
                  +    3. update vm from machine type starting to machine type ending
                  +    4. power up vm
                  +    5. delete vm and volumes
                  +
                  +    Expected Result:
                  +    - building a vm with machine type starting to be successful
                  +    - powering down the vm with machine type starting to be successful
                  +    - modifying the existing machine type starting and updating to ending to be successful
                  +    - powering up the modified vm to be successful and that now has the machine type ending
                  +    - deleting the vm to be successful
                  +    """
                  +    cpu, mem = 1, 2
                  +    starting_machine_type, ending_machine_type = machine_types
                  +    vm = api_client.vms.Spec(cpu, mem)
                  +    vm.machine_type = starting_machine_type
                  +
                  +    vm.add_image("disk-0", image['id'])
                  +    unique_name_for_vm = f"{''.join(starting_machine_type)}-{unique_vm_name}"
                  +
                  +    code, vm_create_data = api_client.vms.create(unique_name_for_vm, vm)
                  +
                  +    assert 201 == code, (code, vm_create_data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_name_for_vm)
                  +        if 200 == code and "Running" == data.get('status', {}).get('phase'):
                  +            code, data = api_client.vms.stop(unique_name_for_vm)
                   +            assert 204 == code, "`Stop` returned unexpected status code"
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to create VM({cpu} core, {mem} RAM) with errors:\n"
                  +            f"Phase: {data.get('status', {}).get('phase', '')}\t"
                  +            f"Status: {data.get('status', {})}\n"
                  +            f"API Status({code}): {data}"
                  +        )
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_name_for_vm)
                  +        if 404 == code:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Stop VM({unique_name_for_vm}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +    code, data = api_client.vms.get(unique_name_for_vm)
                  +    assert "Halted" == data['spec']['runStrategy']
                  +    assert "Stopped" == data['status']['printableStatus']
                  +    code, vm_to_modify = api_client.vms.get(unique_name_for_vm)
                  +    assert code == 200
                  +    spec = api_client.vms.Spec.from_dict(vm_to_modify)
                  +    spec.machine_type = ending_machine_type
                  +    code, data = api_client.vms.update(unique_name_for_vm, spec)
                  +    result = api_client.vms.Spec.from_dict(data)
                  +    if 200 == code and result.machine_type == ending_machine_type:
                  +        code, data = api_client.vms.start(unique_name_for_vm)
                  +        assert 204 == code, "`Start` returned unexpected status code"
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Update VM({unique_name_for_vm}) with errors:\n"
                  +            f"Phase: {data.get('status', {}).get('phase')}\t"
                  +            f"Status: {data.get('status')}\n"
                  +            f"API Status({code}): {data}"
                  +        )
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_name_for_vm)
                  +        strategy = data['spec']['runStrategy']
                  +        pstats = data['status']['printableStatus']
                  +        if "Halted" != strategy and "Running" == pstats:
                  +            code, data = api_client.vms.delete(unique_name_for_vm)
                  +            assert 200 == code, (code, data)
                  +
                  +            spec = api_client.vms.Spec.from_dict(data)
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Start VM({unique_name_for_vm}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_name_for_vm)
                  +        if 404 == code:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Delete VM({unique_name_for_vm}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +    fails, check = [], dict()
                  +    for vol in spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        check[vol_name] = api_client.volumes.delete(vol_name)
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        l_check = dict()
                  +        for vol_name, (code, data) in check.items():
                  +            if 200 != code:
                  +                fails.append(
                  +                    (vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +            else:
                  +                code, data = api_client.volumes.get(vol_name)
                  +                if 404 != code:
                  +                    l_check[vol_name] = (code, data)
                  +        check = l_check
                  +        if not check:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        for vol_name, (code, data) in check.items():
                  +            fails.append(
                  +                (vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +    assert not fails, (
                  +        f"Failed to delete VM({unique_name_for_vm})'s volumes with errors:\n"
                  +        "\n".join(f"Volume({n}): {r}" for n, r in fails)
                  +    )
                  +

                  Create a VM with machine type then update to another

                  Prerequisite

                  Setting opensuse-image-url set to a valid URL for an opensuse image.
                  @@ -192,6 +710,125 @@

                  Raises
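
                  For quick reference, the machine-type change at the heart of this test boils down to re-parsing the VM spec, swapping the field, and pushing the update; a minimal sketch assuming the same api_client fixture used throughout these tests (the helper name and the 60x5s polling budget are illustrative):

                  from time import sleep

                  def update_machine_type(api_client, vm_name, new_type, retries=60):
                      # Fetch the stopped VM and parse it into a mutable Spec
                      code, data = api_client.vms.get(vm_name)
                      assert 200 == code, (code, data)
                      spec = api_client.vms.Spec.from_dict(data)

                      # Swap only the machine type, then push the update back
                      spec.machine_type = new_type
                      code, data = api_client.vms.update(vm_name, spec)
                      assert 200 == code, (code, data)

                      # Power the VM back on and poll until it reports Running
                      code, _ = api_client.vms.start(vm_name)
                      assert 204 == code
                      for _ in range(retries):
                          code, data = api_client.vms.get_status(vm_name)
                          if 200 == code and "Running" == data.get('status', {}).get('phase'):
                              return data
                          sleep(5)
                      raise AssertionError(f"VM {vm_name} not Running after machine-type update")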

                  def test_vm_with_bogus_vlan(api_client, image, unique_vm_name, wait_timeout, bogus_vlan_net, sleep_timeout)
                  +
                  + +Expand source code + +
                  @pytest.mark.p0
                  +@pytest.mark.negative
                  +@pytest.mark.virtualmachines
                  +def test_vm_with_bogus_vlan(api_client, image, unique_vm_name,
                  +                            wait_timeout, bogus_vlan_net, sleep_timeout):
                  +    """test building a VM with a VM (VLAN) Network has a bogus VLAN ID (no DHCP)
                  +
                  +    Prerequisite:
                  +        Setting opensuse-image-url set to a valid URL for
                  +        an opensuse image.
                  +
                  +    Manual Test Doc(s):
                  +        - N/A
                  +
                  +    Args:
                  +        api_client (HarvesterAPI): HarvesterAPI client
                  +        image (str): corresponding image from fixture
                  +        unique_vm_name (str): module-level fixture providing a unique VM name
                  +        wait_timeout (int): seconds for wait timeout from fixture
                  +        bogus_vlan_net (dict): data dict describing the VM (VLAN) network
                  +
                  +    Raises:
                  +        AssertionError: fails to create, delete, or delete volumes
                  +
                  +    Steps:
                  +    1. build vm with a single virtio network interface
                  +    that has a bogus vlan vm network (no dhcp)
                  +    2. delete vm and volumes
                  +
                  +    Expected Result:
                  +    - assert vlan vm network can be created successfully (fixture level)
                  +    - assert vm can be created successfully
                  +    - assert 'ipAddresses' not in the status of running vm's interfaces
                  +    - assert can delete vm and volumes
                  +    """
                  +    cpu, mem = 1, 2
                  +    bvn = bogus_vlan_net
                  +    vm = api_client.vms.Spec(cpu, mem)
                  +    net_uid = f"{bvn['metadata']['namespace']}/{bvn['metadata']['name']}"
                  +    vm.add_network('no-dhcp', net_uid)
                  +    vm.add_image("disk-0", image['id'])
                  +    code, data = api_client.vms.create(unique_vm_name, vm)
                  +
                  +    assert 201 == code, (code, data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_vm_name)
                  +        if 200 == code and "Running" == data.get('status', {}).get('phase'):
                  +            code, data = api_client.vms.get_status(unique_vm_name)
                  +            assert 200 == code, (code, data)
                  +            assert data['status']['interfaces'][1] is not None
                  +            assert 'infoSource' in data['status']['interfaces'][1]
                  +            assert 'mac' in data['status']['interfaces'][1]
                  +            assert data['status']['interfaces'][1]['mac'] is not None
                  +            assert 'name' in data['status']['interfaces'][1]
                  +            # checking that ipAddress/es are not present due to
                  +            # vlan that was used not having dhcp so no assignment
                  +            # kubevirt v1 virtualmachineinstancenetworkinterface
                  +            assert 'ipAddresses' not in data['status']['interfaces'][1]
                  +            assert 'ipAddress' not in data['status']['interfaces'][1]
                  +            code, data = api_client.vms.delete(unique_vm_name)
                  +            assert 200 == code, (code, data)
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to create VM({cpu} core, {mem} RAM) with errors:\n"
                  +            f"Phase: {data.get('status', {}).get('phase')}\t"
                  +            f"Status: {data.get('status')}\n"
                  +            f"API Status({code}): {data}"
                  +        )
                  +    spec = api_client.vms.Spec.from_dict(data)
                  +
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_vm_name)
                  +        if 404 == code:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Delete VM({unique_vm_name}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +
                  +    fails, check = [], dict()
                  +    for vol in spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        check[vol_name] = api_client.volumes.delete(vol_name)
                  +
                  +    while endtime > datetime.now():
                  +        l_check = dict()
                  +        for vol_name, (code, data) in check.items():
                  +            if 200 != code:
                  +                fails.append(
                  +                    (vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +            else:
                  +                code, data = api_client.volumes.get(vol_name)
                  +                if 404 != code:
                  +                    l_check[vol_name] = (code, data)
                  +        check = l_check
                  +        if not check:
                  +            break
                  +        sleep(sleep_timeout)
                  +    else:
                  +        for vol_name, (code, data) in check.items():
                  +            fails.append(
                  +                (vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +
                  +    assert not fails, (
                  +        f"Failed to delete VM({unique_vm_name})'s volumes with errors:\n"
                  +        "\n".join(f"Volume({n}): {r}" for n, r in fails)
                  +    )
                  +

                  test building a VM with a VM (VLAN) Network that has a bogus VLAN ID (no DHCP)

                  Prerequisite

                  Setting opensuse-image-url set to a valid URL for an opensuse image.
                  @@ -230,12 +867,41 @@

                  Raises
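
                  The network-attachment pattern exercised above is compact enough to restate on its own; a hedged sketch assuming the same api_client fixture and an existing VM (VLAN) network object (the helper name and the 'nic-1'/'disk-0' labels are illustrative). When the referenced VLAN has no DHCP, the resulting secondary interface reports a MAC but no ipAddresses:

                  def build_vm_on_vlan(api_client, vm_name, image_id, vlan_net, cpu=1, mem=2):
                      # VM networks are referenced as "<namespace>/<name>"
                      net_uid = f"{vlan_net['metadata']['namespace']}/{vlan_net['metadata']['name']}"

                      spec = api_client.vms.Spec(cpu, mem)
                      spec.add_network('nic-1', net_uid)   # secondary interface on the VLAN network
                      spec.add_image('disk-0', image_id)   # boot disk from an existing image

                      code, data = api_client.vms.create(vm_name, spec)
                      assert 201 == code, (code, data)
                      return data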

                  def unique_vm_name(unique_name)
                  +
                  + +Expand source code + +
                  @pytest.fixture(scope="module")
                  +def unique_vm_name(unique_name):
                  +    return f"vm-{unique_name}"
                  +
                  def unset_cpu_memory_overcommit(api_client)
                  +
                  + +Expand source code + +
                  @pytest.fixture
                  +def unset_cpu_memory_overcommit(api_client):
                  +    code, data = api_client.settings.get('overcommit-config')
                  +    assert 200 == code, (code, data)
                  +
                  +    origin_val = json.loads(data.get('value', data['default']))
                  +    spec = api_client.settings.Spec.from_dict(data)
                  +    spec.cpu = spec.memory = 100
                  +    spec.storage = origin_val['storage']
                  +    code, data = api_client.settings.update('overcommit-config', spec)
                  +    assert 200 == code, (code, data)
                  +
                  +    yield json.loads(data['value']), origin_val
                  +
                  +    spec.val = origin_val
                  +    api_client.settings.update('overcommit-config', spec)
                  +
                  @@ -247,22 +913,6 @@

                  Classes

                  class TestHotPlugVolume
                  -

                  To cover test:
                  - https://harvester.github.io/tests/manual/volumes/support-volume-hot-unplug/

                  Steps:
                  1. Create and start VM
                  2. Create Data volume
                  3. Attach data volume
                  4. Detach data volume

                  Expected Result:
                  - VM should start successfully
                  - Data volume should be attached and available in VM
                  - Data volume should be detached and unavailable in VM
                  - VM should not reboot or restart while attaching/detaching the volume
                  Expand source code @@ -426,6 +1076,22 @@

                  Steps

                  assert not scsi, "SCSI device still available in `/sys/block/`"
                  assert not out, "SCSI device still available in `lsblk -r`"
                  +

                  To cover test:
                  - https://harvester.github.io/tests/manual/volumes/support-volume-hot-unplug/

                  Steps:
                  1. Create and start VM
                  2. Create Data volume
                  3. Attach data volume (see the hot-plug sketch below)
                  4. Detach data volume

                  Expected Result:
                  - VM should start successfully
                  - Data volume should be attached and available in VM
                  - Data volume should be detached and unavailable in VM
                  - VM should not reboot or restart while attaching/detaching the volume
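
                  The hot-plug calls themselves are single requests against the VM sub-API; a minimal sketch, assuming the data volume already exists and the VM is running (the helper name and argument names are illustrative):

                  def hotplug_cycle(api_client, vm_name, disk_name, vol_name):
                      # Attach the existing volume to the running VM; no reboot is expected
                      code, data = api_client.vms.add_volume(vm_name, disk_name, vol_name)
                      assert 204 == code, (code, data)

                      # ... verify the new SCSI disk from inside the guest, e.g. via `lsblk` over SSH ...

                      # Detach it again; the guest should lose the device without restarting
                      code, data = api_client.vms.remove_volume(vm_name, disk_name)
                      assert 204 == code, (code, data)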

                  Class variables

                  var disk_name
                  @@ -443,18 +1109,168 @@

                  Methods

                  def login_to_vm_from_host(self, host_shell, vm_shell, wait_timeout, host_ip, ssh_user, pri_key, vm_ip)
                  +
                  + +Expand source code + +
                  @contextmanager
                  +def login_to_vm_from_host(
                  +    self, host_shell, vm_shell, wait_timeout, host_ip, ssh_user, pri_key, vm_ip
                  +):
                  +    with host_shell.login(host_ip, jumphost=True) as host_sh:
                  +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            try:
                  +                vm_sh.connect(vm_ip, jumphost=host_sh.client)
                  +            except ChannelException as e:
                  +                login_ex = e
                  +                sleep(3)
                  +            else:
                  +                break
                  +        else:
                  +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                  +
                  +        with vm_sh as vm_sh:
                  +            yield (vm_sh, host_sh)
                  +
                  -def test_add(self, api_client, ssh_keypair, wait_timeout, vm_checker, host_shell, vm_shell, small_volume, stopped_vm) +def test_add(self,
                  api_client,
                  ssh_keypair,
                  wait_timeout,
                  vm_checker,
                  host_shell,
                  vm_shell,
                  small_volume,
                  stopped_vm)
                  +
                  + +Expand source code + +
                  @pytest.mark.dependency(name="hot_plug_volume")
                  +def test_add(
                  +    self, api_client, ssh_keypair, wait_timeout, vm_checker,
                  +    host_shell, vm_shell, small_volume, stopped_vm
                  +):
                  +    unique_vm_name, ssh_user = stopped_vm
                  +    pub_key, pri_key = ssh_keypair
                  +
                  +    # Start VM
                  +    code, data = api_client.vms.start(unique_vm_name)
                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                  +    assert vm_got_ips, (
                  +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                  +        f"Status: {data.get('status')}\n"
                  +        f"API Status({code}): {data}"
                  +    )
                  +    # Log into VM to verify OS is ready
                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                  +                 if iface['name'] == 'default')
                  +    code, data = api_client.hosts.get(data['status']['nodeName'])
                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                  +                   if addr['type'] == 'InternalIP')
                  +
                  +    with self.login_to_vm_from_host(
                  +        host_shell, vm_shell, wait_timeout, host_ip, ssh_user, pri_key, vm_ip
                  +    ) as (sh, host_sh):
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            out, err = sh.exec_command('cloud-init status')
                  +            if 'done' in out:
                  +                break
                  +            sleep(3)
                  +        else:
                  +            raise AssertionError(
                  +                f"VM {unique_vm_name} Started {wait_timeout} seconds"
                  +                f", but cloud-init still in {out}"
                  +            )
                  +
                  +    # attach volume
                  +    vol_name, vol_size = small_volume
                  +    code, data = api_client.vms.add_volume(unique_vm_name, self.disk_name, vol_name)
                  +
                  +    assert 204 == code, (code, data)
                  +
                  +    # Log into VM to verify the volume was hot plugged
                  +    with self.login_to_vm_from_host(
                  +        host_shell, vm_shell, wait_timeout, host_ip, ssh_user, pri_key, vm_ip
                  +    ) as (sh, host_sh):
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            scsi, err = sh.exec_command(
                  +                "ls -d /sys/block/sd*/device/scsi_device/*"
                  +                " | awk -F '[/]' '{print $4,$7}'"
                  +            )
                  +            if scsi:
                  +                break
                  +            sleep(3)
                  +        else:
                  +            raise AssertionError(
                  +                f"Hot pluged Volume {vol_name} unavailable after {wait_timeout}s\n"
                  +                f"STDOUT: {scsi}, STDERR: {err}"
                  +            )
                  +
                  +        out, err = sh.exec_command(
                  +            f"lsblk -r | grep {scsi.split()[0]}"
                  +        )
                  +
                  +    assert f"{vol_size}G 0 disk" in out, (
                  +        f"existing Volume {vol_size}G not found\n"
                  +        f"lsblk output: {out}"
                  +    )
                  +
                  -def test_remove(self, api_client, ssh_keypair, wait_timeout, host_shell, vm_shell, small_volume, stopped_vm) +def test_remove(self,
                  api_client,
                  ssh_keypair,
                  wait_timeout,
                  host_shell,
                  vm_shell,
                  small_volume,
                  stopped_vm)
                  +
                  + +Expand source code + +
                  @pytest.mark.dependency(depends=["hot_plug_volume"])
                  +def test_remove(
                  +    self, api_client, ssh_keypair, wait_timeout, host_shell, vm_shell, small_volume, stopped_vm
                  +):
                  +    unique_vm_name, ssh_user = stopped_vm
                  +    pub_key, pri_key = ssh_keypair
                  +
                  +    # remove volume
                  +    vol_name, vol_size = small_volume
                  +    code, data = api_client.vms.remove_volume(unique_vm_name, self.disk_name)
                  +
                  +    assert 204 == code, (code, data)
                  +
                  +    # Log into VM to verify volume removed
                  +    code, data = api_client.vms.get_status(unique_vm_name)
                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                  +                 if iface['name'] == 'default')
                  +    code, data = api_client.hosts.get(data['status']['nodeName'])
                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                  +                   if addr['type'] == 'InternalIP')
                  +
                  +    with self.login_to_vm_from_host(
                  +        host_shell, vm_shell, wait_timeout, host_ip, ssh_user, pri_key, vm_ip
                  +    ) as (sh, host_sh):
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            scsi, err = sh.exec_command(
                  +                "ls -d1 /sys/block/* | grep 'sd'"
                  +            )
                  +            if not scsi:
                  +                break
                  +            sleep(3)
                  +        else:
                  +            raise AssertionError(
                  +                f"Hot pluged Volume {vol_name} still available after {wait_timeout}s\n"
                  +                f"STDOUT: {scsi}, STDERR: {err}"
                  +            )
                  +
                  +        out, err = sh.exec_command(
                  +            "lsblk -r | grep 'sd'"
                  +        )
                  +
                  +    assert not scsi, "SCSI device still available in `/sys/block/`"
                  +    assert not out, "SCSI device still available in `lsblk -r`"
                  +
                  @@ -463,7 +1279,6 @@

                  Methods

                  class TestVMClone
                  -
                  Expand source code @@ -791,6 +1606,7 @@

                  Methods

                  vol_name = vol['volume']['persistentVolumeClaim']['claimName'] api_client.volumes.delete(vol_name)
                  +

                  Class variables

                  var pytestmark
                  @@ -804,6 +1620,160 @@

                  Methods

                  def test_clone_running_vm(self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm)
                  +
                  + +Expand source code + +
                  def test_clone_running_vm(
                  +    self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
                  +):
                  +    """
                  +    To cover test:
                  +    - (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-on/ # noqa
                  +    - (new) https://github.com/harvester/tests/issues/361
                  +
                  +    Steps:
                  +        1. Create a VM with 1 CPU 2 Memory
                  +        2. Start the VM and write some data
                  +        3. Clone the VM into VM-cloned
                  +        4. Verify VM-Cloned
                  +
                  +    Expected Result:
                  +        - Cloned-VM should be available and starting
                  +        - Cloned-VM should become `Running`
                  +        - Written data should be available in Cloned-VM
                  +    """
                  +    unique_vm_name, ssh_user = stopped_vm
                  +    pub_key, pri_key = ssh_keypair
                  +    code, data = api_client.vms.start(unique_vm_name)
                  +
                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                  +    assert vm_got_ips, (
                  +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                  +        f"Status: {data.get('status')}\n"
                  +        f"API Status({code}): {data}"
                  +    )
                  +
                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                  +                 if iface['name'] == 'default')
                  +    code, data = api_client.hosts.get(data['status']['nodeName'])
                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                  +                   if addr['type'] == 'InternalIP')
                  +
                  +    # Log into VM to make some data
                  +    with host_shell.login(host_ip, jumphost=True) as h:
                  +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            try:
                  +                vm_sh.connect(vm_ip, jumphost=h.client)
                  +            except ChannelException as e:
                  +                login_ex = e
                  +                sleep(3)
                  +            else:
                  +                break
                  +        else:
                  +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                  +
                  +        with vm_sh as sh:
                  +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +            while endtime > datetime.now():
                  +                out, err = sh.exec_command('cloud-init status')
                  +                if 'done' in out:
                  +                    break
                  +                sleep(3)
                  +            else:
                  +                raise AssertionError(
                  +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                  +                    f", but cloud-init still in {out}"
                  +                )
                  +            out, err = sh.exec_command(f'echo {unique_vm_name!r} > ~/vmname')
                  +            assert not err, (out, err)
                  +            sh.exec_command('sync')
                  +
                  +    # Clone VM into new VM
                  +    cloned_name = f"cloned-{unique_vm_name}"
                  +    code, _ = api_client.vms.clone(unique_vm_name, cloned_name)
                  +    assert 204 == code, f"Failed to clone VM {unique_vm_name} into new VM {cloned_name}"
                  +
                  +    # Check cloned VM is created
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(cloned_name)
                  +        if 200 == code:
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"restored VM {cloned_name} is not created"
                  +        )
                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(cloned_name, ['default'])
                  +    assert vm_got_ips, (
                  +        f"Failed to Start VM({cloned_name}) with errors:\n"
                  +        f"Status: {data.get('status')}\n"
                  +        f"API Status({code}): {data}"
                  +    )
                  +
                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                  +                 if iface['name'] == 'default')
                  +    code, data = api_client.hosts.get(data['status']['nodeName'])
                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                  +                   if addr['type'] == 'InternalIP')
                  +
                  +    # Log into new VM to check VM is cloned as old one
                  +    with host_shell.login(host_ip, jumphost=True) as h:
                  +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            try:
                  +                vm_sh.connect(vm_ip, jumphost=h.client)
                  +            except ChannelException as e:
                  +                login_ex = e
                  +                sleep(3)
                  +            else:
                  +                break
                  +        else:
                  +            raise AssertionError(f"Unable to login to VM {cloned_name}") from login_ex
                  +
                  +        with vm_sh as sh:
                  +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +            while endtime > datetime.now():
                  +                out, err = sh.exec_command('cloud-init status')
                  +                if 'done' in out:
                  +                    break
                  +                sleep(3)
                  +            else:
                  +                raise AssertionError(
                  +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                  +                    f", but cloud-init still in {out}"
                  +                )
                  +
                  +            out, err = sh.exec_command('cat ~/vmname')
                  +        assert unique_vm_name in out, (
                  +            f"cloud-init writefile failed\n"
                  +            f"Executed stdout: {out}\n"
                  +            f"Executed stderr: {err}"
                  +        )
                  +
                  +    # Remove cloned VM and volumes
                  +    code, data = api_client.vms.get(cloned_name)
                  +    cloned_spec = api_client.vms.Spec.from_dict(data)
                  +    api_client.vms.delete(cloned_name)
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(cloned_name)
                  +        if 404 == code:
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Delete VM({cloned_name}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +    for vol in cloned_spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        api_client.volumes.delete(vol_name)
                  +

                  To cover test: - (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-on/ # noqa - (new) https://github.com/harvester/tests/issues/361
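
                  Stripped of the SSH-based data verification, the clone flow is just the clone call plus a wait for the new VM object to appear; a condensed sketch assuming the same api_client fixture (the helper name and timeout are illustrative):

                  from datetime import datetime, timedelta
                  from time import sleep

                  def clone_and_wait(api_client, src_name, dst_name, wait_timeout=300):
                      code, _ = api_client.vms.clone(src_name, dst_name)
                      assert 204 == code, f"Failed to clone VM {src_name} into {dst_name}"

                      endtime = datetime.now() + timedelta(seconds=wait_timeout)
                      while endtime > datetime.now():
                          code, data = api_client.vms.get(dst_name)
                          if 200 == code:
                              return data                      # cloned VM object now exists
                          sleep(3)
                      raise AssertionError(f"Cloned VM {dst_name} was not created within {wait_timeout}s")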

                  @@ -823,6 +1793,180 @@

                  Steps

                  def test_clone_stopped_vm(self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm)
                  +
                  + +Expand source code + +
                  def test_clone_stopped_vm(
                  +    self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
                  +):
                  +    """
                  +    To cover test:
                  +    - (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-off/ # noqa
                  +    - (new) https://github.com/harvester/tests/issues/361
                  +
                  +    Steps:
                  +        1. Create a VM with 1 CPU 2 Memory
                  +        2. Start the VM and write some data
                  +        3. Stop the VM
                  +        4. Clone the VM into VM-cloned
                  +        5. Verify VM-Cloned
                  +
                  +    Expected Result:
                  +        - Cloned-VM should be available and stopped
                  +        - Cloned-VM should be able to start and become `Running`
                  +        - Written data should be available in Cloned-VM
                  +    """
                  +    unique_vm_name, ssh_user = stopped_vm
                  +    pub_key, pri_key = ssh_keypair
                  +    code, data = api_client.vms.start(unique_vm_name)
                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                  +    assert vm_got_ips, (
                  +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                  +        f"Status: {data.get('status')}\n"
                  +        f"API Status({code}): {data}"
                  +    )
                  +
                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                  +                 if iface['name'] == 'default')
                  +    code, data = api_client.hosts.get(data['status']['nodeName'])
                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                  +                   if addr['type'] == 'InternalIP')
                  +
                  +    # Log into VM to make some data
                  +    with host_shell.login(host_ip, jumphost=True) as h:
                  +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            try:
                  +                vm_sh.connect(vm_ip, jumphost=h.client)
                  +            except ChannelException as e:
                  +                login_ex = e
                  +                sleep(3)
                  +            else:
                  +                break
                  +        else:
                  +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                  +
                  +        with vm_sh as sh:
                  +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +            while endtime > datetime.now():
                  +                out, err = sh.exec_command('cloud-init status')
                  +                if 'done' in out:
                  +                    break
                  +                sleep(3)
                  +            else:
                  +                raise AssertionError(
                  +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                  +                    f", but cloud-init still in {out}"
                  +                )
                  +            out, err = sh.exec_command(f'echo "stopped-{unique_vm_name}" > ~/vmname')
                  +            assert not err, (out, err)
                  +            sh.exec_command('sync')
                  +
                  +    # Stop the VM
                  +    code, data = api_client.vms.stop(unique_vm_name)
                  +    assert 204 == code, "`Stop` return unexpected status code"
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_vm_name)
                  +        if 404 == code:
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +
                  +    # Clone VM into new VM
                  +    cloned_name = f"cloned-{unique_vm_name}"
                  +    code, _ = api_client.vms.clone(unique_vm_name, cloned_name)
                  +    assert 204 == code, f"Failed to clone VM {unique_vm_name} into new VM {cloned_name}"
                  +
                  +    # Check cloned VM is available and stopped
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(cloned_name)
                  +        if (200 == code
                  +           and "Halted" == data['spec'].get('runStrategy')
                  +           and "Stopped" == data.get('status', {}).get('printableStatus')):
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Cloned VM {cloned_name} is not available and stopped"
                  +            f"Status({code}): {data}"
                  +        )
                  +
                  +    # Check cloned VM started
                  +    api_client.vms.start(cloned_name)
                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(cloned_name, ['default'])
                  +    assert vm_got_ips, (
                  +        f"Failed to Start VM({cloned_name}) with errors:\n"
                  +        f"Status: {data.get('status')}\n"
                  +        f"API Status({code}): {data}"
                  +    )
                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                  +                 if iface['name'] == 'default')
                  +    code, data = api_client.hosts.get(data['status']['nodeName'])
                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                  +                   if addr['type'] == 'InternalIP')
                  +
                  +    # Log into new VM to check VM is cloned as old one
                  +    with host_shell.login(host_ip, jumphost=True) as h:
                  +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +        while endtime > datetime.now():
                  +            try:
                  +                vm_sh.connect(vm_ip, jumphost=h.client)
                  +            except ChannelException as e:
                  +                login_ex = e
                  +                sleep(3)
                  +            else:
                  +                break
                  +        else:
                  +            raise AssertionError(f"Unable to login to VM {cloned_name}") from login_ex
                  +
                  +        with vm_sh as sh:
                  +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +            while endtime > datetime.now():
                  +                out, err = sh.exec_command('cloud-init status')
                  +                if 'done' in out:
                  +                    break
                  +                sleep(3)
                  +            else:
                  +                raise AssertionError(
                  +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                  +                    f", but cloud-init still in {out}"
                  +                )
                  +
                  +            out, err = sh.exec_command('cat ~/vmname')
                  +        assert f"stopped-{unique_vm_name}" in out, (
                  +            f"cloud-init writefile failed\n"
                  +            f"Executed stdout: {out}\n"
                  +            f"Executed stderr: {err}"
                  +        )
                  +
                  +    # Remove cloned VM and volumes
                  +    code, data = api_client.vms.get(cloned_name)
                  +    cloned_spec = api_client.vms.Spec.from_dict(data)
                  +    api_client.vms.delete(cloned_name)
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(cloned_name)
                  +        if 404 == code:
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Delete VM({cloned_name}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +    for vol in cloned_spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        api_client.volumes.delete(vol_name)
                  +

                  To cover test: - (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-off/ # noqa - (new) https://github.com/harvester/tests/issues/361

                  @@ -845,8 +1989,6 @@

                  Steps

                  class TestVMOperations
                  -
                  Expand source code @@ -1222,6 +2364,8 @@

                  Steps

                  "\n".join(f"Volume({n}): {r}" for n, r in fails) )
                  +

                  Class variables

                  var pytestmark
                  @@ -1235,6 +2379,63 @@

                  Methods

                  def test_abort_migrate(self, api_client, unique_vm_name, wait_timeout)
                  +
                  + +Expand source code + +
                  def test_abort_migrate(self, api_client, unique_vm_name, wait_timeout):
                  +    """
                  +    To cover test:
                  +    - https://harvester.github.io/tests/manual/live-migration/abort-live-migration/
                  +
                  +    Steps:
                  +        1. Abort the VM was created and migrating
                  +    Expected Result:
                  +        - VM should be able to perform migration
                  +        - VM should stay on the current host when the migration is aborted.
                  +    """
                  +    code, host_data = api_client.hosts.get()
                  +    assert 200 == code, (code, host_data)
                  +    code, data = api_client.vms.get_status(unique_vm_name)
                  +    cur_host = data['status'].get('nodeName')
                  +    assert cur_host, (
                  +        f"VMI exists but `nodeName` is empty.\n"
                  +        f"{data}"
                  +    )
                  +
                  +    new_host = next(h['id'] for h in host_data['data']
                  +                    if cur_host != h['id'] and not h['spec'].get('taint'))
                  +
                  +    code, data = api_client.vms.migrate(unique_vm_name, new_host)
                  +    assert 204 == code, (code, data)
                  +
                  +    states = ["Aborting migration", "Migrating"]
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get_status(unique_vm_name)
                  +        m_state = data['metadata']['annotations'].get("harvesterhci.io/migrationState")
                  +        if m_state == states[-1]:
                  +            states.pop()
                  +            if states:
                  +                code, err = api_client.vms.abort_migrate(unique_vm_name)
                  +                assert 204 == code, (code, err)
                  +            else:
                  +                break
                  +        elif len(states) == 1 and not m_state:
                  +            # we did abort the migration, and the annotation was removed
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to abort VM({unique_vm_name})'s migration, stuck on {states[-1]}\n"
                  +            f"API Status({code}): {data}"
                  +        )
                  +
                  +    assert cur_host == data['status']['nodeName'], (
                  +        f"Failed to abort VM({unique_vm_name})'s migration,"
                  +        f"VM been moved to {data['status']['nodeName']} is not the origin host {cur_host}\n"
                  +    )
                  +

                  To cover test: - https://harvester.github.io/tests/manual/live-migration/abort-live-migration/

                  Steps
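
                  The abort flow pairs vms.migrate with vms.abort_migrate and then watches the harvesterhci.io/migrationState annotation until it clears; a condensed sketch of that pattern assuming the same api_client fixture (the helper name and timings are illustrative):

                  from datetime import datetime, timedelta
                  from time import sleep

                  def migrate_then_abort(api_client, vm_name, target_host, wait_timeout=300):
                      code, data = api_client.vms.migrate(vm_name, target_host)
                      assert 204 == code, (code, data)

                      aborted = False
                      endtime = datetime.now() + timedelta(seconds=wait_timeout)
                      while endtime > datetime.now():
                          code, data = api_client.vms.get_status(vm_name)
                          state = data['metadata']['annotations'].get("harvesterhci.io/migrationState")
                          if "Migrating" == state and not aborted:
                              code, err = api_client.vms.abort_migrate(vm_name)
                              assert 204 == code, (code, err)
                              aborted = True
                          elif aborted and not state:
                              return data                      # annotation cleared: abort completed
                          sleep(3)
                      raise AssertionError(f"Migration of {vm_name} was not aborted within {wait_timeout}s")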

                  @@ -1251,6 +2452,65 @@

                  Steps

                  def test_delete(self, api_client, unique_vm_name, wait_timeout)
                  +
                  + +Expand source code + +
                  def test_delete(self, api_client, unique_vm_name, wait_timeout):
                  +    '''
                  +    Steps:
                  +        1. Delete the VM that was created
                  +        2. Delete the volumes that belonged to the VM
                  +    Expected Result:
                  +        - VM should be deleted successfully
                  +        - Volumes should be deleted successfully
                  +    '''
                  +
                  +    code, data = api_client.vms.delete(unique_vm_name)
                  +    assert 200 == code, (code, data)
                  +
                  +    spec = api_client.vms.Spec.from_dict(data)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        code, data = api_client.vms.get(unique_vm_name)
                  +        if 404 == code:
                  +            break
                  +        sleep(3)
                  +    else:
                  +        raise AssertionError(
                  +            f"Failed to Delete VM({unique_vm_name}) with errors:\n"
                  +            f"Status({code}): {data}"
                  +        )
                  +
                  +    fails, check = [], dict()
                  +    for vol in spec.volumes:
                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                  +        check[vol_name] = api_client.volumes.delete(vol_name)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                  +    while endtime > datetime.now():
                  +        l_check = dict()
                  +        for vol_name, (code, data) in check.items():
                  +            if code not in (200, 204):
                  +                fails.append((vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +            else:
                  +                code, data = api_client.volumes.get(vol_name)
                  +                if 404 != code:
                  +                    l_check[vol_name] = (code, data)
                  +        check = l_check
                  +        if not check:
                  +            break
                  +        sleep(5)
                  +    else:
                  +        for vol_name, (code, data) in check.items():
                  +            fails.append((vol_name, f"Failed to delete\nStatus({code}): {data}"))
                  +
                  +    assert not fails, (
                  +        f"Failed to delete VM({unique_vm_name})'s volumes with errors:\n"
                  +        "\n".join(f"Volume({n}): {r}" for n, r in fails)
                  +    )
                  +

                  Steps

                  1. Delete the VM that was created
                  2. Delete the volumes that belonged to the VM (the cleanup pattern is sketched below)
                  @@ -1266,6 +2526,48 @@
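
                  Deleting a VM does not remove its PVC-backed volumes, so the test walks the volumes recorded in the deleted VM's spec and deletes each claim; the cleanup pattern reads roughly as below (a sketch assuming the same api_client fixture, without the polling the test adds on top):

                  def delete_vm_and_volumes(api_client, vm_name):
                      # The delete response still carries the old spec, which is the
                      # easiest place to find the names of the attached claims.
                      code, data = api_client.vms.delete(vm_name)
                      assert 200 == code, (code, data)
                      spec = api_client.vms.Spec.from_dict(data)

                      for vol in spec.volumes:
                          claim = vol['volume']['persistentVolumeClaim']['claimName']
                          api_client.volumes.delete(claim)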

                    Steps

                    def test_migrate(self, api_client, unique_vm_name, wait_timeout)
                    +
                    + +Expand source code + +
                    def test_migrate(self, api_client, unique_vm_name, wait_timeout):
                    +    """
                    +    To cover test:
                    +    - https://harvester.github.io/tests/manual/live-migration/migrate-turned-on-vm-to-another-host/ # noqa
                    +
                    +    Steps:
                    +        1. Migrate the VM that was created
                    +    Expected Result:
                    +        - VM's host Node should be changed to another one
                    +    """
                    +    code, host_data = api_client.hosts.get()
                    +    assert 200 == code, (code, host_data)
                    +    code, data = api_client.vms.get_status(unique_vm_name)
                    +    cur_host = data['status'].get('nodeName')
                    +    assert cur_host, (
                    +        f"VMI exists but `nodeName` is empty.\n"
                    +        f"{data}"
                    +    )
                    +
                    +    new_host = next(h['id'] for h in host_data['data']
                    +                    if cur_host != h['id'] and not h['spec'].get('taint'))
                    +
                    +    code, data = api_client.vms.migrate(unique_vm_name, new_host)
                    +    assert 204 == code, (code, data)
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    while endtime > datetime.now():
                    +        code, data = api_client.vms.get_status(unique_vm_name)
                    +        migrating = data['metadata']['annotations'].get("harvesterhci.io/migrationState")
                    +        if not migrating and new_host == data['status']['nodeName']:
                    +            break
                    +        sleep(5)
                    +    else:
                    +        raise AssertionError(
                    +            f"Failed to Migrate VM({unique_vm_name}) from {cur_host} to {new_host}\n"
                    +            f"API Status({code}): {data}"
                    +        )
                    +

                    To cover test: - https://harvester.github.io/tests/manual/live-migration/migrate-turned-on-vm-to-another-host/ # noqa

                    Steps
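
                    Verifying a completed migration only requires the reported nodeName and the cleared migrationState annotation; a small polling helper in the same style (sketch only, the helper name and timeout are illustrative):

                    from datetime import datetime, timedelta
                    from time import sleep

                    def wait_migrated(api_client, vm_name, target_host, wait_timeout=300):
                        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                        while endtime > datetime.now():
                            code, data = api_client.vms.get_status(vm_name)
                            migrating = data['metadata']['annotations'].get("harvesterhci.io/migrationState")
                            if not migrating and target_host == data['status']['nodeName']:
                                return data                      # VM now runs on the target host
                            sleep(5)
                        raise AssertionError(f"VM {vm_name} did not finish migrating to {target_host}")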

                    @@ -1281,6 +2583,38 @@

                    Steps

                    def test_pause(self, api_client, unique_vm_name, wait_timeout)
                    +
                    + +Expand source code + +
                    @pytest.mark.dependency(name="pause_vm", depends=["minimal_vm"])
                    +def test_pause(self, api_client, unique_vm_name, wait_timeout):
                    +    '''
                    +    Steps:
                    +        1. Pause the VM that was created
                    +    Expected Result:
                    +        - VM should change status into `Paused`
                    +    '''
                    +    code, data = api_client.vms.pause(unique_vm_name)
                    +    assert 204 == code, "`Pause` return unexpected status code"
                    +
                    +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                    +    while endtime > datetime.now():
                    +        code, data = api_client.vms.get_status(unique_vm_name)
                    +        if [c for c in data['status'].get('conditions', []) if "Paused" == c['type']]:
                    +            conditions = data['status']['conditions']
                    +            break
                    +        sleep(3)
                    +    else:
                    +        raise AssertionError(
                    +            f"Failed to pause VM({unique_vm_name}) with errors:\n"
                    +            f"VM Status: {data['status']}\n"
                    +            f"API Status({code}): {data}"
                    +        )
                    +
                    +    assert "Paused" == conditions[-1].get('type'), conditions
                    +    assert "PausedByUser" == conditions[-1].get('reason'), conditions
                    +

                    Steps

                    1. Pause the VM that was created
                    @@ -1294,6 +2628,57 @@

                      Steps

                      def test_restart(self, api_client, unique_vm_name, wait_timeout)
                      +
                      + +Expand source code + +
                      def test_restart(self, api_client, unique_vm_name, wait_timeout):
                      +    '''
                      +    Steps:
                      +        1. Restart the VM that was created
                      +    Expected Result:
                      +        - VM's ActivePods should be updated (which means the VM restarted)
                      +        - VM's status should update to `Running`
                      +        - VM's qemu-agent should be connected
                      +    '''
                      +    code, data = api_client.vms.get_status(unique_vm_name)
                      +    assert 200 == code, (
                      +        f"unable to get VM({unique_vm_name})'s instance infos with errors:\n"
                      +        f"Status({code}): {data}"
                      +    )
                      +
                      +    old_pods = set(data['status']['activePods'].items())
                      +
                      +    code, data = api_client.vms.restart(unique_vm_name)
                      +    assert 204 == code, "`Restart return unexpected status code"
                      +
                      +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                      +    while endtime > datetime.now():
                      +        code, data = api_client.vms.get_status(unique_vm_name)
                      +        if old_pods.difference(data['status'].get('activePods', {}).items() or old_pods):
                      +            break
                      +        sleep(5)
                      +    else:
                      +        raise AssertionError(
                      +            f"Failed to Restart VM({unique_vm_name}), activePods is not updated.\n"
                      +            f"Status({code}): {data}"
                      +        )
                      +
                      +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                      +    while endtime > datetime.now():
                      +        code, data = api_client.vms.get_status(unique_vm_name)
                      +        phase = data.get('status', {}).get('phase')
                      +        conds = data.get('status', {}).get('conditions', [{}])
                      +        if "Running" == phase and conds and "AgentConnected" == conds[-1].get('type'):
                      +            break
                      +        sleep(3)
                      +    else:
                      +        raise AssertionError(
                      +            f"Failed to Restart VM({unique_vm_name}) with errors:\n"
                      +            f"Status: {data.get('status')}\n"
                      +            f"API Status({code}): {data}"
                      +        )
                      +
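
                       The restart check above hinges on one expression: the set difference between the old and new `activePods` mappings. A tiny worked example (pod UIDs are made up) shows why a non-empty difference implies the virt-launcher pod was re-created:

                       old_pods = set({"uid-old": "node-1"}.items())
                       new_pods = {"uid-new": "node-1"}  # the launcher pod was replaced on restart

                       # `or old_pods` guards against a transiently empty activePods field;
                       # a non-empty difference means at least one old pod entry is gone.
                       assert old_pods.difference(new_pods.items() or old_pods)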

                      Steps

                      1. Restart the VM was created @@ -1309,6 +2694,58 @@

                        Steps

                        def test_softreboot(self, api_client, unique_vm_name, wait_timeout)
                        +
                        + +Expand source code + +
                        def test_softreboot(self, api_client, unique_vm_name, wait_timeout):
                        +    '''
                        +    Steps:
                         +        1. Softreboot the previously created VM
                         +    Expected Result:
                         +        - VM's qemu-agent should be disconnected (which means the VM is rebooting)
                         +        - VM's qemu-agent should be re-connected (which means the VM booted into the OS)
                        +        - VM's status should be changed to `Running`
                        +    '''
                        +    code, data = api_client.vms.get_status(unique_vm_name)
                        +    assert 200 == code, (
                        +        f"unable to get VM({unique_vm_name})'s instance infos with errors:\n"
                        +        f"Status({code}): {data}"
                        +    )
                        +    old_agent = data['status']['conditions'][-1]
                        +    assert "AgentConnected" == old_agent['type'], (code, data)
                        +
                        +    api_client.vms.softreboot(unique_vm_name)
                        +    # Wait until agent disconnected (leaving OS)
                        +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                        +    while endtime > datetime.now():
                        +        code, data = api_client.vms.get_status(unique_vm_name)
                        +        if "AgentConnected" not in data['status']['conditions'][-1]['type']:
                        +            break
                        +        sleep(5)
                        +    # then wait agent connected again (Entering OS)
                        +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                        +    while endtime > datetime.now():
                        +        code, data = api_client.vms.get_status(unique_vm_name)
                        +        phase, conds = data['status']['phase'], data['status'].get('conditions', [{}])
                        +        if "Running" == phase and "AgentConnected" == conds[-1].get('type'):
                        +            break
                        +        sleep(3)
                        +    else:
                        +        raise AssertionError(
                        +            f"Failed to Softreboot VM({unique_vm_name}) with errors:\n"
                        +            f"API Status({code}): {data}"
                        +        )
                        +
                        +    old_t = datetime.strptime(old_agent['lastProbeTime'], '%Y-%m-%dT%H:%M:%SZ')
                        +    new_t = datetime.strptime(conds[-1]['lastProbeTime'], '%Y-%m-%dT%H:%M:%SZ')
                        +
                        +    assert new_t > old_t, (
                        +        "Agent's probe time is not updated.\t"
                        +        f"Before softreboot: {old_t}, After softreboot: {new_t}\n"
                        +        f"Last API Status({code}): {data}"
                        +    )
                        +
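
                         Besides waiting for the agent to drop and reconnect, the test confirms the reboot by comparing the `AgentConnected` condition's `lastProbeTime` before and after the softreboot. The timestamps below are illustrative only:

                         from datetime import datetime

                         fmt = '%Y-%m-%dT%H:%M:%SZ'
                         old_t = datetime.strptime("2025-01-10T08:00:00Z", fmt)  # probe before softreboot
                         new_t = datetime.strptime("2025-01-10T08:02:31Z", fmt)  # probe after the agent reconnected
                         assert new_t > old_t  # a newer probe time proves the agent re-registered after the reboot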

                        Steps

                        1. Softreboot the VM was created @@ -1324,6 +2761,50 @@

                          Steps

                          def test_start(self, api_client, unique_vm_name, wait_timeout)
                          +
                          + +Expand source code + +
                          @pytest.mark.dependency(name="start_vm", depends=["stop_vm"])
                          +def test_start(self, api_client, unique_vm_name, wait_timeout):
                          +    '''
                          +    Steps:
                           +        1. Start the previously created and stopped VM
                           +    Expected Result:
                          +        - VM should change status into `Running`
                          +    '''
                          +    code, data = api_client.vms.start(unique_vm_name)
                           +    assert 204 == code, "`Start` returned unexpected status code"
                          +
                          +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                          +    while endtime > datetime.now():
                          +        code, data = api_client.vms.get(unique_vm_name)
                          +        strategy = data['spec']['runStrategy']
                          +        pstats = data['status']['printableStatus']
                          +        if "Halted" != strategy and "Running" == pstats:
                          +            break
                          +        sleep(3)
                          +    else:
                          +        raise AssertionError(
                          +            f"Failed to Start VM({unique_vm_name}) with errors:\n"
                          +            f"Status({code}): {data}"
                          +        )
                          +
                          +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                          +    while endtime > datetime.now():
                          +        code, data = api_client.vms.get_status(unique_vm_name)
                          +        phase = data.get('status', {}).get('phase')
                          +        conds = data.get('status', {}).get('conditions', [{}])
                          +        if "Running" == phase and conds and "AgentConnected" == conds[-1].get('type'):
                          +            break
                          +        sleep(3)
                          +    else:
                          +        raise AssertionError(
                          +            f"Failed to Start VM({unique_vm_name}) with errors:\n"
                          +            f"Status: {data.get('status')}\n"
                          +            f"API Status({code}): {data}"
                          +        )
                          +

                          Steps

                          1. Start the VM was created and stopped @@ -1337,6 +2818,38 @@

                            Steps

                            def test_stop(self, api_client, unique_vm_name, wait_timeout)
                            +
                            + +Expand source code + +
                            @pytest.mark.dependency(name="stop_vm", depends=["minimal_vm"])
                            +def test_stop(self, api_client, unique_vm_name, wait_timeout):
                            +    '''
                            +    Steps:
                             +        1. Stop the created VM that is not yet stopped
                             +    Expected Result:
                            +        - VM's status should be changed to `Stopped`
                            +        - VM's `RunStrategy` should be changed to `Halted`
                            +    '''
                            +    code, data = api_client.vms.stop(unique_vm_name)
                             +    assert 204 == code, "`Stop` returned unexpected status code"
                            +
                            +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                            +    while endtime > datetime.now():
                            +        code, data = api_client.vms.get_status(unique_vm_name)
                            +        if 404 == code:
                            +            break
                            +        sleep(3)
                            +    else:
                            +        raise AssertionError(
                            +            f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                            +            f"Status({code}): {data}"
                            +        )
                            +
                            +    code, data = api_client.vms.get(unique_vm_name)
                            +    assert "Halted" == data['spec']['runStrategy']
                            +    assert "Stopped" == data['status']['printableStatus']
                            +

                            Steps

                            1. Stop the VM was created and not stopped @@ -1351,6 +2864,35 @@

                              Steps

                              def test_unpause(self, api_client, unique_vm_name, wait_timeout)
                              +
                              + +Expand source code + +
                              @pytest.mark.dependency(depends=["pause_vm"])
                              +def test_unpause(self, api_client, unique_vm_name, wait_timeout):
                              +    '''
                              +    Steps:
                               +        1. Unpause the previously paused VM
                               +    Expected Result:
                              +        - VM's status should not be `Paused`
                              +    '''
                              +    code, data = api_client.vms.unpause(unique_vm_name)
                               +    assert 204 == code, "`Unpause` returned unexpected status code"
                              +
                              +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                              +    while endtime > datetime.now():
                              +        code, data = api_client.vms.get_status(unique_vm_name)
                              +        cond_types = set(c['type'] for c in data['status'].get('conditions', []))
                              +        if {"AgentConnected"} & cond_types and not {"Paused"} & cond_types:
                              +            break
                              +        sleep(3)
                              +    else:
                              +        raise AssertionError(
                              +            f"Failed to unpause VM({unique_vm_name}) with errors:\n"
                              +            f"VM Status: {data['status']}\n"
                              +            f"API Status({code}): {data}"
                              +        )
                              +
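
                               The unpause check builds a set of condition types and uses set intersection, which makes the two requirements explicit. Here is the same check against hand-written conditions (illustrative data only):

                               conditions = [{"type": "LiveMigratable"}, {"type": "AgentConnected"}]
                               cond_types = set(c['type'] for c in conditions)

                               assert {"AgentConnected"} & cond_types      # guest agent is reachable again
                               assert not {"Paused"} & cond_types          # the Paused condition was cleared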

                              Steps

                              1. Unpause the VM was paused @@ -1366,7 +2908,6 @@

                                Steps

                                class TestVMResource
                                -
                                Expand source code @@ -1670,6 +3211,7 @@

                                Steps

                                vol_name = vol['volume']['persistentVolumeClaim']['claimName'] api_client.volumes.delete(vol_name)
                                +

                                Class variables

                                var pytestmark
                                @@ -1680,33 +3222,349 @@

                                Class variables

                                Methods

                                 -def test_create_schedule_on_maximum(self, api_client, unique_vm_name, vm_checker, vm_calc, image, unset_cpu_memory_overcommit, res_type)
                                 +def test_create_schedule_on_maximum(self,
                                api_client,
                                unique_vm_name,
                                vm_checker,
                                vm_calc,
                                image,
                                unset_cpu_memory_overcommit,
                                res_type)
                                +
                                + +Expand source code + +
                                @pytest.mark.parametrize("res_type", ["cpu", "memory"])
                                +def test_create_schedule_on_maximum(
                                +    self, api_client, unique_vm_name, vm_checker, vm_calc, image,
                                +    unset_cpu_memory_overcommit, res_type
                                +):
                                +    # get the node having the maximum resource
                                +    code, data = api_client.hosts.get()
                                +    nodes_res = [(n['metadata']['name'], vm_calc.node_resources(n)['schedulable'])
                                +                 for n in data['data']]
                                +    nodes_res = sorted(nodes_res, key=lambda n: n[1][res_type], reverse=True)
                                +    expected_host, expected_res = next((cn, nd[res_type] + (cd[res_type] - nd[res_type]) / 2)
                                +                                       for (cn, cd), (nn, nd) in zip(nodes_res, nodes_res[1:]))
                                +
                                +    # Calculate the maximum resource
                                +    vm_spec, namespace = api_client.vms.Spec(1, 2), 'default'
                                +    vm_spec.add_image("disk-0", image['id'])
                                +    data = vm_spec.to_dict(unique_vm_name, namespace)
                                +
                                +    if res_type == 'cpu':
                                +        exp = -2
                                +        data['spec']['template']['spec']['domain'][res_type]['cores'] = int(expected_res) + 1
                                +    else:
                                +        exp = 2
                                +
                                +    for k, resource in data['spec']['template']['spec']['domain']['resources'].items():
                                +        resource[res_type] = vm_calc.format_unit(expected_res, max_exp=exp, suffix_space=False)
                                +
                                +    try:
                                +        # Create VM then verify it
                                +        code, data = api_client.vms.create(unique_vm_name, data, namespace)
                                +        assert 201 == code, (code, data)
                                +        vm_started, (code, vmi) = vm_checker.wait_started(unique_vm_name)
                                +        assert vm_started, (code, vmi)
                                +
                                +        code, hosts = api_client.hosts.get()
                                +        cur_res = [(n['metadata']['name'], vm_calc.node_resources(n)['schedulable'])
                                +                   for n in hosts['data']]
                                +        schedulables = [(name, res) for (name, res) in cur_res if res[res_type] > expected_res]
                                +        if not schedulables:
                                +            # General case
                                +            assert expected_host == vmi['status']['nodeName'], (
                                +                f"VM started but not hosted on expected host: {expected_host}"
                                +            )
                                +        else:
                                +            # ???: node's resources released while creating VM
                                +            expected_conditions = [
                                 +                # VM is still hosted on the expected host
                                +                expected_host == vmi['status']['nodeName'],
                                 +                # VM is hosted on another schedulable host
                                +                expected_host in [name for name, _ in schedulables],
                                +            ]
                                +            assert any(expected_conditions), (
                                +                f"VM started on another host {vmi['status']['nodeName']},"
                                +                f" and the resource of expected host {expected_host!r} be updated."
                                +            )
                                +    finally:
                                +        # We must delete the VM to release node's resources
                                +        _ = vm_checker.wait_deleted(unique_vm_name, namespace=namespace)
                                +        for vol in data['spec']['template']['spec']['volumes']:
                                +            if 'persistentVolumeClaim' in vol:
                                +                api_client.volumes.delete(vol['persistentVolumeClaim']['claimName'])
                                +
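
                                 The target resource computed above is the midpoint between the two most schedulable nodes, so only the largest node can satisfy the request. A worked example with made-up numbers (two nodes with 10 and 6 schedulable CPUs, already sorted descending):

                                 nodes_res = [("node-a", {"cpu": 10}), ("node-b", {"cpu": 6})]
                                 (cn, cd), (nn, nd) = nodes_res[0], nodes_res[1]

                                 expected_host = cn
                                 expected_res = nd["cpu"] + (cd["cpu"] - nd["cpu"]) / 2  # 6 + (10 - 6) / 2 = 8.0

                                 assert (expected_host, expected_res) == ("node-a", 8.0)  # only node-a can host 8 cores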
                                def test_update_cpu(self, api_client, ssh_keypair, vm_shell_from_host, vm_checker, stopped_vm)
                                +
                                + +Expand source code + +
                                def test_update_cpu(
                                +    self, api_client, ssh_keypair, vm_shell_from_host, vm_checker,
                                +    stopped_vm
                                +):
                                +    unique_vm_name, ssh_user = stopped_vm
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    code, data = api_client.vms.start(unique_vm_name)
                                +    assert 204 == code, (code, data)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ["default"])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Login to VM to check current CPU counts
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_vm_name} Started {vm_checker.wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +
                                +        out, err = sh.exec_command("lscpu -e=cpu | tail -1")
                                +        out = out.strip()
                                +    assert out.isdigit() and not err, (
                                +        f"Failed to list cpu amount, output: {out}\nerror: {err}"
                                +    )
                                +    new_cpus = (int(out) + 1) + 1  # (shift to 1) + 1
                                +
                                +    # Update CPUs
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    assert 200 == code, (code, data)
                                +    spec = api_client.vms.Spec.from_dict(data)
                                +    spec.cpu_cores = new_cpus
                                +    code, data = api_client.vms.update(unique_vm_name, spec)
                                +    assert 200 == code, (code, data)
                                +    vm_restarted, ctx = vm_checker.wait_restarted(unique_vm_name)
                                +    assert vm_restarted, (
                                +        f"Failed to Restart VM({unique_vm_name}),"
                                +        f" timed out while executing {ctx.callee!r}"
                                +    )
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ["default"])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Login to VM to check updated CPUs
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_vm_name} Started {vm_checker.wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +
                                +        out, err = sh.exec_command("lscpu -e=cpu | tail -1")
                                +        out = out.strip()
                                +    assert out.isdigit() and not err, (
                                +        f"Failed to list cpu amount, output: {out}\nerror: {err}"
                                +    )
                                +    assert new_cpus == int(out) + 1, (
                                +        f"Failed to update CPU to {new_cpus}, it still be {out}"
                                +    )
                                +
                                +    # stop the VM
                                +    vm_checker.wait_stopped(unique_vm_name)
                                +
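
                                 The CPU arithmetic in this test deserves a note: `lscpu -e=cpu | tail -1` prints the highest CPU index, which is zero-based. A worked example for a hypothetical 2-vCPU guest:

                                 out = "1"                    # highest CPU index reported inside the guest
                                 current_cpus = int(out) + 1  # shift the 0-based index to a count -> 2
                                 new_cpus = current_cpus + 1  # request one more core than the guest has -> 3

                                 assert (current_cpus, new_cpus) == (2, 3)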
                                def test_update_enable_usb(self, api_client, unique_vm_name, vm_checker, image)
                                +
                                + +Expand source code + +
                                def test_update_enable_usb(self, api_client, unique_vm_name, vm_checker, image):
                                +    cpu, mem = 1, 2
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_image("disk-0", image['id'])
                                +    vm_spec.run_strategy = "Halted"
                                +    vm_spec.usbtablet = False
                                +
                                +    # Create a stopped VM without usbtablet
                                +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                +    assert 201 == code, (code, data)
                                +    with vm_checker.configure(snooze=1):
                                +        vm_show_stopped, (code, data) = vm_checker.wait_status_stopped(unique_vm_name)
                                +    assert vm_show_stopped, (code, data)
                                +
                                +    # update VM to enable usbtablet and start
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.usbtablet = True
                                +    code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +    assert 200 == code, (code, data)
                                +    vm_started, (code, data) = vm_checker.wait_started(unique_vm_name)
                                +    assert vm_started, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    devices = data['spec']['domain']['devices']
                                +    assert 'inputs' in devices
                                +    assert 'usb' in devices['inputs'][0]['bus']
                                +    assert 'tablet' in devices['inputs'][0]['name']
                                +    assert 'tablet' in devices['inputs'][0]['type']
                                +
                                +    # teardown: remove the VM
                                +    _ = vm_checker.wait_deleted(unique_vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
                                def test_update_enable_user_data(self, api_client, unique_vm_name, vm_checker, image)
                                +
                                + +Expand source code + +
                                def test_update_enable_user_data(self, api_client, unique_vm_name, vm_checker, image):
                                +    cpu, mem = 1, 2
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_image("disk-0", image['id'])
                                +    vm_spec.run_strategy = "Halted"
                                +    vm_spec.guest_agent = False
                                +
                                +    # Create a stopped VM without user data (guest agent)
                                +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                +    assert 201 == code, (code, data)
                                +    with vm_checker.configure(snooze=1):
                                +        vm_show_stopped, (code, data) = vm_checker.wait_status_stopped(unique_vm_name)
                                +    assert vm_show_stopped, (code, data)
                                +
                                +    # update VM to add guest agent and start
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.guest_agent = True
                                +    code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +    assert 200 == code, (code, data)
                                +    vm_started, (code, data) = vm_checker.wait_started(unique_vm_name)
                                +    assert vm_started, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    cloudinit = next(d for d in data['spec']['volumes'] if 'cloudInitNoCloud' in d)
                                +    user_data = yaml.safe_load(cloudinit['cloudInitNoCloud']['userData'])
                                +
                                +    assert 'packages' in user_data
                                +    assert 'runcmd' in user_data
                                +    assert 'qemu-guest-agent' in user_data['packages']
                                +    assert 'systemctl' in user_data['runcmd'][0]
                                +    assert 'enable' in user_data['runcmd'][0]
                                +    assert '--now' in user_data['runcmd'][0]
                                +    assert 'qemu-guest-agent.service' in user_data['runcmd'][0]
                                +
                                +    # teardown: remove the VM
                                +    _ = vm_checker.wait_deleted(unique_vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
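
                                 The assertions above imply the shape of the cloud-init user data injected when `guest_agent` is enabled; a minimal document that would satisfy them (illustrative only, the generated text may differ) is:

                                 import yaml

                                 user_data = yaml.safe_load(
                                     "packages:\n"
                                     "  - qemu-guest-agent\n"
                                     "runcmd:\n"
                                     "  - systemctl enable --now qemu-guest-agent.service\n"
                                 )

                                 assert 'qemu-guest-agent' in user_data['packages']
                                 assert 'systemctl' in user_data['runcmd'][0] and '--now' in user_data['runcmd'][0]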
                                 -def test_update_schedule_on_maximum(self, api_client, vm_checker, vm_calc, stopped_vm, unset_cpu_memory_overcommit, res_type)
                                 +def test_update_schedule_on_maximum(self,
                                api_client,
                                vm_checker,
                                vm_calc,
                                stopped_vm,
                                unset_cpu_memory_overcommit,
                                res_type)
                                +
                                + +Expand source code + +
                                @pytest.mark.parametrize("res_type", ["cpu", "memory"])
                                +def test_update_schedule_on_maximum(
                                +    self, api_client, vm_checker, vm_calc, stopped_vm, unset_cpu_memory_overcommit, res_type
                                +):
                                +    unique_vm_name, ssh_user = stopped_vm
                                +
                                 +    # make sure the VM is stopped and configured with minimal resources
                                +    vm_stopped, (code, data) = vm_checker.wait_stopped(unique_vm_name)
                                +    assert vm_stopped, (code, data)
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.cpu_cores, vm_spec.memory = 1, 2
                                +    code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +    assert 200 == code, (code, data)
                                +
                                +    # get the node having the maximum resource
                                +    code, data = api_client.hosts.get()
                                +    nodes_res = [(n['metadata']['name'], vm_calc.node_resources(n)['schedulable'])
                                +                 for n in data['data']]
                                +    nodes_res = sorted(nodes_res, key=lambda n: n[1][res_type], reverse=True)
                                +    expected_host, expected_res = next((cn, nd[res_type] + (cd[res_type] - nd[res_type]) / 2)
                                +                                       for (cn, cd), (nn, nd) in zip(nodes_res, nodes_res[1:]))
                                +
                                +    # update VM to target resource and start it
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    if res_type == 'cpu':
                                +        exp = -2
                                +        data['spec']['template']['spec']['domain'][res_type]['cores'] = int(expected_res) + 1
                                +    else:
                                +        exp = 2
                                +
                                +    for k, resource in data['spec']['template']['spec']['domain']['resources'].items():
                                +        resource[res_type] = vm_calc.format_unit(expected_res, max_exp=exp, suffix_space=False)
                                +
                                +    try:
                                +        code, data = api_client.vms.update(unique_vm_name, data)
                                +        assert 200 == code, (code, data)
                                +        vm_started, (code, vmi) = vm_checker.wait_started(unique_vm_name)
                                +        assert vm_started, (code, vmi)
                                +
                                 +        # Verify the VM is hosted on the expected node
                                +        code, hosts = api_client.hosts.get()
                                +        cur_res = [(n['metadata']['name'], vm_calc.node_resources(n)['schedulable'])
                                +                   for n in hosts['data']]
                                +        schedulables = [(name, res) for (name, res) in cur_res if res[res_type] > expected_res]
                                +        if not schedulables:
                                +            # General case
                                +            assert expected_host == vmi['status']['nodeName'], (
                                +                f"VM started but not hosted on expected host: {expected_host}"
                                +            )
                                +        else:
                                +            # ???: node's resources released while creating VM
                                +            expected_conditions = [
                                 +                # VM is still hosted on the expected host
                                +                expected_host == vmi['status']['nodeName'],
                                 +                # VM is hosted on another schedulable host
                                +                expected_host in [name for name, _ in schedulables],
                                +            ]
                                +            assert any(expected_conditions), (
                                +                f"VM started on another host {vmi['status']['nodeName']},"
                                +                f" and the resource of expected host {expected_host!r} be updated."
                                +            )
                                +    finally:
                                +        # Stop the VM
                                +        vm_checker.wait_stopped(unique_vm_name)
                                +        # Revert the VM to request minimal resource
                                +        code, data = api_client.vms.get(unique_vm_name)
                                +        vm_spec._data = data
                                +        code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +        assert 200 == code, (code, data)
                                +
                                @@ -1715,7 +3573,6 @@

                                Methods

                                class TestVMWithVolumes
                                -
                                Expand source code @@ -1987,6 +3844,7 @@

                                Methods

                                api_client.volumes.delete(vol_name)
                                +

                                Class variables

                                var pytestmark
                                @@ -2000,6 +3858,123 @@

                                Methods

                                def test_create_with_existing_volume(self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm)
                                +
                                + +Expand source code + +
                                def test_create_with_existing_volume(
                                +    self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
                                +):
                                +    """
                                +    To cover test:
                                +    - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-existing-volume/ # noqa
                                +
                                +    Steps:
                                +        1. Create a data volume
                                +        2. Create a VM with 1 CPU 2 Memory and the existing data volume
                                +        3. Start the VM
                                +        4. Verify the VM
                                +
                                 +    Expected Result:
                                 +        - VM should be able to start and become `Running`
                                 +        - Disk volume should be available in the VM
                                 +        - Disk size in VM should match the configured volume size
                                +    """
                                +    unique_vm_name, ssh_user = stopped_vm
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    vol_name, size = 'disk-existing', 3
                                +    vol_spec = api_client.volumes.Spec(size)
                                +    code, data = api_client.volumes.create(f"{unique_vm_name}-{vol_name}", vol_spec)
                                +
                                +    assert 201 == code, (code, data)
                                +
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.run_strategy = "RerunOnFailure"
                                +    vm_spec.add_existing_volume(vol_name, f"{unique_vm_name}-{vol_name}")
                                +
                                +    # Start VM with added existing volume
                                +    code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Log into VM to verify added volumes
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    with host_shell.login(host_ip, jumphost=True) as h:
                                +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            try:
                                +                vm_sh.connect(vm_ip, jumphost=h.client)
                                +            except ChannelException as e:
                                +                login_ex = e
                                +                sleep(3)
                                +            else:
                                +                break
                                +        else:
                                +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                                +
                                +        with vm_sh as sh:
                                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +            while endtime > datetime.now():
                                +                out, err = sh.exec_command('cloud-init status')
                                +                if 'done' in out:
                                +                    break
                                +                sleep(3)
                                +            else:
                                +                raise AssertionError(
                                +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +                    f", but cloud-init still in {out}"
                                +                )
                                +            out, err = sh.exec_command("lsblk -r")
                                +            assert not err, (out, err)
                                +
                                +    assert 1 + len(vm_spec.volumes) == len(re.findall('disk', out)), (
                                +        f"Added Volumes amount is not correct.\n"
                                +        f"lsblk output: {out}"
                                +    )
                                +
                                +    assert f"{size}G 0 disk" in out, (
                                +        f"existing Volume {size}G not found\n"
                                +        f"lsblk output: {out}"
                                +    )
                                +
                                +    # Tear down: Stop VM and remove added volumes
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.run_strategy = "Halted"
                                +    vols, claims = [], []
                                +    for vd in vm_spec.volumes:
                                +        if vd['disk']['name'] == vol_name:
                                +            claims.append(vd['volume']['persistentVolumeClaim']['claimName'])
                                +        else:
                                +            vols.append(vd)
                                +    else:
                                +        vm_spec.volumes = vols
                                +
                                +    api_client.vms.update(unique_vm_name, vm_spec)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(unique_vm_name)
                                +        if (code == 200
                                +                and 'Halted' == data['spec']['runStrategy']
                                +                and 'Stopped' == data.get('status', {}).get('printableStatus')):
                                +            break
                                +        sleep(3)
                                +
                                +    for claim in claims:
                                +        api_client.volumes.delete(claim)
                                +
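
                                 The `lsblk -r` assertions count every line whose TYPE column is `disk` and then grep for the size of the added volume. Against a hypothetical guest with a 10G root image volume plus the 3G existing volume, the raw output and checks look like:

                                 import re

                                 out = (
                                     "NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINTS\n"
                                     "vda 253:0 0 10G 0 disk \n"
                                     "vda1 253:1 0 10G 0 part /\n"
                                     "vdb 253:16 0 3G 0 disk \n"
                                 )

                                 assert len(re.findall('disk', out)) == 2  # root image volume + the attached data volume
                                 assert "3G 0 disk" in out                 # the 3G existing volume is visible in the guest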

                                To cover test: - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-existing-volume/ # noqa

                                Steps

                                @@ -2018,6 +3993,122 @@

                                Steps

                                def test_create_with_two_volumes(self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm)
                                +
                                + +Expand source code + +
                                def test_create_with_two_volumes(
                                +    self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
                                +):
                                +    """
                                +    To cover test:
                                +    - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-two-disk-volumes/ # noqa
                                +
                                +    Steps:
                                +        1. Create a VM with 1 CPU 2 Memory and 2 disk volumes
                                +        2. Start the VM
                                +        3. Verify the VM
                                +
                                 +    Expected Result:
                                 +        - VM should be able to start and become `Running`
                                 +        - 2 disk volumes should be available in the VM
                                 +        - Disk size in VM should match the configured volume size
                                +    """
                                +    unique_vm_name, ssh_user = stopped_vm
                                +    pub_key, pri_key = ssh_keypair
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.run_strategy = "RerunOnFailure"
                                +    volumes = [('disk-1', 1), ('disk-2', 2)]
                                +    for name, size in volumes:
                                +        vm_spec.add_volume(name, size)
                                +
                                +    # Start VM with 2 additional volumes
                                +    code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +    assert 200 == code, (code, data)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Log into VM to verify added volumes
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    with host_shell.login(host_ip, jumphost=True) as h:
                                +        vm_sh = vm_shell(ssh_user, pkey=pri_key)
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            try:
                                +                vm_sh.connect(vm_ip, jumphost=h.client)
                                +            except ChannelException as e:
                                +                login_ex = e
                                +                sleep(3)
                                +            else:
                                +                break
                                +        else:
                                +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                                +
                                +        with vm_sh as sh:
                                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +            while endtime > datetime.now():
                                +                out, err = sh.exec_command('cloud-init status')
                                +                if 'done' in out:
                                +                    break
                                +                sleep(3)
                                +            else:
                                +                raise AssertionError(
                                +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +                    f", but cloud-init still in {out}"
                                +                )
                                +            out, err = sh.exec_command("lsblk -r")
                                +            assert not err, (out, err)
                                +
                                +    assert 1 + len(vm_spec.volumes) == len(re.findall('disk', out)), (
                                +        f"Added Volumes amount is not correct.\n"
                                +        f"lsblk output: {out}"
                                +    )
                                +    fails = []
                                +    for _, size in volumes:
                                +        if not re.search(f"vd.*{size}G 0 disk", out):
                                +            fails.append(f"Volume size {size}G not found")
                                +
                                +    assert not fails, (
                                +        f"lsblk output: {out}\n"
                                +        "\n".join(fails)
                                +    )
                                +
                                +    # Tear down: Stop VM and remove added volumes
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.run_strategy = "Halted"
                                +    vol_names, vols, claims = [n for n, s in volumes], [], []
                                +    for vd in vm_spec.volumes:
                                +        if vd['disk']['name'] in vol_names:
                                +            claims.append(vd['volume']['persistentVolumeClaim']['claimName'])
                                +        else:
                                +            vols.append(vd)
                                +    else:
                                +        vm_spec.volumes = vols
                                +
                                +    api_client.vms.update(unique_vm_name, vm_spec)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(unique_vm_name)
                                +        if (code == 200
                                +                and 'Halted' == data['spec']['runStrategy']
                                +                and 'Stopped' == data.get('status', {}).get('printableStatus')):
                                +            break
                                +        sleep(3)
                                +
                                +    for vol_name in claims:
                                +        api_client.volumes.delete(vol_name)
                                +

                                To cover test: - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-two-disk-volumes/ # noqa

                                Steps

                                @@ -2032,9 +4123,52 @@

                                Steps

                                - Disk size in VM should be the same as its volume configured

                                 -def test_create_with_volume_image(self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, unique_name, image)
                                 +def test_create_with_volume_image(self,
                                api_client,
                                ssh_keypair,
                                vm_checker,
                                wait_timeout,
                                host_shell,
                                vm_shell,
                                unique_name,
                                image)
                                +
                                + +Expand source code + +
                                def test_create_with_volume_image(
                                +    self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, unique_name,
                                +    image
                                +):
                                +    # Create the volume from image
                                +    vol_name, size = f"vm-image-vol-{unique_name}", 10
                                +    vol_spec = api_client.volumes.Spec(size)
                                +    code, data = api_client.volumes.create(vol_name, vol_spec, image_id=image['id'])
                                +    assert 201 == code, (code, data)
                                +
                                +    # Create VM using the image volume
                                +    cpu, mem, unique_vm_name = 1, 2, vol_name
                                +    pub_key, pri_key = ssh_keypair
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_existing_volume("disk-0", vol_name)
                                +    userdata = yaml.safe_load(vm_spec.user_data)
                                +    userdata['ssh_authorized_keys'] = [pub_key]
                                +    vm_spec.user_data = yaml.dump(userdata)
                                +
                                +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                +    # Verify the VM will got IP address
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ["default"])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Teardown: delete the VM and volume
                                +    api_client.vms.delete(unique_vm_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(unique_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +
                                +    api_client.volumes.delete(vol_name)
                                +
                                @@ -2128,7 +4262,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_4_vm_backup_restore.html b/backend/integrations/test_4_vm_backup_restore.html index c10529e53..9e9e96671 100644 --- a/backend/integrations/test_4_vm_backup_restore.html +++ b/backend/integrations/test_4_vm_backup_restore.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_4_vm_backup_restore API documentation - + @@ -37,54 +48,365 @@

                                Functions

                                def NFS_config(request)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def NFS_config(request):
                                +    nfs_endpoint = request.config.getoption('--nfs-endpoint')
                                +
                                +    assert nfs_endpoint, f"NFS endpoint not configured: {nfs_endpoint}"
                                +    assert nfs_endpoint.startswith("nfs://"), (
                                 +        f"NFS endpoint should start with `nfs://`, not {nfs_endpoint}"
                                +    )
                                +
                                +    return ("NFS", dict(endpoint=nfs_endpoint))
                                +
                                def S3_config(request)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def S3_config(request):
                                +    config = {
                                +        "bucket": request.config.getoption('--bucketName'),
                                +        "region": request.config.getoption('--region'),
                                +        "access_id": request.config.getoption('--accessKeyId'),
                                +        "access_secret": request.config.getoption('--secretAccessKey')
                                +    }
                                +
                                +    empty_options = ', '.join(k for k, v in config.items() if not v)
                                +    assert not empty_options, (
                                +        f"S3 configuration missing, `{empty_options}` should not be empty."
                                +    )
                                +
                                +    config['endpoint'] = request.config.getoption('--s3-endpoint')
                                +
                                +    return ("S3", config)
                                +
                                def backup_config(request)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def backup_config(request):
                                +    return request.getfixturevalue(f"{request.param}_config")
                                +
                                def base_vm(api_client, ssh_keypair, unique_name, vm_checker, image, backup_config)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def base_vm(api_client, ssh_keypair, unique_name, vm_checker, image, backup_config):
                                +    unique_vm_name = f"{datetime.now().strftime('%m%S%f')}-{unique_name}"
                                +    cpu, mem = 1, 2
                                +    pub_key, pri_key = ssh_keypair
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_image("disk-0", image['id'])
                                +
                                +    userdata = yaml.safe_load(vm_spec.user_data)
                                +    userdata['ssh_authorized_keys'] = [pub_key]
                                +    userdata['password'] = 'password'
                                +    userdata['chpasswd'] = dict(expire=False)
                                +    userdata['sshpwauth'] = True
                                +    vm_spec.user_data = yaml.dump(userdata)
                                +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                +
                                +    # Check VM started and get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    yield {
                                +        "name": unique_vm_name,
                                +        "host_ip": host_ip,
                                +        "vm_ip": vm_ip,
                                +        "ssh_user": image['user'],
                                +    }
                                +
                                +    # remove created VM
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_vm_name)
                                +
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
                                def base_vm_migrated(api_client, vm_checker, backup_config, base_vm)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def base_vm_migrated(api_client, vm_checker, backup_config, base_vm):
                                +    unique_vm_name = base_vm['name']
                                +
                                +    code, host_data = api_client.hosts.get()
                                +    assert 200 == code, (code, host_data)
                                +    code, data = api_client.vms.get_status(unique_vm_name)
                                +    cur_host = data['status'].get('nodeName')
                                +    assert cur_host, (
                                +        f"VMI exists but `nodeName` is empty.\n"
                                +        f"{data}"
                                +    )
                                +
                                +    new_host = next(h['id'] for h in host_data['data']
                                +                    if cur_host != h['id'] and not h['spec'].get('taint'))
                                +
                                +    vm_migrated, (code, data) = vm_checker.wait_migrated(unique_vm_name, new_host)
                                +    assert vm_migrated, (
                                +        f"Failed to Migrate VM({unique_vm_name}) from {cur_host} to {new_host}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # update for new IPs
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    base_vm['vm_ip'] = vm_ip
                                +    base_vm['host_ip'] = host_ip
                                +
                                +    return (cur_host, new_host)
                                +
                                 -def base_vm_with_data(api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker, backup_config, base_vm)
                                 +def base_vm_with_data(api_client,
                                vm_shell_from_host,
                                ssh_keypair,
                                wait_timeout,
                                vm_checker,
                                backup_config,
                                base_vm)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def base_vm_with_data(
                                +    api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker, backup_config, base_vm
                                +):
                                +    pub_key, pri_key = ssh_keypair
                                +    unique_vm_name = base_vm['name']
                                +
                                 +    # Log in to the VM and write some data
                                +    with vm_shell_from_host(
                                +        base_vm['host_ip'], base_vm['vm_ip'], base_vm['ssh_user'], pkey=pri_key
                                +    ) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_vm_name} Started {vm_checker.wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +        out, err = sh.exec_command(f'echo {unique_vm_name!r} > ~/vmname')
                                +        assert not err, (out, err)
                                +        sh.exec_command('sync')
                                +
                                +    yield {
                                +        "name": unique_vm_name,
                                +        "host_ip": base_vm['host_ip'],
                                +        "vm_ip": base_vm['vm_ip'],
                                +        "ssh_user": base_vm['ssh_user'],
                                +        "data": dict(path="~/vmname", content=f'{unique_vm_name}')
                                +    }
                                +
                                 +    # remove backups that are linked to the VM and ready to use
                                +    code, data = api_client.backups.get()
                                +
                                +    check_names = []
                                +    for backup in data['data']:
                                +        if (backup['status'].get('readyToUse') and
                                +                unique_vm_name == backup['spec']['source']['name']):
                                +            api_client.backups.delete(backup['metadata']['name'])
                                +            check_names.append(backup['metadata']['name'])
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        for name in check_names[:]:
                                +            code, data = api_client.backups.get(name)
                                +            if 404 == code:
                                +                check_names.remove(name)
                                +        if not check_names:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete backups: {check_names}\n"
                                +            f"Last API Status({code}): {data}"
                                +            )
                                +
                                def config_backup_target(api_client, conflict_retries, backup_config, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def config_backup_target(api_client, conflict_retries, backup_config, wait_timeout):
                                +    backup_type, config = backup_config
                                +    code, data = api_client.settings.get('backup-target')
                                +    origin_spec = api_client.settings.BackupTargetSpec.from_dict(data)
                                +
                                +    spec = getattr(api_client.settings.BackupTargetSpec, backup_type)(**config)
                                 +    # ???: when switching S3 -> NFS, updating backup-target easily hits a resource conflict,
                                 +    # so we need retries to apply the change.
                                +    for _ in range(conflict_retries):
                                +        code, data = api_client.settings.update('backup-target', spec)
                                +        if 409 == code and "Conflict" == data['reason']:
                                +            sleep(3)
                                +        else:
                                +            break
                                +    else:
                                +        raise AssertionError(
                                 +            f"Unable to update backup-target after {conflict_retries} retries.\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +    assert 200 == code, (
                                +        f'Failed to update backup target to {backup_type} with {config}\n'
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    yield spec
                                +
                                +    # remove unbound LH backupVolumes
                                +    code, data = api_client.lhbackupvolumes.get()
                                +    assert 200 == code, "Failed to list lhbackupvolumes"
                                +
                                +    check_names = []
                                +    for volume_data in data["items"]:
                                +        volume_name = volume_data["metadata"]["name"]
                                +        backup_name = volume_data["status"]["lastBackupName"]
                                +        if not backup_name:
                                +            api_client.lhbackupvolumes.delete(volume_name)
                                +            check_names.append(volume_name)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        for name in check_names[:]:
                                +            code, data = api_client.lhbackupvolumes.get(name)
                                +            if 404 == code:
                                +                check_names.remove(name)
                                +        if not check_names:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete unbound lhbackupvolumes: {check_names}\n"
                                +            f"Last API Status({code}): {data}"
                                +            )
                                +
                                 +    # restore the original backup-target and remove backups that don't belong to it
                                +    code, data = api_client.settings.update('backup-target', origin_spec)
                                +    code, data = api_client.backups.get()
                                +    assert 200 == code, "Failed to list backups"
                                +
                                +    check_names = []
                                +    for backup in data['data']:
                                +        endpoint = backup['status']['backupTarget'].get('endpoint')
                                +        if endpoint != origin_spec.value.get('endpoint'):
                                +            api_client.backups.delete(backup['metadata']['name'])
                                +            check_names.append(backup['metadata']['name'])
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        for name in check_names[:]:
                                +            code, data = api_client.backups.get(name)
                                +            if 404 == code:
                                +                check_names.remove(name)
                                +        if not check_names:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete backups: {check_names}\n"
                                +            f"Last API Status({code}): {data}"
                                +            )
                                +
                                def conflict_retries()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def conflict_retries():
                                 +    # This could be moved to a config option if needed.
                                +    return 5
                                +
                                def image(api_client, unique_name, wait_timeout, image_opensuse)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def image(api_client, unique_name, wait_timeout, image_opensuse):
                                +    unique_image_id = f'image-{unique_name}'
                                +    code, data = api_client.images.create_by_url(
                                +        unique_image_id, image_opensuse.url, display_name=f"{unique_name}-{image_opensuse.name}"
                                +    )
                                +
                                +    assert 201 == code, (code, data)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.images.get(unique_image_id)
                                +        if 100 == data.get('status', {}).get('progress', 0):
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            "Failed to create Image with error:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
                                +               user=image_opensuse.ssh_user)
                                +
                                +    code, data = api_client.images.delete(unique_image_id)
                                +

                                @@ -96,7 +418,6 @@

                                Classes

                                class TestBackupRestore
                                -
                                Expand source code @@ -425,6 +746,7 @@

                                Classes

                                f"Executed stderr: {err}" )
                                +

                                Class variables

                                var pytestmark
                                @@ -438,24 +760,192 @@

                                Methods

                                def test_connection(self, api_client, backup_config, config_backup_target)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency()
                                +def test_connection(self, api_client, backup_config, config_backup_target):
                                +    code, data = api_client.settings.backup_target_test_connection()
                                +    assert 200 == code, f'Failed to test backup target connection: {data}'
                                +
                                def test_restore_replace_vm_not_stop(self, api_client, backup_config, base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.negative
                                +@pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
                                +def test_restore_replace_vm_not_stop(self, api_client, backup_config, base_vm_with_data):
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.backups.restore(base_vm_with_data['name'], spec)
                                +
                                +    assert 422 == code, (code, data)
                                +
                                 -def test_restore_replace_with_delete_vols(self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker, backup_config, base_vm_with_data)
                                 +def test_restore_replace_with_delete_vols(self,
                                api_client,
                                vm_shell_from_host,
                                ssh_keypair,
                                wait_timeout,
                                vm_checker,
                                backup_config,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
                                +def test_restore_replace_with_delete_vols(
                                +    self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker,
                                +    backup_config, base_vm_with_data
                                +):
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    # mess up the existing data
                                +    with vm_shell_from_host(
                                +        base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
                                +        base_vm_with_data['ssh_user'], pkey=pri_key
                                +    ) as sh:
                                +        out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
                                +        assert not err, (out, err)
                                +        sh.exec_command('sync')
                                +
                                +    # Stop the VM then restore existing
                                +    vm_stopped, (code, data) = vm_checker.wait_stopped(unique_vm_name)
                                +    assert vm_stopped, (
                                +        f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                                +        f"Status({code}): {data}"
                                +    )
                                +
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
                                +
                                +    vm_running, (code, data) = vm_checker.wait_status_running(unique_vm_name)
                                +    assert vm_running, (
                                +        f"Failed to restore VM({unique_vm_name}) with errors:\n"
                                +        f"Status({code}): {data}"
                                +    )
                                +
                                +    # Check VM Started then get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'] = host_ip, vm_ip
                                +
                                 +    # Log in to the new VM and check the data still exists
                                +    with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +        out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +
                                +    assert backup_data['content'] in out, (
                                +        f"cloud-init writefile failed\n"
                                +        f"Executed stdout: {out}\n"
                                +        f"Executed stderr: {err}"
                                +    )
                                +
                                 -def test_restore_replace_with_vm_shutdown_command(self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker, backup_config, base_vm_with_data)
                                 +def test_restore_replace_with_vm_shutdown_command(self,
                                api_client,
                                vm_shell_from_host,
                                ssh_keypair,
                                wait_timeout,
                                vm_checker,
                                backup_config,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.skip_version_if('< v1.2.2')
                                +@pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
                                +def test_restore_replace_with_vm_shutdown_command(
                                +    self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker,
                                +    backup_config, base_vm_with_data
                                +):
                                +    ''' ref: https://github.com/harvester/tests/issues/943
                                +    1. Create VM and write some data
                                +    2. Take backup for the VM
                                 +    3. Mess up the existing data
                                 +    4. Shut down the VM by executing the `shutdown` command in the OS
                                 +    5. Restore backup to replace the existing VM
                                 +    6. VM should be restored successfully
                                 +    7. Data in VM should be the same as backed up
                                +    '''
                                +
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                 +    # mess up the existing data, then shut the VM down
                                +    with vm_shell_from_host(
                                +        base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
                                +        base_vm_with_data['ssh_user'], pkey=pri_key
                                +    ) as sh:
                                +        out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
                                +        assert not err, (out, err)
                                +        sh.exec_command('sync')
                                +        sh.exec_command('sudo shutdown now')
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(unique_vm_name)
                                +        if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to shut down VM({unique_vm_name}) with errors:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    # restore VM to existing
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
                                +    vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
                                +    assert vm_getable, (code, data)
                                +
                                +    # Check VM Started then get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                 +    # Log in to the new VM and check the data still exists
                                +    with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +        out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +
                                +    assert backup_data['content'] in out, (
                                +        f"cloud-init writefile failed\n"
                                +        f"Executed stdout: {out}\n"
                                +        f"Executed stderr: {err}"
                                +    )
                                +

                                 ref: https://github.com/harvester/tests/issues/943 1. Create VM and write some data 2. Take backup for the VM
                                 @@ -469,24 +959,202 @@

                                Methods

                                def test_restore_with_invalid_name(self, api_client, backup_config, base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.negative
                                +@pytest.mark.skip_version_if('< v1.1.2', '< v1.2.1')
                                +@pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
                                +def test_restore_with_invalid_name(self, api_client, backup_config, base_vm_with_data):
                                +    # RFC1123 DNS Subdomain name rules:
                                +    # 1. contain no more than 253 characters
                                +    # 2. contain only lowercase alphanumeric characters, '-' or '.'
                                +    # 3. start with an alphanumeric character
                                +    # 4. end with an alphanumeric character
                                +
                                +    unique_vm_name = base_vm_with_data['name']
                                +
                                +    # Case 1: longer than 253 chars
                                +    invalid_name = 'a' * 254
                                +    spec = api_client.backups.RestoreSpec.for_new(invalid_name)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 422 == code, (code, data)
                                +
                                 +    # Case 2: contains upper-case characters
                                +    invalid_name = 'the.name.IS.invalid'
                                +    spec = api_client.backups.RestoreSpec.for_new(invalid_name)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 422 == code, (code, data)
                                +
                                 +    # Case 3: does not start with an alphanumeric character
                                +    invalid_name = '-the.name.is.invalid'
                                +    spec = api_client.backups.RestoreSpec.for_new(invalid_name)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 422 == code, (code, data)
                                +
                                 +    # Case 4: does not end with an alphanumeric character
                                +    invalid_name = 'the.name.is.invalid.'
                                +    spec = api_client.backups.RestoreSpec.for_new(invalid_name)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 422 == code, (code, data)
                                +
                                 -def test_restore_with_new_vm(self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout, backup_config, base_vm_with_data)
                                 +def test_restore_with_new_vm(self,
                                api_client,
                                vm_shell_from_host,
                                vm_checker,
                                ssh_keypair,
                                wait_timeout,
                                backup_config,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
                                +def test_restore_with_new_vm(
                                +    self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout,
                                +    backup_config, base_vm_with_data
                                +):
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    # mess up the existing data
                                +    with vm_shell_from_host(
                                +        base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
                                +        base_vm_with_data['ssh_user'], pkey=pri_key
                                +    ) as sh:
                                +        out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
                                +        assert not err, (out, err)
                                +        sh.exec_command('sync')
                                +
                                +    # Restore VM into new
                                +    restored_vm_name = f"{backup_config[0].lower()}-restore-{unique_vm_name}"
                                +    spec = api_client.backups.RestoreSpec.for_new(restored_vm_name)
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 201 == code, (code, data)
                                +    vm_getable, (code, data) = vm_checker.wait_getable(restored_vm_name)
                                +    assert vm_getable, (code, data)
                                +
                                +    # Check VM Started then get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(restored_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({restored_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                 +    # Log in to the new VM and check the data still exists
                                +    with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            out, err = sh.exec_command('cloud-init status')
                                +            if 'done' in out:
                                +                break
                                +            sleep(3)
                                +        else:
                                +            raise AssertionError(
                                +                f"VM {restored_vm_name} Started {wait_timeout} seconds"
                                +                f", but cloud-init still in {out}"
                                +            )
                                +
                                +        out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +
                                +    assert backup_data['content'] in out, (
                                +        f"cloud-init writefile failed\n"
                                +        f"Executed stdout: {out}\n"
                                +        f"Executed stderr: {err}"
                                +    )
                                +
                                +    # teardown: delete restored vm and volumes
                                +    code, data = api_client.vms.get(restored_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    api_client.vms.delete(restored_vm_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(restored_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to Delete VM({restored_vm_name}) with errors:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
                                def test_update_backup_by_yaml(self, api_client, wait_timeout, backup_config, base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
                                +def test_update_backup_by_yaml(
                                +    self, api_client, wait_timeout, backup_config, base_vm_with_data
                                +):
                                +    backup_name = base_vm_with_data['name']
                                +    # Get backup as yaml
                                +    req_yaml = dict(Accept='application/yaml')
                                +    resp = api_client.backups.get(backup_name, headers=req_yaml, raw=True)
                                +    assert 200 == resp.status_code, (resp.status_code, resp.text)
                                +
                                +    # update annotation
                                +    yaml_header = {'Content-Type': 'application/yaml'}
                                +    customized_annotations = {'test.harvesterhci.io': 'for-test-update'}
                                +    data = yaml.safe_load(resp.text)
                                +    data['metadata'].setdefault('annotations', {}).update(customized_annotations)
                                +    yaml_data = yaml.safe_dump(data)
                                +    code, data = api_client.backups.update(backup_name, yaml_data,
                                +                                           as_json=False, headers=yaml_header)
                                +    assert 200 == code, (code, data)
                                +
                                +    # Verify annotation updated
                                +    code, data = api_client.backups.get(backup_name)
                                 +    all_updated = all(
                                 +        data['metadata']['annotations'].get(key) == val
                                 +        for key, val in customized_annotations.items()
                                 +    )
                                +    assert all_updated, f"Failed to update annotations: {customized_annotations!r}"
                                +
                                def tests_backup_vm(self, api_client, wait_timeout, backup_config, base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["TestBackupRestore::test_connection"], param=True)
                                +def tests_backup_vm(self, api_client, wait_timeout, backup_config, base_vm_with_data):
                                +    unique_vm_name = base_vm_with_data['name']
                                +
                                 +    # Create a backup named after the VM
                                +    code, data = api_client.vms.backup(unique_vm_name, unique_vm_name)
                                +    assert 204 == code, (code, data)
                                +    # Check backup is ready
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, backup = api_client.backups.get(unique_vm_name)
                                +        if 200 == code and backup.get('status', {}).get('readyToUse'):
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f'Timed-out waiting for the backup \'{unique_vm_name}\' to be ready.'
                                +        )
                                +
                                @@ -495,7 +1163,6 @@

                                Methods

                                class TestBackupRestoreOnMigration
                                -
                                Expand source code @@ -605,6 +1272,7 @@

                                Methods

                                f"Executed stderr: {err}" )
                                +

                                Class variables

                                var pytestmark
                                @@ -615,15 +1283,117 @@

                                Class variables

                                Methods

                                 -def test_backup_migrated_vm(self, api_client, wait_timeout, backup_config, config_backup_target, base_vm_migrated, base_vm_with_data)
                                 +def test_backup_migrated_vm(self,
                                api_client,
                                wait_timeout,
                                backup_config,
                                config_backup_target,
                                base_vm_migrated,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(param=True)
                                +def test_backup_migrated_vm(
                                +    self, api_client, wait_timeout, backup_config, config_backup_target,
                                +    base_vm_migrated, base_vm_with_data
                                +):
                                +    unique_vm_name = base_vm_with_data['name']
                                +
                                 +    # Create a backup named after the VM
                                +    code, data = api_client.vms.backup(unique_vm_name, unique_vm_name)
                                +    assert 204 == code, (code, data)
                                +    # Check backup is ready
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, backup = api_client.backups.get(unique_vm_name)
                                +        if 200 == code and backup.get('status', {}).get('readyToUse'):
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f'Timed-out waiting for the backup \'{unique_vm_name}\' to be ready.'
                                +        )
                                +
                                 -def test_restore_replace_migrated_vm(self, api_client, wait_timeout, ssh_keypair, vm_shell_from_host, vm_checker, backup_config, base_vm_migrated, base_vm_with_data)
                                 +def test_restore_replace_migrated_vm(self,
                                api_client,
                                wait_timeout,
                                ssh_keypair,
                                vm_shell_from_host,
                                vm_checker,
                                backup_config,
                                base_vm_migrated,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(
                                +    depends=["TestBackupRestoreOnMigration::test_backup_migrated_vm"],
                                +    param=True
                                +)
                                +def test_restore_replace_migrated_vm(
                                +    self, api_client, wait_timeout, ssh_keypair, vm_shell_from_host, vm_checker, backup_config,
                                +    base_vm_migrated, base_vm_with_data
                                +):
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    # mess up the existing data
                                +    with vm_shell_from_host(
                                +        base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
                                +        base_vm_with_data['ssh_user'], pkey=pri_key
                                +    ) as sh:
                                +        out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
                                +        assert not err, (out, err)
                                +        sh.exec_command('sync')
                                +
                                +    # Stop the VM then restore existing
                                +    vm_stopped, (code, data) = vm_checker.wait_stopped(unique_vm_name)
                                +    assert vm_stopped, (
                                +        f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                                +        f"Status({code}): {data}"
                                +    )
                                +
                                +    spec = api_client.backups.RestoreSpec.for_existing()
                                +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                +    assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
                                +    vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
                                +    assert vm_getable, (code, data)
                                +
                                +    # Check VM Started
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                 +    # Check the VM is still hosted on the node it was migrated to
                                +    host = data['status']['nodeName']
                                +    original_host, migrated_host = base_vm_migrated
                                +
                                +    assert host == migrated_host, (
                                +        f"Restored VM is not hosted on {migrated_host} but {host},"
                                +        f" the VM was initialized hosted on {original_host}"
                                +    )
                                +
                                +    # Get IP of VM and host
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    # Login to the new VM and check data is existing
                                +    with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_vm_name} Started {vm_checker.wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +        out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +
                                +    assert backup_data['content'] in out, (
                                +        f"cloud-init writefile failed\n"
                                +        f"Executed stdout: {out}\n"
                                +        f"Executed stderr: {err}"
                                +    )
                                +
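+For readers tracing the fixtures, the keys this test reads from `base_vm_with_data` and
+`base_vm_migrated` can be inferred from the accesses above. A minimal sketch of the assumed
+shapes, with purely hypothetical values:
+
+# Illustrative only: hypothetical values; the keys are the ones accessed by the test above.
+base_vm_with_data = {
+    "name": "unique-vm-name",            # VM to be restored in place
+    "host_ip": "192.0.2.10",             # node hosting the VM, used as SSH jumphost
+    "vm_ip": "10.52.0.214",              # VM address reachable from that node
+    "ssh_user": "ubuntu",                # login user of the image
+    "data": {
+        "path": "/home/ubuntu/vmname",   # file written before the backup was taken
+        "content": "original content",   # expected to reappear after the restore
+    },
+}
+base_vm_migrated = ("node-1", "node-2")  # (original_host, migrated_host)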
                                @@ -632,7 +1402,6 @@

                                Methods

                                class TestMultipleBackupRestore
                                -
                                Expand source code @@ -1011,6 +1780,7 @@

                                Methods

                                f"Executed stderr: {err}" )
                                +

                                Class variables

                                var pytestmark
                                @@ -1021,27 +1791,408 @@

                                Class variables

                                Methods

                                -def test_backup_multiple(self, api_client, wait_timeout, host_shell, vm_shell, vm_checker, ssh_keypair, backup_config, config_backup_target, base_vm_with_data) +def test_backup_multiple(self,
                                api_client,
                                wait_timeout,
                                host_shell,
                                vm_shell,
                                vm_checker,
                                ssh_keypair,
                                backup_config,
                                config_backup_target,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency()
                                +def test_backup_multiple(
                                +    self, api_client, wait_timeout, host_shell, vm_shell, vm_checker, ssh_keypair,
                                +    backup_config, config_backup_target, base_vm_with_data
                                +):
                                +    def write_data(content):
                                +        pub_key, pri_key = ssh_keypair
                                +        # Log into VM to make some data
                                +        with host_shell.login(host_ip, jumphost=True) as h:
                                +            vm_sh = vm_shell(base_vm_with_data['ssh_user'], pkey=pri_key)
                                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +            while endtime > datetime.now():
                                +                try:
                                +                    vm_sh.connect(vm_ip, jumphost=h.client)
                                +                except ChannelException as e:
                                +                    login_ex = e
                                +                    sleep(3)
                                +                else:
                                +                    break
                                +            else:
                                +                raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                                +
                                +            with vm_sh as sh:
                                +                endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +                while endtime > datetime.now():
                                +                    out, err = sh.exec_command('cloud-init status')
                                +                    if 'done' in out:
                                +                        break
                                +                    sleep(3)
                                +                else:
                                +                    raise AssertionError(
                                +                        f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +                        f", but cloud-init still in {out}"
                                +                    )
                                +                out, err = sh.exec_command(f'echo {content!r} >> ~/vmname')
                                +                assert not err, (out, err)
                                +                sh.exec_command('sync')
                                +
                                +    def create_backup(vm_name, backup_name):
                                +        code, data = api_client.vms.backup(vm_name, backup_name)
                                +        assert 204 == code, (code, data)
                                +        # Check backup is ready
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            code, backup = api_client.backups.get(backup_name)
                                +            if 200 == code and backup.get('status', {}).get('readyToUse'):
                                +                break
                                +            sleep(3)
                                +        else:
                                +            raise AssertionError(
                                +                f'Timed-out waiting for the backup \'{backup_name}\' to be ready.'
                                +            )
                                +
                                +    unique_vm_name = base_vm_with_data['name']
                                +    # Check VM started and get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    content = ""
                                +    # Create multiple backups
                                +    for idx in range(0, 5):
                                +        backup_name = f"{idx}-{unique_vm_name}"
                                +        write_data(backup_name)
                                +        create_backup(unique_vm_name, backup_name)
                                +        content += f"{backup_name}\n"
                                +        base_vm_with_data['data'].setdefault('backups', []).append((backup_name, content))
                                +
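+The list assembled here is what the following delete tests slice apart: each entry pairs a backup
+name with the cumulative file content at the moment that backup was taken, so restoring any entry
+should make its recorded content a substring of the file. A minimal sketch, assuming a hypothetical
+VM name `vm-abc`:
+
+# Illustrative only: expected shape of base_vm_with_data['data']['backups'] after the loop above,
+# with unique_vm_name == "vm-abc" (hypothetical).
+backups = [
+    ("0-vm-abc", "0-vm-abc\n"),
+    ("1-vm-abc", "0-vm-abc\n1-vm-abc\n"),
+    ("2-vm-abc", "0-vm-abc\n1-vm-abc\n2-vm-abc\n"),
+    ("3-vm-abc", "0-vm-abc\n1-vm-abc\n2-vm-abc\n3-vm-abc\n"),
+    ("4-vm-abc", "0-vm-abc\n1-vm-abc\n2-vm-abc\n3-vm-abc\n4-vm-abc\n"),
+]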
                                -def test_delete_first_backup(self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout, backup_config, config_backup_target, base_vm_with_data) +def test_delete_first_backup(self,
                                api_client,
                                host_shell,
                                vm_shell,
                                vm_checker,
                                ssh_keypair,
                                wait_timeout,
                                backup_config,
                                config_backup_target,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(
                                +    depends=["TestMultipleBackupRestore::test_backup_multiple"], param=True
                                +)
                                +def test_delete_first_backup(
                                +    self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout,
                                +    backup_config, config_backup_target, base_vm_with_data
                                +):
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    backups = backup_data['backups']
                                +    (first_backup, content), *backup_data['backups'] = backups
                                +    latest_backup = backups[-1][0]
                                +
                                +    # Delete first backup
                                +    code, data = api_client.backups.delete(first_backup)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.backups.get(first_backup)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete backup {first_backup}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                +    # Stop the VM
                                +    code, data = api_client.vms.stop(unique_vm_name)
                                +    assert 204 == code, "`Stop` return unexpected status code"
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(unique_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.backups.restore(latest_backup, spec)
                                +    assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
                                +    vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
                                +    assert vm_getable, (code, data)
                                +
                                +    # Check VM Started then get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    # Login to the new VM and check data is existing
                                +    with host_shell.login(host_ip, jumphost=True) as h:
                                +        vm_sh = vm_shell(base_vm_with_data['ssh_user'], pkey=pri_key)
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            try:
                                +                vm_sh.connect(vm_ip, jumphost=h.client)
                                +            except ChannelException as e:
                                +                login_ex = e
                                +                sleep(3)
                                +            else:
                                +                break
                                +        else:
                                +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                                +
                                +        with vm_sh as sh:
                                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +            while endtime > datetime.now():
                                +                out, err = sh.exec_command('cloud-init status')
                                +                if 'done' in out:
                                +                    break
                                +                sleep(3)
                                +            else:
                                +                raise AssertionError(
                                +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +                    f", but cloud-init still in {out}"
                                +                )
                                +
                                +            out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +        assert content in out, (
                                +            f"cloud-init writefile failed\n"
                                +            f"Executed stdout: {out}\n"
                                +            f"Executed stderr: {err}"
                                +        )
                                +
                                -def test_delete_last_backup(self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout, backup_config, config_backup_target, base_vm_with_data) +def test_delete_last_backup(self,
                                api_client,
                                host_shell,
                                vm_shell,
                                vm_checker,
                                ssh_keypair,
                                wait_timeout,
                                backup_config,
                                config_backup_target,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(
                                +    depends=["TestMultipleBackupRestore::test_backup_multiple"], param=True
                                +)
                                +def test_delete_last_backup(
                                +    self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout,
                                +    backup_config, config_backup_target, base_vm_with_data
                                +):
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    *backups, (latest_backup, content), (last_backup, _) = backup_data['backups']
                                +    backup_data['backups'] = backup_data['backups'][:-1]
                                +
+    # Delete the last backup
                                +    code, data = api_client.backups.delete(last_backup)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.backups.get(last_backup)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete backup {last_backup}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                +    # Stop the VM
                                +    code, data = api_client.vms.stop(unique_vm_name)
                                +    assert 204 == code, "`Stop` return unexpected status code"
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(unique_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.backups.restore(latest_backup, spec)
                                +    assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
                                +    vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
                                +    assert vm_getable, (code, data)
                                +
                                +    # Check VM Started then get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    # Login to the new VM and check data is existing
                                +    with host_shell.login(host_ip, jumphost=True) as h:
                                +        vm_sh = vm_shell(base_vm_with_data['ssh_user'], pkey=pri_key)
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            try:
                                +                vm_sh.connect(vm_ip, jumphost=h.client)
                                +            except ChannelException as e:
                                +                login_ex = e
                                +                sleep(3)
                                +            else:
                                +                break
                                +        else:
                                +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                                +
                                +        with vm_sh as sh:
                                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +            while endtime > datetime.now():
                                +                out, err = sh.exec_command('cloud-init status')
                                +                if 'done' in out:
                                +                    break
                                +                sleep(3)
                                +            else:
                                +                raise AssertionError(
                                +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +                    f", but cloud-init still in {out}"
                                +                )
                                +
                                +            out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +        assert content in out, (
                                +            f"cloud-init writefile failed\n"
                                +            f"Executed stdout: {out}\n"
                                +            f"Executed stderr: {err}"
                                +        )
                                +
                                -def test_delete_middle_backup(self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout, backup_config, config_backup_target, base_vm_with_data) +def test_delete_middle_backup(self,
                                api_client,
                                host_shell,
                                vm_shell,
                                vm_checker,
                                ssh_keypair,
                                wait_timeout,
                                backup_config,
                                config_backup_target,
                                base_vm_with_data)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(
                                +    depends=["TestMultipleBackupRestore::test_backup_multiple"], param=True
                                +)
                                +def test_delete_middle_backup(
                                +    self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout,
                                +    backup_config, config_backup_target, base_vm_with_data
                                +):
                                +    unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    *backups, (middle_backup, _), (latest_backup, content) = backup_data['backups']
                                +    backup_data['backups'] = backups + [(latest_backup, content)]
                                +
+    # Delete the second-to-last backup
                                +    code, data = api_client.backups.delete(middle_backup)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.backups.get(middle_backup)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete backup {middle_backup}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                +    # Stop the VM
                                +    code, data = api_client.vms.stop(unique_vm_name)
                                +    assert 204 == code, "`Stop` return unexpected status code"
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(unique_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to Stop VM({unique_vm_name}) with errors:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.backups.restore(latest_backup, spec)
                                +    assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
                                +    vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
                                +    assert vm_getable, (code, data)
                                +
                                +    # Check VM Started then get IPs (vm and host)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +
                                +    # Login to the new VM and check data is existing
                                +    with host_shell.login(host_ip, jumphost=True) as h:
                                +        vm_sh = vm_shell(base_vm_with_data['ssh_user'], pkey=pri_key)
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            try:
                                +                vm_sh.connect(vm_ip, jumphost=h.client)
                                +            except ChannelException as e:
                                +                login_ex = e
                                +                sleep(3)
                                +            else:
                                +                break
                                +        else:
                                +            raise AssertionError(f"Unable to login to VM {unique_vm_name}") from login_ex
                                +
                                +        with vm_sh as sh:
                                +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +            while endtime > datetime.now():
                                +                out, err = sh.exec_command('cloud-init status')
                                +                if 'done' in out:
                                +                    break
                                +                sleep(3)
                                +            else:
                                +                raise AssertionError(
                                +                    f"VM {unique_vm_name} Started {wait_timeout} seconds"
                                +                    f", but cloud-init still in {out}"
                                +                )
                                +
                                +            out, err = sh.exec_command(f"cat {backup_data['path']}")
                                +        assert content in out, (
                                +            f"cloud-init writefile failed\n"
                                +            f"Executed stdout: {out}\n"
                                +            f"Executed stderr: {err}"
                                +        )
                                +
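+All three delete tests above select their target backups with Python's extended unpacking; a small
+self-contained sketch of the idiom, mirroring the three cases:
+
+# Illustrative only: how the delete tests pick entries off either end of the backups list.
+demo = [("b0", "c0"), ("b1", "c1"), ("b2", "c2"), ("b3", "c3")]
+(first, _), *rest = demo                  # test_delete_first_backup: oldest entry
+*_, (second_last, _), (last, _) = demo    # test_delete_last_backup / test_delete_middle_backup
+assert (first, second_last, last) == ("b0", "b2", "b3")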
                                @@ -1112,7 +2263,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_4_vm_host_powercycle.html b/backend/integrations/test_4_vm_host_powercycle.html index 884881f46..d7d527516 100644 --- a/backend/integrations/test_4_vm_host_powercycle.html +++ b/backend/integrations/test_4_vm_host_powercycle.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_4_vm_host_powercycle API documentation - + @@ -37,24 +48,236 @@

                                Functions

                                def available_node_names(api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def available_node_names(api_client):
                                +    status_code, nodes_info = api_client.hosts.get()
                                +    assert status_code == 200, f"Failed to list nodes with error: {nodes_info}"
                                +
                                +    node_names = []
                                +    for node_info in nodes_info.get('data', []):
                                +        is_ready = False
                                +        for condition in node_info.get('status', {}).get('conditions', []):
                                +            if condition.get('type', "") == "Ready" and \
                                +                    condition.get('status', "") == "True":
                                +                is_ready = True
                                +                break
                                +
                                +        if is_ready and not node_info.get('spec', {}).get('unschedulable', False):
                                +            node_names.append(node_info['metadata']['name'])
                                +
                                +    return node_names
                                +
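+The fixture keys on two fields per node: a `Ready` condition with status `"True"` and the absence
+of `spec.unschedulable`. A minimal sketch of entries that would and would not be returned, using
+hypothetical node names:
+
+# Illustrative only: hypothetical node entries shaped like the fields the fixture reads.
+ready_node = {
+    "metadata": {"name": "node-1"},
+    "spec": {},  # not unschedulable, so the node stays schedulable
+    "status": {"conditions": [{"type": "Ready", "status": "True"}]},
+}
+cordoned_node = {
+    "metadata": {"name": "node-2"},
+    "spec": {"unschedulable": True},  # filtered out even though Ready
+    "status": {"conditions": [{"type": "Ready", "status": "True"}]},
+}
+# available_node_names would contain "node-1" but not "node-2".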
                                def focal_image(api_client, unique_name, image_ubuntu, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def focal_image(api_client, unique_name, image_ubuntu, wait_timeout):
                                +    code, data = api_client.images.create_by_url(unique_name, image_ubuntu.url)
                                +    assert 201 == code, (
                                +        f"Failed to upload focal image with error: {code}, {data}"
                                +    )
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.images.get(unique_name)
                                +        if 'status' in data and 'progress' in data['status'] and \
                                +                data['status']['progress'] == 100:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Image {unique_name} can't be ready with {wait_timeout} timed out\n"
                                +            f"Got error: {code}, {data}"
                                +        )
                                +
                                +    namespace = data['metadata']['namespace']
                                +    name = data['metadata']['name']
                                +
                                +    yield dict(ssh_user=image_ubuntu.ssh_user, id=f"{namespace}/{name}")
                                +
                                +    api_client.images.delete(name, namespace)
                                +
                                def focal_vm(api_client, focal_image, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture()
                                +def focal_vm(api_client, focal_image, wait_timeout):
                                +    unique_name = f'vm-{datetime.now().strftime("%Hh%Mm%Ss%f-%m-%d")}'
                                +    vm_spec = api_client.vms.Spec(1, 1)
                                +    vm_spec.add_image('disk-0', focal_image['id'])
                                +    code, data = api_client.vms.create(unique_name, vm_spec)
                                +    assert 201 == code, (
                                +        f"Failed to create VM {unique_name} with error: {code}, {data}"
                                +    )
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(unique_name)
                                +        if data.get('status', {}).get('ready', False):
                                +            code, data = api_client.vms.get_status(unique_name)
                                +            if data['status']['conditions'][-1]['status'] == 'True':
                                +                break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Can't find VM {unique_name} with {wait_timeout} timed out\n"
                                +            f"Got error: {code}, {data}"
                                +        )
                                +
                                +    data['name'] = data['metadata']['name']
                                +    data['namespace'] = data['metadata']['namespace']
                                +    yield data
                                +
                                +    code, data = api_client.vms.get(unique_name)
                                +    if 200 == code:  # ???: https://github.com/harvester/harvester/issues/4388
                                +        volume_name = ""
                                +        for volume in data['spec']['template']['spec']['volumes']:
                                +            if volume['name'] == 'disk-0':
                                +                volume_name = volume['persistentVolumeClaim']['claimName']
                                +        api_client.vms.delete(unique_name)
                                +        api_client.volumes.delete(volume_name)
                                +
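+The tests below rely only on the `name` and `namespace` keys the fixture adds before yielding; a
+minimal usage sketch (hypothetical test, same `api_client` as above):
+
+# Illustrative only: how the powercycle tests consume the fixture.
+def test_example(api_client, focal_vm):
+    code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
+    assert 200 == code, (code, data)
+    hosting_node = data['status']['nodeName']  # the node the tests later power off or cordon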
                                def test_delete_vm_after_host_shutdown(api_client, host_state, wait_timeout, focal_vm, available_node_names)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.hosts
                                +@pytest.mark.dependency(depends=["host_poweroff", "host_poweron"])
                                +def test_delete_vm_after_host_shutdown(
                                +    api_client, host_state, wait_timeout, focal_vm, available_node_names
                                +):
                                +    """
                                +    To cover test:
                                +    - https://harvester.github.io/tests/manual/hosts/delete_vm_after_host_shutdown
                                +
                                +    Prerequisite:
                                +        - Cluster's nodes >= 2
                                +    Steps:
                                +        1. Create a VM with 1 CPU 1 Memory and runStrategy is `RerunOnFailure`
                                +        2. Power off the node hosting the VM
                                +        3. Delete the VM
                                +        4. Verify the VM
+    Expected Result:
+        - VM should be created and started successfully
+        - Node should be unavailable after shutdown
+        - VM should be able to be deleted
                                +    """
                                +    assert 2 <= len(available_node_names), (
                                +        f"The cluster only have {len(available_node_names)} available node."
                                +        " It's not enough for test."
                                +    )
                                +
                                +    code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +    assert 200 == code, (
                                +        f"Can't get VMI {focal_vm['namespace']}/{focal_vm['name']} with error: {code}, {data}"
                                +    )
                                +
                                +    # poweroff host
                                +    _, node = api_client.hosts.get(data['status']['nodeName'])
                                +    node_ip = next(val["address"] for val in node['status']['addresses']
                                +                   if val["type"] == "InternalIP")
                                +    rc, out, err = host_state.power(node['id'], node_ip, on=False)
                                +    assert rc == 0, (f"Failed to PowerOff node {node['id']} with error({rc}):\n"
                                +                     f"stdout: {out}\n\nstderr: {err}")
                                +    sleep(host_state.delay)  # Wait for the node to disappear
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, metric = api_client.hosts.get_metrics(node['id'])
                                +        if 404 == code:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Node {node['id']} still available after PowerOff script executed"
                                +            f", script path: {host_state.path}"
                                +        )
                                +
                                +    try:
                                +        # Delete VM and check it been deleted
                                +        code, data = api_client.vms.delete(focal_vm['name'], focal_vm['namespace'])
                                +        assert 200 == code, (code, data)
                                +        spec = api_client.vms.Spec.from_dict(data)
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            code, data = api_client.vms.get(focal_vm['name'], focal_vm['namespace'])
                                +            if 404 == code:
                                +                break
                                +            sleep(3)
                                +        else:
                                +            raise AssertionError(
                                +                f"Failed to Delete VM({focal_vm['name']}) with errors:\n"
                                +                f"Status({code}): {data}"
                                +            )
                                +
                                +        fails, check = [], dict()
                                +        for vol in spec.volumes:
                                +            vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +            code, data = api_client.volumes.delete(vol_name)
                                +            if 200 == code:
                                +                check[vol_name] = (code, data)
                                +            else:
+                fails.append((vol_name, (code, data)))
                                +
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            for vol_name, (code, data) in check.copy().items():
                                +                code, data = api_client.volumes.get(vol_name)
                                +                if 404 == code:
                                +                    check.pop(vol_name)
                                +            if not check:
                                +                break
                                +            sleep(5)
                                +        else:
                                +            for vol_name, (code, data) in check.items():
                                +                fails.append((vol_name, f"Failed to delete\nStatus({code}): {data}"))
                                +
                                +        assert not fails, (
                                +            f"Failed to delete VM({focal_vm['name']})'s volumes with errors:\n"
                                +            "\n".join(f"Volume({n}): {r}" for n, r in fails)
                                +        )
                                +    finally:
                                +        # teardown: power on the host
                                +        rc, out, err = host_state.power(node['id'], node_ip, on=True)
                                +        assert rc == 0, (f"Failed to PowerOn node {node['id']} with error({rc}):\n"
                                +                         f"stdout: {out}\n\nstderr: {err}")
+        sleep(host_state.delay)  # Wait for the node to come back
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            _, metric = api_client.hosts.get_metrics(node['id'])
                                +            if not metric.get("metadata", {}).get("state", {}).get("error"):
                                +                break
                                +            sleep(5)
                                +        else:
                                +            raise AssertionError(
                                +                f"Node {node['id']} still unavailable after PowerOn script executed"
                                +                f", script path: {host_state.path}"
                                +            )
                                +

                                To cover test: - https://harvester.github.io/tests/manual/hosts/delete_vm_after_host_shutdown

                                Prerequisite

                                @@ -79,12 +302,162 @@

                                Steps

                                def test_maintenance_mode_trigger_vm_migrate(api_client, focal_vm, wait_timeout, available_node_names)
                                +
                                + +Expand source code + +
                                @pytest.mark.hosts
                                +@pytest.mark.p0
                                +def test_maintenance_mode_trigger_vm_migrate(
                                +    api_client, focal_vm, wait_timeout, available_node_names
                                +):
                                +    assert 2 <= len(available_node_names), (
                                +        f"The cluster only have {len(available_node_names)} available node. \
                                +            It's not enough for migration test."
                                +    )
                                +
                                +    code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +    assert 200 == code, (
                                +        f"Can't get VMI {focal_vm['namespace']}/{focal_vm['name']} with error: {code}, {data}"
                                +    )
                                +    src_host = data['status']['nodeName']
                                +
                                +    code, data = api_client.hosts.maintenance_mode(src_host, enable=True)
                                +    assert 204 == code, (
                                +        f"Failed to enable maintenance mode on node {src_host} with error: {code}, {data}",
                                +    )
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.hosts.get(src_host)
                                +        if data.get('metadata', {}) \
                                +                .get('annotations', {}) \
                                +                .get('harvesterhci.io/maintain-status', '') == "completed":
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"The maintain-status of node {src_host} can't be completed \
                                +                with {wait_timeout} timed out\n"
                                +            f"Got error: {code}, {data}"
                                +        )
                                +
                                +    code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +    assert 200 == code, (
                                +        f"Failed to get VM {focal_vm['namespace']}/{focal_vm['name']} with error: {code}, {data}"
                                +    )
                                +    assert src_host != data['status']['nodeName'], (
                                +        f"Failed to migrate VM {focal_vm['namespace']}/{focal_vm['name']} \
                                +            from {src_host} to another node"
                                +    )
                                +    assert 'Running' == data['status']['phase'], (
                                +        f"Failed to migrate VM {focal_vm['namespace']}/{focal_vm['name']}, \
                                +            it's not running after migration"
                                +    )
                                +
                                +    # teardown
                                +    code, data = api_client.hosts.maintenance_mode(src_host, enable=False)
                                +    assert 204 == code, (
                                +        f"Failed to disable maintenance mode on node {src_host} with error: {code}, {data}",
                                +    )
                                +
                                -def test_poweroff_node_trigger_vm_reschedule(api_client, host_state, focal_vm, wait_timeout, available_node_names, vm_force_reset_policy) +def test_poweroff_node_trigger_vm_reschedule(api_client,
                                host_state,
                                focal_vm,
                                wait_timeout,
                                available_node_names,
                                vm_force_reset_policy)
                                +
                                + +Expand source code + +
                                @pytest.mark.hosts
                                +@pytest.mark.p0
                                +@pytest.mark.dependency(depends=["host_poweroff", "host_poweron"])
                                +def test_poweroff_node_trigger_vm_reschedule(
                                +    api_client, host_state, focal_vm, wait_timeout, available_node_names, vm_force_reset_policy
                                +):
                                +    """
                                +    To cover test:
                                +    - https://harvester.github.io/tests/manual/hosts/vm_rescheduled_after_host_poweroff
                                +
                                +    Prerequisite:
                                +        - Cluster's nodes >= 2
                                +    Steps:
                                +        1. Create a VM with 1 CPU 1 Memory and runStrategy is `RerunOnFailure`
                                +        2. Power off the node hosting the VM
                                +        3. Verify the VM
+    Expected Result:
+        - VM should be created and started successfully
+        - Node should be unavailable after shutdown
+        - VM should be restarted automatically
                                +    """
                                +    assert 2 <= len(available_node_names), (
                                +        f"The cluster only have {len(available_node_names)} available node. \
                                +            It's not enough for migration test."
                                +    )
                                +
                                +    code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +    assert 200 == code, (
                                +        f"Can't get VMI {focal_vm['namespace']}/{focal_vm['name']} with error: {code}, {data}"
                                +    )
                                +    old_uid = data['metadata']['uid']
                                +
                                +    # poweroff host
                                +    _, node = api_client.hosts.get(data['status']['nodeName'])
                                +    node_ip = next(val["address"] for val in node['status']['addresses']
                                +                   if val["type"] == "InternalIP")
                                +    rc, out, err = host_state.power(node['id'], node_ip, on=False)
                                +    assert rc == 0, (f"Failed to PowerOff node {node['id']} with error({rc}):\n"
                                +                     f"stdout: {out}\n\nstderr: {err}")
                                +    sleep(host_state.delay)  # Wait for the node to disappear
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, metric = api_client.hosts.get_metrics(node['id'])
                                +        if 404 == code:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Node {node['id']} still available after PowerOff script executed"
                                +            f", script path: {host_state.path}"
                                +        )
                                +
                                +    try:
                                +        # XXX: default `node-monitor-grace-period` is 5 minutes
                                +        # ref: https://github.com/harvester/harvester/issues/3896#issuecomment-1553154738
                                +        sleep(5 * 60 + vm_force_reset_policy['period'])
                                +        # check vm is restarted
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +            if old_uid != data['metadata']['uid'] and "Running" == data['status'].get('phase'):
                                +                break
                                +            sleep(5)
                                +        else:
                                +            raise AssertionError(
                                +                f"VM {focal_vm['namespace']}/{focal_vm['name']} can't be Running \
                                +                    with {wait_timeout} timed out\n"
                                +                f"Got error: {code}, {data}"
                                +            )
                                +    finally:
                                +        # teardown
                                +        rc, out, err = host_state.power(node['id'], node_ip, on=True)
                                +        assert rc == 0, (f"Failed to PowerOn node {node['id']} with error({rc}):\n"
                                +                         f"stdout: {out}\n\nstderr: {err}")
                                +        sleep(host_state.delay)  # Wait for the node to appear
                                +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while endtime > datetime.now():
                                +            _, metric = api_client.hosts.get_metrics(node['id'])
                                +            if not metric.get("metadata", {}).get("state", {}).get("error"):
                                +                break
                                +            sleep(5)
                                +        else:
                                +            raise AssertionError(
                                +                f"Node {node['id']} still unavailable after PowerOn script executed"
                                +                f", script path: {host_state.path}"
                                +            )
                                +

                                To cover test: - https://harvester.github.io/tests/manual/hosts/vm_rescheduled_after_host_poweroff

                                Prerequisite

                                @@ -108,12 +481,127 @@

                                Steps
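As a rough sketch of the wait budget used by the poweroff test above, assuming the default Kubernetes `node-monitor-grace-period` (5 minutes) and the 10-second period set by the `vm_force_reset_policy` fixture below (helper name is illustrative only):

    # mirrors the sleep(5 * 60 + vm_force_reset_policy['period']) in the test above
    NODE_MONITOR_GRACE_PERIOD = 5 * 60  # seconds, Kubernetes default

    def failover_wait_seconds(force_reset_period=10):
        # a failed VM is only force-reset after the node is marked NotReady
        # (grace period) plus the configured force-reset period
        return NODE_MONITOR_GRACE_PERIOD + force_reset_period

    failover_wait_seconds()  # 310 seconds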

                                def test_verify_host_info(api_client)
                                +
                                + +Expand source code + +
                                @pytest.mark.hosts
                                +@pytest.mark.p0
                                +def test_verify_host_info(api_client):
                                +    status_code, nodes_info = api_client.hosts.get()
                                +
                                +    assert 200 == status_code, f"Failed to list nodes with error: {nodes_info}"
                                +
                                +    fails = []
                                +    for node in nodes_info['data']:
                                +        node_name = node.get('metadata', {}).get('name', "")
                                +        if node_name == "":
                                +            fails.append((node['id'], "node name can't be empty"))
                                +
                                +        cpus = node.get('status', {}).get('capacity', {}).get('cpu', 0)
                                +        if cpus == 0:
                                +            fails.append((node['id'], "cpu should not be zero"))
                                +
                                +        mems = node.get('status', {}).get('capacity', {}).get('memory', "0Ki")
                                +        mems = "".join(c for c in mems if c.isdigit())
                                +        mems = mems or "0"
                                +        if int(mems, 10) == 0:
                                +            fails.append((node['id'], "memory should not be zero"))
                                +
                                +    assert not fails, (
                                +        "Failed to get node information with errors:\n",
                                +        "\n".join(f"Node {n}: {r}" for n, r in fails)
                                +    )
                                +
                                def test_vm_restarted_after_host_reboot(api_client, host_state, wait_timeout, focal_vm, available_node_names)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.hosts
                                +@pytest.mark.dependency(depends=["host_poweroff", "host_poweron"])
                                +def test_vm_restarted_after_host_reboot(
                                +    api_client, host_state, wait_timeout, focal_vm, available_node_names
                                +):
                                +    """
                                +    To cover test:
                                +    - https://harvester.github.io/tests/manual/hosts/vm_migrated_after_host_reboot
                                +
                                +    Prerequisite:
                                +        - Cluster's nodes >= 2
                                +    Steps:
+        1. Create a VM with 1 CPU and 1 Memory, and set runStrategy to `RerunOnFailure`
+        2. Reboot the node hosting the VM
+        3. Verify the VM
+    Expected Result:
+        - VM should be created
                                +        - Node should be unavailable while rebooting
                                +        - VM should be restarted
                                +    """
                                +    assert 2 <= len(available_node_names), (
                                +        f"The cluster only have {len(available_node_names)} available node. \
                                +            It's not enough for migration test."
                                +    )
                                +
                                +    code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +    assert 200 == code, (
                                +        f"Can't get VMI {focal_vm['namespace']}/{focal_vm['name']} with error: {code}, {data}"
                                +    )
                                +    old_uid = data['metadata']['uid']
                                +
                                +    # reboot the host
                                +    _, node = api_client.hosts.get(data['status']['nodeName'])
                                +    node_ip = next(val["address"] for val in node['status']['addresses']
                                +                   if val["type"] == "InternalIP")
                                +    rc, out, err = host_state.reboot(node['id'], node_ip)
                                +    assert rc == 0, (f"Failed to reboot node {node['id']} with error({rc}):\n"
                                +                     f"stdout: {out}\n\nstderr: {err}")
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout + host_state.delay)
                                +    while endtime > datetime.now():
                                +        code, metric = api_client.hosts.get_metrics(node['id'])
                                +        if 404 == code:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Node {node['id']} still available after reboot script executed"
                                +            f", script path: {host_state.path}"
                                +        )
                                +
                                +    # check vm is restarted
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(focal_vm['name'], focal_vm['namespace'])
                                +        if (200 == code and old_uid != data['metadata']['uid']
                                +                and "Running" == data['status'].get('phase')):
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            "VMI didn't changed after host rebooted.\n"
                                +            f"Got error: {code}, {data}"
                                +        )
                                +
                                +    # check the node is back
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        _, metric = api_client.hosts.get_metrics(node['id'])
                                +        if not metric.get("metadata", {}).get("state", {}).get("error"):
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Node {node['id']} still unavailable after reboot script executed"
                                +            f", script path: {host_state.path}"
                                +        )
                                +

                                To cover test: - https://harvester.github.io/tests/manual/hosts/vm_migrated_after_host_reboot

                                Prerequisite

                                @@ -137,6 +625,30 @@

                                Steps
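The reboot test above detects a restart by comparing the VMI's `metadata.uid` before and after the reboot; a minimal sketch of that check, using the `api_client.vms.get_status` call shown in this module (the helper name is illustrative):

    def vmi_was_recreated(api_client, name, namespace, old_uid):
        # the VMI object is recreated when the VM restarts, so its uid changes
        code, data = api_client.vms.get_status(name, namespace)
        return (200 == code
                and old_uid != data['metadata']['uid']
                and "Running" == data['status'].get('phase'))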

                                def vm_force_reset_policy(api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def vm_force_reset_policy(api_client):
                                +    # update vm-force-reset-policy to 10s, so we don't need to wait 300s to force remove VM
                                +    code, data = api_client.settings.get("vm-force-reset-policy")
                                +    assert 200 == code, (
                                +        f"Failed to get vm-force-reset-policy setting with error: {code}, {data}")
                                +    original_value = data.get("value", data['default'])
                                +
                                +    spec = dict(enable=True, period=10)
                                +    code, data = api_client.settings.update("vm-force-reset-policy", dict(value=json.dumps(spec)))
                                +    assert 200 == code, (
                                +        f"Failed to update vm-force-reset-policy setting with error: {code}, {data}"
                                +    )
                                +
                                +    yield spec
                                +
                                +    # teardown
                                +    updates = {"value": original_value}
                                +    api_client.settings.update("vm-force-reset-policy", updates)
                                +
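For reference, the value this fixture writes is a JSON-encoded string; a sketch of reading it back while the fixture is active (assuming the response shape shown above):

    import json

    code, data = api_client.settings.get("vm-force-reset-policy")
    policy = json.loads(data.get("value", data["default"]))
    assert policy == {"enable": True, "period": 10}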

                                @@ -148,7 +660,6 @@

                                Classes

                                class TestHostState
                                -
Expand source code
@@ -227,6 +738,7 @@

                                Classes

_, node = api_client.hosts.get(node['id'])
assert "active" == node["metadata"]["state"]["name"]
                                +

                                Class variables

                                var pytestmark
                                @@ -240,6 +752,45 @@

                                Methods

                                def test_poweroff_state(self, api_client, host_state, wait_timeout, available_node_names)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="host_poweroff")
                                +def test_poweroff_state(self, api_client, host_state, wait_timeout, available_node_names):
                                +    """
+    Test that the hosts are the nodes which make up the cluster
                                +    Covers:
                                +        hosts-01-Negative test-Verify the state for Powered down node
                                +    """
                                +    assert 2 <= len(available_node_names), (
                                +        f"The cluster only have {len(available_node_names)} available node."
                                +        " It's not enough for power off test."
                                +    )
                                +    _, node = api_client.hosts.get(available_node_names[-1])
                                +
                                +    node_ip = next(val["address"] for val in node['status']['addresses']
                                +                   if val["type"] == "InternalIP")
                                +
                                +    rc, out, err = host_state.power(node['id'], node_ip, on=False)
                                +    assert rc == 0, (f"Failed to PowerOff node {node['id']} with error({rc}):\n"
                                +                     f"stdout: {out}\n\nstderr: {err}")
                                +    sleep(host_state.delay)  # Wait for the node to disappear
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, metric = api_client.hosts.get_metrics(node['id'])
                                +        if 404 == code:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Node {node['id']} still available after PowerOff script executed"
                                +            f", script path: {host_state.path}"
                                +        )
                                +
                                +    _, node = api_client.hosts.get(node['id'])
                                +    assert node["metadata"]["state"]["name"] in ("in-progress", "unavailable")
                                +

Test that the hosts are the nodes which make up the cluster

                                Covers

                                hosts-01-Negative test-Verify the state for Powered down node
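Most loops in this module follow the same poll-until-deadline pattern used by the power-off test above; a generic, self-contained equivalent (illustrative, not part of the test suite) might look like:

    from datetime import datetime, timedelta
    from time import sleep

    def wait_until(predicate, timeout, interval=5):
        # poll `predicate` until it returns truthy or `timeout` seconds elapse
        deadline = datetime.now() + timedelta(seconds=timeout)
        while deadline > datetime.now():
            if predicate():
                return True
            sleep(interval)
        return False

    # e.g. wait for a powered-off node's metrics endpoint to return 404
    # assert wait_until(lambda: api_client.hosts.get_metrics(node_id)[0] == 404, wait_timeout)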

                                @@ -248,6 +799,46 @@

                                Covers

                                def test_poweron_state(self, api_client, host_state, wait_timeout, available_node_names)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="host_poweron", depends=["host_poweroff"])
                                +def test_poweron_state(self, api_client, host_state, wait_timeout, available_node_names):
                                +    assert 2 <= len(available_node_names), (
                                +        f"The cluster only have {len(available_node_names)} available node."
                                +        " It's not enough for power on test."
                                +    )
                                +    _, node = api_client.hosts.get(available_node_names[-1])
                                +
                                +    assert node['metadata']['state']['error'], (
                                +        f"The node {available_node_names[-1]} was not poweroff.\n"
                                +        f"Node Status: {node['metadata']['status']}"
                                +    )
                                +
                                +    node_ip = next(val["address"] for val in node['status']['addresses']
                                +                   if val["type"] == "InternalIP")
                                +
                                +    rc, out, err = host_state.power(node['id'], node_ip, on=True)
                                +    assert rc == 0, (f"Failed to PowerOn node {node['id']} with error({rc}):\n"
                                +                     f"stdout: {out}\n\nstderr: {err}")
                                +    sleep(host_state.delay)  # Wait for the node to appear
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        _, metric = api_client.hosts.get_metrics(node['id'])
                                +        state = metric.get("metadata", {}).get("state", {})
                                +        if state and not state.get("error") and state.get('name') != 'unavailable':
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Node {node['id']} still unavailable after PowerOn script executed"
                                +            f", script path: {host_state.path}"
                                +        )
                                +
                                +    _, node = api_client.hosts.get(node['id'])
                                +    assert "active" == node["metadata"]["state"]["name"]
                                +
                                @@ -294,7 +885,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_4_vm_snapshot.html b/backend/integrations/test_4_vm_snapshot.html index e175ba54f..b5afe17d1 100644 --- a/backend/integrations/test_4_vm_snapshot.html +++ b/backend/integrations/test_4_vm_snapshot.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_4_vm_snapshot API documentation - + @@ -37,90 +48,392 @@

                                Functions

                                def create_vm(name, api_client, ssh_keypair, image, timeout_secs)
                                +
                                + +Expand source code + +
                                def create_vm(name, api_client, ssh_keypair, image, timeout_secs):
                                +    cpu = 1
                                +    mem = 2
                                +    pubkey, _ = ssh_keypair
                                +
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_image("disk-0", image["id"])
                                +
                                +    userdata = yaml.safe_load(vm_spec.user_data)
                                +    userdata["ssh_authorized_keys"] = [pubkey]
                                +    vm_spec.user_data = yaml.dump(userdata)
                                +
                                +    _, data = api_client.vms.create(name, vm_spec)
                                +    deadline = datetime.now() + timedelta(seconds=timeout_secs)
                                +    while deadline > datetime.now():
                                +        _, data = api_client.vms.get(name)
                                +        if "Running" == data.get("status", {}).get("printableStatus"):
                                +            break
                                +        sleep(1)
                                +    else:
                                +        raise AssertionError(f"timed out waiting for {name} to transition to Running")
                                +
                                +    return name, image["user"]
                                +
                                def delete_vm(name, api_client, timeout_secs)
                                +
                                + +Expand source code + +
                                def delete_vm(name, api_client, timeout_secs):
                                +    code, data = api_client.vms.get(name)
                                +    if code == 404:
                                +        return
                                +
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +
                                +    api_client.vms.delete(name)
                                +    deadline = datetime.now() + timedelta(seconds=timeout_secs)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vms.get_status(name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol["volume"]["persistentVolumeClaim"]["claimName"]
                                +        api_client.volumes.delete(vol_name)
                                +
                                def image(api_client, image_opensuse, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def image(api_client, image_opensuse, wait_timeout):
                                +    unique_image_id = unique_name("image")
                                +    display_name = f"{unique_image_id}-{image_opensuse.name}"
                                +    code, data = api_client.images.create_by_url(unique_image_id,
                                +                                                 image_opensuse.url,
                                +                                                 display_name=display_name)
                                +
                                +    assert 201 == code, (code, data)
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.images.get(unique_image_id)
                                +        if 100 == data.get('status', {}).get('progress', 0):
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            "Failed to create Image with error:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
                                +               user=image_opensuse.ssh_user)
                                +
                                +    code, data = api_client.images.delete(unique_image_id)
                                +
                                def restored_from_snapshot_name()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def restored_from_snapshot_name():
                                +    return unique_name("vm-from-snapshot")
                                +
                                def restored_from_snapshot_name_2()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def restored_from_snapshot_name_2():
                                +    return unique_name("vm-from-snapshot-2")
                                +
-def restored_from_snapshot_vm(api_client, restored_from_snapshot_name, vm_snapshot_name, source_vm, host_shell, vm_shell, ssh_keypair, wait_timeout)
+def restored_from_snapshot_vm(api_client,
                                restored_from_snapshot_name,
                                vm_snapshot_name,
                                source_vm,
                                host_shell,
                                vm_shell,
                                ssh_keypair,
                                wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def restored_from_snapshot_vm(api_client, restored_from_snapshot_name,
                                +                              vm_snapshot_name, source_vm, host_shell,
                                +                              vm_shell, ssh_keypair, wait_timeout):
                                +    name, ssh_user = source_vm
                                +    start_vm(name, api_client, wait_timeout)
                                +
                                +    def modify(sh):
                                +        _, _ = sh.exec_command("echo 5678 > test.txt && sync")
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                modify, wait_timeout)
                                +
                                +    # Just to wait for `sync`
                                +    sleep(2)
                                +
                                +    stop_vm(name, api_client, wait_timeout)
                                +
                                +    spec = api_client.vm_snapshots.RestoreSpec.for_new(restored_from_snapshot_name)
                                +    code, data = api_client.vm_snapshots.restore(vm_snapshot_name, spec)
                                +    assert 201 == code
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vms.get(restored_from_snapshot_name)
                                +        if 200 == code and "Running" == data.get("status", {}).get("printableStatus"):
                                +            break
                                +        print("waiting for restored vm to be running")
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(f"timed out waiting to restore into new VM"
                                +                             f"{restored_from_snapshot_name}")
                                +
                                +    yield restored_from_snapshot_name, ssh_user
                                +
                                +    delete_vm(restored_from_snapshot_name, api_client, wait_timeout)
                                +
-def restored_vm_2(api_client, restored_from_snapshot_name_2, vm_snapshot_name, source_vm, host_shell, vm_shell, ssh_keypair, wait_timeout)
+def restored_vm_2(api_client,
                                restored_from_snapshot_name_2,
                                vm_snapshot_name,
                                source_vm,
                                host_shell,
                                vm_shell,
                                ssh_keypair,
                                wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def restored_vm_2(api_client, restored_from_snapshot_name_2,
                                +                  vm_snapshot_name, source_vm,
                                +                  host_shell, vm_shell,
                                +                  ssh_keypair, wait_timeout):
                                +    name, ssh_user = source_vm
                                +
                                +    start_vm(name, api_client, wait_timeout)
                                +
                                +    def modify(sh):
                                +        _, _ = sh.exec_command("echo 99999999 > test.txt && sync")
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                modify, wait_timeout)
                                +
                                +    # Just to wait for `sync`
                                +    sleep(2)
                                +
                                +    stop_vm(name, api_client, wait_timeout)
                                +
                                +    spec = api_client.vm_snapshots.RestoreSpec.for_new(restored_from_snapshot_name_2)
                                +    code, data = api_client.vm_snapshots.restore(vm_snapshot_name, spec)
                                +    assert 201 == code
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vms.get(restored_from_snapshot_name_2)
                                +        if 200 == code and "Running" == data.get("status", {}).get("printableStatus"):
                                +            break
                                +        print("waiting for restored vm to be running")
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(f"timed out waiting to restore into new VM"
                                +                             f"{restored_from_snapshot_name_2}")
                                +
                                +    yield restored_from_snapshot_name_2, ssh_user
                                +
                                +    delete_vm(restored_from_snapshot_name_2, api_client, wait_timeout)
                                +
                                def source_vm(sourcevm_name, api_client, ssh_keypair, image, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def source_vm(sourcevm_name, api_client, ssh_keypair, image, wait_timeout):
                                +    yield create_vm(sourcevm_name, api_client, ssh_keypair, image, wait_timeout)
                                +    delete_vm(sourcevm_name, api_client, wait_timeout)
                                +
                                def sourcevm_name()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def sourcevm_name():
                                +    return unique_name("source-vm")
                                +
                                def start_vm(name, api_client, timeout_secs)
                                +
                                + +Expand source code + +
                                def start_vm(name, api_client, timeout_secs):
                                +    _, data = api_client.vms.start(name)
                                +    deadline = datetime.now() + timedelta(seconds=timeout_secs)
                                +    while deadline > datetime.now():
                                +        _, data = api_client.vms.get(name)
                                +        status = data.get("status", {}).get("printableStatus")
                                +        if "Running" == status:
                                +            return
                                +        sleep(1)
                                +
                                +    raise AssertionError(f"timed out trying to start {name}")
                                +
                                def stop_vm(name, api_client, timeout_secs)
                                +
                                + +Expand source code + +
                                def stop_vm(name, api_client, timeout_secs):
                                +    _, data = api_client.vms.stop(name)
                                +    deadline = datetime.now() + timedelta(seconds=timeout_secs)
                                +    while deadline > datetime.now():
                                +        _, data = api_client.vms.get(name)
                                +        status = data.get("status", {}).get("printableStatus")
                                +        if "Stopped" == status:
                                +            return
                                +        sleep(1)
                                +
                                +    raise AssertionError(f"timed out trying to stop {name}")
                                +
                                def unique_name(name)
                                +
                                + +Expand source code + +
                                def unique_name(name):
                                +    return f"{datetime.now().strftime('%m%S%f')}-{name}"
                                +
                                def vm_shell_do(name, api_client, host_shell, vm_shell, user, ssh_keypair, action, wait_timeout)
                                +
                                + +Expand source code + +
                                def vm_shell_do(name, api_client, host_shell, vm_shell, user, ssh_keypair, action, wait_timeout):
                                +    _, privatekey = ssh_keypair
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vms.get_status(name)
                                +        if 200 == code:
                                +            phase = data.get("status", {}).get("phase")
                                +            conds = data.get("status", {}).get("conditions", [{}])
                                +            if ("Running" == phase
                                +                    and "AgentConnected" == conds[-1].get("type")
                                +                    and data["status"].get("interfaces")):
                                +                break
+        sleep(3)
+
+    # VMI is ready; resolve its default interface IP and the hosting node's internal IP
+    vm_ip = next(iface["ipAddress"] for iface in data["status"]["interfaces"]
+                 if iface["name"] == "default")
+
+    code, data = api_client.hosts.get(data["status"]["nodeName"])
+    host_ip = next(addr["address"] for addr in data["status"]["addresses"]
+                   if addr["type"] == "InternalIP")
+
+    with host_shell.login(host_ip, jumphost=True) as h:
+        vm_sh = vm_shell(user, pkey=privatekey)
+
+        deadline = datetime.now() + timedelta(seconds=wait_timeout)
+        while deadline > datetime.now():
+            try:
+                vm_sh.connect(vm_ip, jumphost=h.client)
+            except ChannelException as e:
+                print(e)
+                sleep(3)
+            else:
+                break
+        else:
+            raise AssertionError(f"Unable to login to {name}")
+
+        with vm_sh as sh:
+            action(sh)
                                +
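The snapshot tests in this module drive `vm_shell_do` with a callback that receives an open shell; a typical invocation, based on those tests (`vm_name` and `ssh_user` here are placeholders), looks like:

    def write_marker(sh):
        # create the well-known file the snapshot tests later assert on
        _, _ = sh.exec_command("echo 123 > test.txt && sync")

    vm_shell_do(vm_name, api_client, host_shell, vm_shell,
                ssh_user, ssh_keypair, write_marker, wait_timeout)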
                                def vm_snapshot_2_name()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def vm_snapshot_2_name():
                                +    return "vm-snapshot-2"
                                +
                                def vm_snapshot_name()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def vm_snapshot_name():
                                +    return "vm-snapshot"
                                +

                                @@ -132,7 +445,6 @@

                                Classes

                                class TestVMSnapshot
                                -
Expand source code
@@ -451,6 +763,7 @@

                                Classes

                                else: raise AssertionError(f"timed out waiting for {volumesnapshotname} to be deleted")
                                +

                                Class variables

                                var pytestmark
                                @@ -464,6 +777,44 @@

                                Methods

                                def test_create_vm_snapshot_while_pvc_detached(self, api_client, vm_snapshot_2_name, source_vm, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="snapshot_created_from_detached_source_vm_pvc",
                                +                        depends=["detached_source_vm_pvc"])
                                +def test_create_vm_snapshot_while_pvc_detached(self, api_client,
                                +                                               vm_snapshot_2_name, source_vm, wait_timeout):
                                +    """
                                +    Test that a VM snapshot can be created when the source
                                +    PVC is detached.
                                +
                                +    Prerequisites:
                                +    The original VM (`source-vm`) exists and is stopped (so that
                                +    the PVC is detached.)
                                +    """
                                +    name, _ = source_vm
                                +
                                +    stop_vm(name, api_client, wait_timeout)
                                +
                                +    code, _ = api_client.vm_snapshots.create(name, vm_snapshot_2_name)
                                +    assert 201 == code
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vm_snapshots.get(vm_snapshot_2_name)
                                +        if data.get("status", {}).get("readyToUse"):
                                +            break
                                +        print(f"waiting for {vm_snapshot_2_name} to be ready")
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(f"timed out waiting for {vm_snapshot_2_name} to be ready")
                                +
                                +    code, data = api_client.vm_snapshots.get(vm_snapshot_2_name)
                                +
                                +    assert 200 == code
                                +    assert data.get("status", {}).get("readyToUse") is True
                                +

                                Test that a VM snapshot can be created when the source PVC is detached.

Prerequisites:
@@ -474,12 +825,99 @@

                                Methods

                                def test_replace_is_rejected_when_deletepolicy_is_retain(self, api_client, source_vm, vm_snapshot_name, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["source_vm_snapshot"])
                                +def test_replace_is_rejected_when_deletepolicy_is_retain(self, api_client,
                                +                                                         source_vm, vm_snapshot_name,
                                +                                                         wait_timeout):
                                +    name, _ = source_vm
                                +
                                +    stop_vm(name, api_client, wait_timeout)
                                +
                                +    """
                                +    Test that the Harvester API rejects a `replace`
                                +    VirtualMachineRestore where the deletePolicy is
                                +    not `retain`.
                                +
+    Prerequisites:
                                +    1. The original VM (`source-vm`) and snapshot (`vm-snapshot`)
                                +    from the first test case.
                                +    """
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
                                +    code, data = api_client.vm_snapshots.restore(vm_snapshot_name, spec)
                                +
                                +    reason = data.get("message")
                                +
                                +    wantmsg = "Delete policy with backup type snapshot"
                                +    " for replacing VM is not supported"
                                +
                                +    assert wantmsg in reason
                                +    assert 422 == code
                                +
-def test_replace_vm_with_vm_snapshot(self, api_client, source_vm, vm_snapshot_name, ssh_keypair, host_shell, vm_shell, wait_timeout)
+def test_replace_vm_with_vm_snapshot(self,
                                api_client,
                                source_vm,
                                vm_snapshot_name,
                                ssh_keypair,
                                host_shell,
                                vm_shell,
                                wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="replaced_source_vm", depends=["source_vm_snapshot"])
                                +def test_replace_vm_with_vm_snapshot(self, api_client,
                                +                                     source_vm, vm_snapshot_name,
                                +                                     ssh_keypair, host_shell,
                                +                                     vm_shell, wait_timeout):
                                +    """
                                +    Test that the original virtual machine can be replaced
                                +    from its original snapshot (`vm-snapshot`) and that
                                +    the snapshot's data contains the well-known file (`test.txt`)
                                +    and its expected contents (`123`).
                                +
                                +    Prerequisites:
                                +    `vm-snapshot` VM snapshot exists.
                                +    """
                                +    name, ssh_user = source_vm
                                +    start_vm(name, api_client, wait_timeout)
                                +
                                +    def modify(sh):
                                +        _, _ = sh.exec_command("rm -f test.txt && sync")
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                modify, wait_timeout)
                                +
                                +    # Just to wait for `sync`
                                +    sleep(2)
                                +
                                +    stop_vm(name, api_client, wait_timeout)
                                +
                                +    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=False)
                                +    code, data = api_client.vm_snapshots.restore(vm_snapshot_name, spec)
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vms.get(name)
                                +        if 200 == code and "Running" == data.get("status", {}).get("printableStatus"):
                                +            break
                                +        print("waiting for restored vm to be running")
                                +        sleep(3)
                                +    assert "Running" == data.get("status", {}).get("printableStatus")
                                +
                                +    def actassert(sh):
                                +        out, _ = sh.exec_command("cat test.txt")
                                +        assert "123" in out
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                actassert, wait_timeout)
                                +

Test that the original virtual machine can be replaced from its original snapshot (vm-snapshot) and that the snapshot's data contains the well-known file (test.txt)
@@ -491,6 +929,41 @@

                                Methods

                                def test_restore_from_vm_snapshot_while_pvc_detached_from_source(self, api_client, restored_vm_2, host_shell, vm_shell, ssh_keypair, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="detached_source_vm_pvc", depends=["replaced_source_vm"])
                                +def test_restore_from_vm_snapshot_while_pvc_detached_from_source(self,
                                +                                                                 api_client,
                                +                                                                 restored_vm_2,
                                +                                                                 host_shell,
                                +                                                                 vm_shell,
                                +                                                                 ssh_keypair,
                                +                                                                 wait_timeout):
                                +    """
                                +    Test that a new virtual machine can be created from a
                                +    VM snapshot created from a source PersistentVolumeClaim
                                +    that is now detached.
                                +
                                +    Prerequisites:
                                +    The original VM (`source-vm`) exists and is stopped (so that
                                +    the PVC is detached.)
                                +
                                +    The original snapshot (`vm-snapshot`) exists.
                                +    """
                                +
                                +    name, ssh_user = restored_vm_2
                                +
                                +    def actassert(sh):
                                +        out, _ = sh.exec_command("cat test.txt")
                                +        assert "123" in out
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                actassert, wait_timeout)
                                +

                                Test that a new virtual machine can be created from a VM snapshot created from a source PersistentVolumeClaim that is now detached.

                                @@ -500,9 +973,40 @@

                                Methods

                                The original snapshot (vm-snapshot) exists.

-def test_restore_into_new_vm_from_vm_snapshot(self, api_client, restored_from_snapshot_vm, ssh_keypair, host_shell, vm_shell, wait_timeout)
+def test_restore_into_new_vm_from_vm_snapshot(self,
                                api_client,
                                restored_from_snapshot_vm,
                                ssh_keypair,
                                host_shell,
                                vm_shell,
                                wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["source_vm_snapshot"])
                                +def test_restore_into_new_vm_from_vm_snapshot(self, api_client,
                                +                                              restored_from_snapshot_vm,
                                +                                              ssh_keypair, host_shell,
                                +                                              vm_shell, wait_timeout):
                                +    """
                                +    Test that restoring the `vm-snapshot` into a new virtual
                                +    machine results in a virtual machine with the expected
                                +    well-known file (`test.txt`) with the expected contents
                                +    (`123`).
                                +
                                +    Prerequisites:
                                +    1. The source VM from the first test case and its
                                +       snapshot (`vm-snapshot`).
                                +    """
                                +
                                +    name, ssh_user = restored_from_snapshot_vm
                                +
                                +    def actassert(sh):
                                +        out, _ = sh.exec_command("cat test.txt")
                                +        assert "123" in out
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                actassert, wait_timeout)
                                +

Test that restoring the vm-snapshot into a new virtual machine results in a virtual machine with the expected well-known file (test.txt) with the expected contents
@@ -512,9 +1016,55 @@

                                Methods

                                snapshot (vm-snapshot).
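The two restore flows exercised in this class differ only in the RestoreSpec they pass; roughly, mirroring the calls shown in this module:

    # restore the snapshot into a brand-new VM
    spec = api_client.vm_snapshots.RestoreSpec.for_new("restored-vm-name")
    code, data = api_client.vm_snapshots.restore(vm_snapshot_name, spec)

    # or replace the existing source VM in place, keeping its volumes
    spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=False)
    code, data = api_client.vm_snapshots.restore(vm_snapshot_name, spec)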

-def test_vm_snapshot_create(self, api_client, source_vm, vm_snapshot_name, host_shell, vm_shell, ssh_keypair, wait_timeout)
+def test_vm_snapshot_create(self,
                                api_client,
                                source_vm,
                                vm_snapshot_name,
                                host_shell,
                                vm_shell,
                                ssh_keypair,
                                wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="source_vm_snapshot")
                                +def test_vm_snapshot_create(self, api_client,
                                +                            source_vm, vm_snapshot_name,
                                +                            host_shell, vm_shell,
                                +                            ssh_keypair, wait_timeout):
                                +    """
                                +    Test that the VM snapshot can be created.
                                +
                                +    Prerequisite:
                                +    A virtual machine has been created and is running.
                                +    """
                                +    name, ssh_user = source_vm
                                +
                                +    def action(sh):
                                +        _, _ = sh.exec_command("echo 123 > test.txt")
                                +        _, _ = sh.exec_command("sync")
                                +
                                +    vm_shell_do(name, api_client,
                                +                host_shell, vm_shell,
                                +                ssh_user, ssh_keypair,
                                +                action, wait_timeout)
                                +
                                +    # Since `sync` isn't actually synchronous, wait a couple of
                                +    # seconds to let the I/O flush to disk.
                                +    sleep(2)
                                +
                                +    code, _ = api_client.vm_snapshots.create(name, vm_snapshot_name)
                                +    assert 201 == code
                                +
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, data = api_client.vm_snapshots.get(vm_snapshot_name)
                                +        if data.get("status", {}).get("readyToUse"):
                                +            break
                                +        print(f"waiting for {vm_snapshot_name} to be ready")
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(f"timed out waiting for {vm_snapshot_name} to be ready")
                                +
                                +    assert 200 == code
                                +    assert data.get("status", {}).get("readyToUse") is True
                                +

                                Test that the VM snapshot can be created.

                                Prerequisite: A virtual machine has been created and is running.

                                @@ -523,6 +1073,47 @@

                                Methods

                                def test_vm_snapshots_are_cleaned_up_after_source_vm_deleted(self, api_client, source_vm, vm_snapshot_name, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="cleaned_up_after_vm_delete")
                                +def test_vm_snapshots_are_cleaned_up_after_source_vm_deleted(self, api_client,
                                +                                                             source_vm, vm_snapshot_name,
                                +                                                             wait_timeout):
                                +    """
                                +    Test that VM snapshots are removed when the VM they correspond
+    to has been deleted.
                                +
                                +    Prerequisites:
                                +    The original VM (`source-vm`) exists and so does its first
                                +    snapshot (`vm-snapshot`).
                                +
                                +    Assert that the snapshot exists, then delete the VM
                                +    and assert that the snapshot has been removed.
                                +    """
                                +
                                +    code, _ = api_client.vm_snapshots.get(vm_snapshot_name)
                                +    assert 200 == code
                                +
                                +    name, _ = source_vm
                                +
                                +    code, _ = api_client.vms.delete(name)
                                +    assert 200 == code
                                +
                                +    def wait_for_snapshot_to_disappear(snapshot):
                                +        deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +        while deadline > datetime.now():
                                +            code, _ = api_client.vm_snapshots.get(snapshot)
                                +            if code == 404:
                                +                return
                                +            sleep(1)
                                +        else:
+            raise AssertionError(f"timeout while waiting for {snapshot}"
+                                 f" to be deleted after its VM was deleted")
                                +
                                +    wait_for_snapshot_to_disappear(vm_snapshot_name)
                                +

Test that VM snapshots are removed when the VM they correspond to has been deleted.

                                Prerequisites: @@ -535,12 +1126,81 @@

                                Methods

                                def test_volume_snapshot_not_exist(self, api_client)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["source_vm_snapshot"])
                                +def test_volume_snapshot_not_exist(self, api_client):
                                +    ''' ref: https://github.com/harvester/tests/issues/524 '''
                                +    code, data = api_client.vol_snapshots.get()
                                +
                                +    assert 200 == code, (code, data)
                                +    assert not data['data'], (code, data)
                                +
                                def test_volume_snapshots_are_cleaned_up_after_source_volume_deleted(self, api_client, source_vm, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["snapshot_created_from_detached_source_vm_pvc",
                                +                                 "cleaned_up_after_vm_delete"])
                                +def test_volume_snapshots_are_cleaned_up_after_source_volume_deleted(self, api_client,
                                +                                                                     source_vm, wait_timeout):
                                +    """
                                +    Test that any volume snapshots that result from taking
                                +    a VM snapshot while the PVC is detached are cleaned up
                                +    after the volume is deleted.
                                +
                                +    Prerequisites:
                                +    The volume from the original VM (`source-vm`) exists
                                +    and is not attached because the original VM was replaced
                                +    and the deletePolicy was `retain`.
                                +    """
                                +
                                +    # First, assert that the expected volume exists.
                                +    name, _ = source_vm
                                +    volumename = f"{name}-disk-0"
                                +
                                +    code, _ = api_client.volumes.get(volumename)
                                +    assert 200 == code
                                +
                                +    # And assert that it has a volume snapshot associated with it.
                                +    volumesnapshotname = f"vm-snapshot-volume-{volumename}"
                                +
                                +    code, data = api_client.vol_snapshots.get(volumesnapshotname)
                                +    assert 200 == code
                                +
                                +    ownerpvc = data.get("spec", {}).get("source", {}).get("persistentVolumeClaimName")
                                +    assert volumename == ownerpvc
                                +
                                +    # Then delete the volume and wait for it to disappear.
                                +    code, _ = api_client.volumes.delete(volumename)
                                +    deadline = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while deadline > datetime.now():
                                +        code, _ = api_client.volumes.get(volumename)
                                +        if code == 404:
                                +            break
                                +        sleep(1)
                                +    else:
                                +        raise AssertionError(f"timed out waiting for {volumename} to be deleted")
                                +
+    # Finally, wait for the volume snapshot to be cleaned up
+    # automatically (with a fresh timeout budget).
+    deadline = datetime.now() + timedelta(seconds=wait_timeout)
+    while deadline > datetime.now():
                                +        code, _ = api_client.vol_snapshots.get(volumesnapshotname)
                                +        if code == 404:
                                +            break
                                +        sleep(1)
                                +    else:
                                +        raise AssertionError(f"timed out waiting for {volumesnapshotname} to be deleted")
                                +

                                Test that any volume snapshots that result from taking a VM snapshot while the PVC is detached are cleaned up after the volume is deleted.

                                @@ -606,7 +1266,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_4_vm_template.html b/backend/integrations/test_4_vm_template.html index cacf5e247..b41893d03 100644 --- a/backend/integrations/test_4_vm_template.html +++ b/backend/integrations/test_4_vm_template.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_4_vm_template API documentation - + @@ -37,12 +48,84 @@

                                Functions

                                def image(api_client, image_opensuse, unique_name, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def image(api_client, image_opensuse, unique_name, wait_timeout):
                                +    unique_image_id = f'image-{unique_name}'
                                +    code, data = api_client.images.create_by_url(
                                +        unique_image_id, image_opensuse.url, display_name=f"{unique_name}-{image_opensuse.name}"
                                +    )
                                +
                                +    assert 201 == code, (code, data)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.images.get(unique_image_id)
                                +        if 100 == data.get('status', {}).get('progress', 0):
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            "Failed to create Image with error:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
                                +               user=image_opensuse.ssh_user)
                                +
                                +    code, data = api_client.images.delete(unique_image_id)
                                +
                                def stopped_vm(api_client, ssh_keypair, wait_timeout, image, unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def stopped_vm(api_client, ssh_keypair, wait_timeout, image, unique_name):
                                +    unique_name = f"stopped-{datetime.now().strftime('%m%S%f')}-{unique_name}"
                                +    cpu, mem = 1, 2
                                +    pub_key, pri_key = ssh_keypair
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_image("disk-0", image['id'])
                                +    vm_spec.run_strategy = "Halted"
                                +
                                +    userdata = yaml.safe_load(vm_spec.user_data)
                                +    userdata['ssh_authorized_keys'] = [pub_key]
                                +    vm_spec.user_data = yaml.dump(userdata)
                                +
                                +    code, data = api_client.vms.create(unique_name, vm_spec)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(unique_name)
                                +        if "Stopped" == data.get('status', {}).get('printableStatus'):
                                +            break
                                +        sleep(1)
                                +
                                +    yield unique_name, image['user']
                                +
                                +    code, data = api_client.vms.get(unique_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +
                                +    api_client.vms.delete(unique_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(unique_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +

                                @@ -54,7 +137,6 @@

                                Classes

                                class TestVMTemplate
                                -
                                Expand source code @@ -182,12 +264,139 @@

                                Classes

                                code, data = api_client.templates.delete(unique_name) assert 200 == code, (code, data)
                                +

                                Methods

                                def test_create_template_with_data(self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout, stopped_vm)
                                +
                                + +Expand source code + +
                                def test_create_template_with_data(
                                +    self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout, stopped_vm
                                +):
                                +    """ ref: https://github.com/harvester/tests/issues/1194
                                +    Steps:
                                +        1. Create VM and write some data
                                +        2. Create new template and keep data from the VM
                                +        3. Create new VM from the template
+        4. Check data consistency
+    Expected result:
+        - VM should be created and operate normally
+        - Template should be created successfully
+        - New VM should be able to be created and operate normally
+        - Data in the new VM should be consistent with the old one
                                +    """
                                +
                                +    unique_name, ssh_user = stopped_vm
                                +    pub_key, pri_key = ssh_keypair
                                +
                                +    code, data = api_client.vms.start(unique_name)
                                +    assert 204 == code, (code, data)
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_name, ["default"])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Login to VM and write some data
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {unique_name} Started {vm_checker.wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +        out, err = sh.exec_command(
                                +            "dd if=/dev/urandom of=./generate_file bs=1M count=512; sync"
                                +        )
                                +        assert not out, (out, err)
                                +        vm1_md5, err = sh.exec_command(
                                +            "md5sum ./generate_file > ./generate_file.md5; cat ./generate_file.md5; sync"
                                +        )
                                +        assert not err, (vm1_md5, err)
                                +
                                +    # generate VM template with data
                                +    code, data = api_client.vms.create_template(unique_name, unique_name, keep_data=True)
                                +    assert 204 == code, (code, data)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        try:
                                +            code, data = api_client.templates.get(unique_name)
                                +            assert 200 == code, (code, data)
                                +            ns, name = data['spec']['defaultVersionId'].split('/')
                                +        except (AssertionError, ValueError):
                                +            # ValueError: version is not created yet, so `defaultVersionId` will be empty
                                +            pass
                                +        else:
                                +            code, data = api_client.templates.get_version(name, ns)
                                +            conds = data.get('status', {}).get('conditions', [])
                                +            if conds and all('True' == c['status'] for c in conds):
                                +                tmpl_spec = api_client.templates.Spec.from_dict(data)
                                +                break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            "Failed to create template with status:\n"
                                +            f"{data.get('status')}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                +    # Create new VM from the template
                                +    tmpl_vm_name = f"tmpl-{unique_name}"
                                +    code, data = api_client.vms.create(tmpl_vm_name, tmpl_spec)
                                +    assert 201 == code, (code, data)
                                +
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(tmpl_vm_name, ["default"])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({tmpl_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Login to VM and check the data is consistent
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (
                                +            f"VM {tmpl_vm_name} Started {vm_checker.wait_timeout} seconds"
                                +            f", but cloud-init still in {out}"
                                +        )
                                +        out, err = sh.exec_command("md5sum -c ./generate_file.md5")
                                +        assert not err, (out, err)
                                +        vm2_md5, err = sh.exec_command("cat ./generate_file.md5")
                                +        assert not err, (vm2_md5, err)
                                +        assert vm1_md5 == vm2_md5
                                +
                                +    # teardown
                                +    api_client.vms.delete(tmpl_vm_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(tmpl_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +
                                +    for vol in tmpl_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
                                +    code, data = api_client.templates.delete(unique_name)
                                +    assert 200 == code, (code, data)
                                +

                                ref: https://github.com/harvester/tests/issues/1194

                                Steps

                                  @@ -239,7 +448,7 @@

                                  -

                                  Generated by pdoc 0.11.1.

                                  +

                                  Generated by pdoc 0.11.5.

                                  diff --git a/backend/integrations/test_5_vm_networks.html b/backend/integrations/test_5_vm_networks.html index 09a3c7a66..193cc61d5 100644 --- a/backend/integrations/test_5_vm_networks.html +++ b/backend/integrations/test_5_vm_networks.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_5_vm_networks API documentation - + @@ -37,36 +48,248 @@

                                  Functions

                                  def cluster_network(vlan_nic, api_client, unique_name)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope='module')
                                  +def cluster_network(vlan_nic, api_client, unique_name):
                                  +    code, data = api_client.clusternetworks.get_config()
                                  +    assert 200 == code, (code, data)
                                  +
                                  +    node_key = 'network.harvesterhci.io/matched-nodes'
                                  +    cnet_nodes = dict()  # cluster_network: items
                                  +    for cfg in data['items']:
                                  +        if vlan_nic in cfg['spec']['uplink']['nics']:
                                  +            nodes = json.loads(cfg['metadata']['annotations'][node_key])
                                  +            cnet_nodes.setdefault(cfg['spec']['clusterNetwork'], []).extend(nodes)
                                  +
                                  +    code, data = api_client.hosts.get()
                                  +    assert 200 == code, (code, data)
                                  +    all_nodes = set(n['id'] for n in data['data'])
                                  +    try:
+        # vlan_nic is configured on a specific cluster network, reuse it
                                  +        yield next(cnet for cnet, nodes in cnet_nodes.items() if all_nodes == set(nodes))
                                  +        return None
                                  +    except StopIteration:
                                  +        configured_nodes = reduce(add, cnet_nodes.values(), [])
                                  +        if any(n in configured_nodes for n in all_nodes):
                                  +            raise AssertionError(
+                f"Not all nodes' VLAN NIC {vlan_nic} are available.\n"
                                  +                f"VLAN NIC configured nodes: {configured_nodes}\n"
                                  +                f"All nodes: {all_nodes}\n"
                                  +            )
                                  +
                                  +    # Create cluster network
                                  +    cnet = f"cnet-{datetime.strptime(unique_name, '%Hh%Mm%Ss%f-%m-%d').strftime('%H%M%S')}"
                                  +    created = []
                                  +    code, data = api_client.clusternetworks.create(cnet)
                                  +    assert 201 == code, (code, data)
                                  +    while all_nodes:
                                  +        node = all_nodes.pop()
                                  +        code, data = api_client.clusternetworks.create_config(node, cnet, vlan_nic, hostname=node)
                                  +        assert 201 == code, (
                                  +            f"Failed to create cluster config for {node}\n"
                                  +            f"Created: {created}\t Remaining: {all_nodes}\n"
                                  +            f"API Status({code}): {data}"
                                  +        )
                                  +        created.append(node)
                                  +
                                  +    yield cnet
                                  +
                                  +    # Teardown
                                  +    deleted = {name: api_client.clusternetworks.delete_config(name) for name in created}
                                  +    failed = [(name, code, data) for name, (code, data) in deleted.items() if 200 != code]
                                  +    if failed:
                                  +        fmt = "Unable to delete VLAN Config {} with error ({}): {}"
                                  +        raise AssertionError(
                                  +            "\n".join(fmt.format(name, code, data) for (name, code, data) in failed)
                                  +        )
                                  +
                                  +    code, data = api_client.clusternetworks.delete(cnet)
                                  +    assert 200 == code, (code, data)
                                  +
                                  def gen_ifconfig()
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="session")
                                  +def gen_ifconfig():
                                  +    # eth/eno/ens(idx) | enp(idx)s[0-9]
                                  +    pattern = r"(?:(e(?:th|no|ns))(\d+)|(enp)(\d+)(s\d+))"
                                  +
                                  +    def replace_to(idx):
                                  +        def _repl(match):
                                  +            p1, idx1, p2, idx2, tail = match.groups()
                                  +            return f"{p1}{int(idx1)+idx}" if not tail else f"{p2}{int(idx2)+idx}{tail}"
                                  +        return _repl
                                  +
                                  +    def generate_ifconfig(ifname, idx=0):
                                  +        return {
                                  +            "type": "physical",
                                  +            "name": re.sub(pattern, replace_to(idx), ifname),
                                  +            "subnets": [dict(type="dhcp")]
                                  +        }
                                  +
                                  +    return generate_ifconfig
                                  +
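A brief aside, not part of the generated documentation: the regex in the gen_ifconfig fixture bumps the numeric index embedded in common predictable NIC names, so an added interface gets a deterministic device name for the cloud-init network config. A minimal sketch with hypothetical interface names:

    # Sketch only: reproduces the fixture's renaming logic with hypothetical NIC names.
    import re

    pattern = r"(?:(e(?:th|no|ns))(\d+)|(enp)(\d+)(s\d+))"

    def replace_to(idx):
        def _repl(match):
            p1, idx1, p2, idx2, tail = match.groups()
            return f"{p1}{int(idx1)+idx}" if not tail else f"{p2}{int(idx2)+idx}{tail}"
        return _repl

    print(re.sub(pattern, replace_to(1), "ens3"))    # -> ens4
    print(re.sub(pattern, replace_to(1), "enp1s0"))  # -> enp2s0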
                                  def image(api_client, image_opensuse, unique_name, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def image(api_client, image_opensuse, unique_name, wait_timeout):
                                  +    unique_image_id = f'image-{unique_name}'
                                  +    code, data = api_client.images.create_by_url(
                                  +        unique_image_id, image_opensuse.url, display_name=f"{unique_name}-{image_opensuse.name}"
                                  +    )
                                  +
                                  +    assert 201 == code, (code, data)
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.images.get(unique_image_id)
                                  +        if 100 == data.get('status', {}).get('progress', 0):
                                  +            break
                                  +        sleep(3)
                                  +    else:
                                  +        raise AssertionError(
                                  +            "Failed to create Image with error:\n"
                                  +            f"Status({code}): {data}"
                                  +        )
                                  +
                                  +    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
                                  +               user=image_opensuse.ssh_user)
                                  +
                                  +    code, data = api_client.images.delete(unique_image_id)
                                  +
                                  def minimal_vm(api_client, ssh_keypair, wait_timeout, unique_name, vm_checker, image)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="class")
                                  +def minimal_vm(api_client, ssh_keypair, wait_timeout, unique_name, vm_checker, image):
                                  +    unique_vm_name = f"{datetime.now().strftime('%m%S%f')}-{unique_name}"
                                  +    cpu, mem = 1, 2
                                  +    pub_key, _ = ssh_keypair
                                  +    vm_spec = api_client.vms.Spec(cpu, mem)
                                  +    vm_spec.add_image("disk-0", image['id'])
                                  +
                                  +    userdata = yaml.safe_load(vm_spec.user_data)
                                  +    userdata['ssh_authorized_keys'] = [pub_key]
                                  +    vm_spec.user_data = yaml.dump(userdata)
                                  +
                                  +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                  +    assert 201 == code, (code, data)
                                  +    vm_started, (code, data) = vm_checker.wait_interfaces(unique_vm_name)
                                  +
                                  +    yield unique_vm_name, image['user']
                                  +
                                  +    code, data = api_client.vms.get(unique_vm_name)
                                  +    vm_spec = api_client.vms.Spec.from_dict(data)
                                  +    vm_deleted, (code, data) = vm_checker.wait_deleted(unique_vm_name)
                                  +    for vol in vm_spec.volumes:
                                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                  +        api_client.volumes.delete(vol_name)
                                  +
                                  def two_mirror_vms(api_client, ssh_keypair, unique_name, vm_checker, image, vm_network)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="class")
                                  +def two_mirror_vms(api_client, ssh_keypair, unique_name, vm_checker, image, vm_network):
                                  +    cpu, mem = 1, 2
                                  +    pub_key, pri_key = ssh_keypair
                                  +    vm_spec = api_client.vms.Spec(cpu, mem)
                                  +    vm_spec.add_image("disk-0", image['id'])
                                  +    vm_spec.mgmt_network = False
                                  +    vm_spec.add_network('nic-1', f"{vm_network['namespace']}/{vm_network['name']}")
                                  +
                                  +    userdata = yaml.safe_load(vm_spec.user_data)
                                  +    userdata['ssh_authorized_keys'] = [pub_key]
                                  +    vm_spec.user_data = yaml.dump(userdata)
                                  +    vm_names = [f"vm{idx}-{unique_name}" for idx in range(1, 3)]
                                  +
                                  +    for vm_name in vm_names:
                                  +        code, data = api_client.vms.create(vm_name, vm_spec)
                                  +        assert 201 == code, (code, data)
                                  +
                                  +    yield vm_names, image['user']
                                  +
                                  +    params = dict(removedDisks="disk-0", propagationPolicy="Foreground")
                                  +    for vm_name in vm_names:
                                  +        vm_checker.wait_deleted(vm_name, params=params)
                                  +
                                  def vm_network(api_client, unique_name, wait_timeout, cluster_network, vlan_id)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def vm_network(api_client, unique_name, wait_timeout, cluster_network, vlan_id):
                                  +    code, data = api_client.networks.create(
                                  +        unique_name, vlan_id, cluster_network=cluster_network
                                  +    )
                                  +    assert 201 == code, (code, data)
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.networks.get(unique_name)
                                  +        annotations = data['metadata'].get('annotations', {})
                                  +        if 200 == code and annotations.get('network.harvesterhci.io/route'):
                                  +            route = json.loads(annotations['network.harvesterhci.io/route'])
                                  +            if route['cidr']:
                                  +                break
                                  +        sleep(3)
                                  +    else:
                                  +        raise AssertionError(
                                  +            "VM network created but route info not available\n"
                                  +            f"API Status({code}): {data}"
                                  +        )
                                  +
                                  +    yield dict(name=unique_name, cidr=route['cidr'], namespace=data['metadata']['namespace'])
                                  +
                                  +    code, data = api_client.networks.delete(unique_name)
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.networks.get(unique_name)
                                  +        if 404 == code:
                                  +            break
                                  +        sleep(3)
                                  +    else:
                                  +        raise AssertionError(
+            f"Failed to remove VM network {unique_name} after {wait_timeout}s\n"
                                  +            f"API Status({code}): {data}"
                                  +        )
                                  +

                                @@ -78,7 +301,6 @@

                                Classes

                                class TestVMNetwork
                                -
                                Expand source code @@ -228,6 +450,7 @@

                                Classes

                                f"Unable to login to VM via VLAN IP {src_ip}" ) from ex
                                +

                                Class variables

                                var pytestmark
                                @@ -238,21 +461,174 @@

                                Class variables

                                Methods

                                -def test_add_vlan(self, api_client, ssh_keypair, vm_mgmt_static, vm_checker, vm_shell_from_host, vm_network, minimal_vm, gen_ifconfig) +def test_add_vlan(self,
                                api_client,
                                ssh_keypair,
                                vm_mgmt_static,
                                vm_checker,
                                vm_shell_from_host,
                                vm_network,
                                minimal_vm,
                                gen_ifconfig)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="add_vlan")
                                +def test_add_vlan(
                                +    self, api_client, ssh_keypair, vm_mgmt_static, vm_checker, vm_shell_from_host, vm_network,
                                +    minimal_vm, gen_ifconfig
                                +):
                                +    # clean cloud-init for rerun, and get the correct ifname
                                +    (unique_vm_name, ssh_user), (_, pri_key) = minimal_vm, ssh_keypair
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited, (out, err)
                                +        out, err = sh.exec_command("sudo cloud-init clean")
                                +        out, err = sh.exec_command("sudo cloud-init status")
                                +        assert "not run" in out, (out, err)
                                +        out, err = sh.exec_command("ip --json a s")
                                +        assert not err
                                +    ifname = next(i['ifname'] for i in json.loads(out) if i['link_type'] != 'loopback')
                                +    # https://cloudinit.readthedocs.io/en/22.4.2/topics/network-config-format-v1.html#subnet-ip
                                +    # and https://harvesterhci.io/kb/multiple-nics-vm-connectivity/#cloud-init-config
                                +    nic_config = [gen_ifconfig(ifname, idx=i) for i in range(2)]
                                +    nic_config[0]['subnets'] = [vm_mgmt_static]
                                +
                                +    # add vlan NIC and network data then restart VM
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_spec.add_network('nic-1', f"{vm_network['namespace']}/{vm_network['name']}")
                                +    vm_spec.network_data = "#cloud-config\n" + yaml.dump({
                                +        "version": 1,
                                +        "config": nic_config
                                +    })
                                +    code, data = api_client.vms.update(unique_vm_name, vm_spec)
                                +    assert 200 == code, (code, data)
                                +    vm_restarted, ctx = vm_checker.wait_restarted(unique_vm_name)
                                +    assert vm_restarted, (
                                +        f"Failed to Restart VM({unique_vm_name}),"
                                +        f" timed out while executing {ctx.callee!r}"
                                +    )
                                +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
                                +    assert vm_got_ips, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] == 'default')
                                +    code, data = api_client.hosts.get(data['status']['nodeName'])
                                +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                +                   if addr['type'] == 'InternalIP')
                                +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                +        cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                +        assert cloud_inited and not err, (out, err)
                                +        out, err = sh.exec_command("ip --json -4 a s")
                                +    ips = [j['local'] for i in json.loads(out) for j in i['addr_info']]
                                +    vlan_ip_range = ip_network(vm_network['cidr'])
                                +
                                +    def get_vlan_ip(ctx):
                                +        if ctx.callee == 'vm.get_status':
                                +            return all(iface.get('ipAddress') for iface in ctx.data['status']['interfaces']
                                +                       if iface['name'] != 'default')
                                +        return True
+    # NOTE: status data from the API may be slightly delayed
                                +    vm_got_ips, (code, data) = vm_checker.wait_interfaces(unique_vm_name, callback=get_vlan_ip)
                                +    assert vm_got_ips, (code, data)
                                +    vm_vlan_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                      if iface['name'] != 'default')
                                +    assert ip_address(vm_vlan_ip) in vlan_ip_range and vm_vlan_ip in ips
                                +
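As context for the assertion above, the VLAN membership check relies on Python's standard ipaddress module; a self-contained sketch with a hypothetical CIDR and address (not values from an actual vm_network):

    # Sketch only: hypothetical CIDR/address, mirroring the ip_network membership check above.
    from ipaddress import ip_address, ip_network

    vlan_ip_range = ip_network("192.168.100.0/24")   # stand-in for vm_network['cidr']
    assert ip_address("192.168.100.42") in vlan_ip_range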
                                def test_ssh_connection(self, api_client, ssh_keypair, vm_checker, vm_network, minimal_vm)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["add_vlan"])
                                +def test_ssh_connection(
                                +    self, api_client, ssh_keypair, vm_checker, vm_network, minimal_vm
                                +):
                                +    (unique_vm_name, ssh_user), (_, pri_key) = minimal_vm, ssh_keypair
                                +    vm_started, (code, data) = vm_checker.wait_interfaces(unique_vm_name)
                                +    assert vm_started, (
                                +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                +        f"Status: {data.get('status')}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                 if iface['name'] != 'default')
                                +    try:
                                +        with vm_checker.wait_ssh_connected(vm_ip, ssh_user, pkey=pri_key) as sh:
                                +            out, err = sh.exec_command("ip -brief a s")
                                +            assert vm_ip in out and not err
                                +    except AssertionError as ex:
                                +        raise ex
                                +    except Exception as ex:
                                +        raise AssertionError(
                                +            f"Unable to login to VM via VLAN IP {vm_ip}"
                                +        ) from ex
                                +
                                def test_vms_on_same_vlan(self, api_client, ssh_keypair, vm_checker, vm_network, two_mirror_vms)
                                +
                                + +Expand source code + +
                                def test_vms_on_same_vlan(
                                +    self, api_client, ssh_keypair, vm_checker, vm_network, two_mirror_vms
                                +):
                                +    _, pri_key = ssh_keypair
                                +    vm_names, ssh_user = two_mirror_vms
                                +
                                +    def get_vlan_ip(ctx):
                                +        if ctx.callee == 'vm.get_status':
                                +            return all(iface.get('ipAddress') for iface in ctx.data['status']['interfaces']
                                +                       if iface['name'] != 'default')
                                +        return True
                                +    # Verify VM having IP which belongs to VLAN
                                +    vm_info, vlan_ip_range = [], ip_network(vm_network['cidr'])
                                +    for vm_name in vm_names:
                                +        vm_got_ips, (code, data) = vm_checker.wait_interfaces(vm_name, callback=get_vlan_ip)
                                +        assert vm_got_ips, (
                                +            f"Failed to Start VM({vm_name}) with errors:\n"
                                +            f"Status: {data.get('status')}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +        vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                +                     if iface['name'] != 'default')
                                +        assert ip_address(vm_ip) in vlan_ip_range
                                +        vm_info.append((vm_name, vm_ip))
                                +
                                +    # verify Ping from each
                                +    for (src_name, src_ip), (dst_name, dst_ip) in zip(vm_info, vm_info[::-1]):
                                +        try:
                                +            with vm_checker.wait_ssh_connected(src_ip, ssh_user, pkey=pri_key) as sh:
                                +                out, err = sh.exec_command(f"ping -c5 {dst_ip}")
                                +                assert '100% packet loss' not in out, (
                                +                    f"Failed to ping VM({dst_name!r}, {dst_ip}) <- VM({src_name!r}, {src_ip})"
                                +                )
                                +        except AssertionError as ex:
                                +            raise ex
                                +        except Exception as ex:
                                +            raise AssertionError(
                                +                f"Unable to login to VM via VLAN IP {src_ip}"
                                +            ) from ex
                                +
                                @@ -297,7 +673,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_5_vm_networks_interact.html b/backend/integrations/test_5_vm_networks_interact.html index 33dfd6473..44973c8ae 100644 --- a/backend/integrations/test_5_vm_networks_interact.html +++ b/backend/integrations/test_5_vm_networks_interact.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_5_vm_networks_interact API documentation - + @@ -37,36 +48,217 @@

                                Functions

                                def check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                + +Expand source code + +
                                def check_vm_ip_exists(api_client, vm_name, wait_timeout):
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(vm_name)
                                +        assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +        if 'ipAddress' in data['status']['interfaces'][0]:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
+            f"Failed to get VM {vm_name} IP address, exceeded the given timeout\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                def check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                + +Expand source code + +
                                def check_vm_running(api_client, vm_name, wait_timeout):
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(vm_name)
                                +        vm_fields = data['metadata']['fields']
                                +
                                +        assert 200 == code, (code, data)
                                +        if vm_fields[2] == 'Running':
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to create VM {vm_name} in Running status, exceed given timeout\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                def client()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="session")
                                +def client():
                                +    client = paramiko.client.SSHClient()
                                +    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                                +    yield client
                                +    client.close()
                                +
                                def cluster_network(vlan_nic, api_client, unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def cluster_network(vlan_nic, api_client, unique_name):
                                +    code, data = api_client.clusternetworks.get_config()
                                +    assert 200 == code, (code, data)
                                +
                                +    node_key = 'network.harvesterhci.io/matched-nodes'
                                +    cnet_nodes = dict()  # cluster_network: items
                                +    for cfg in data['items']:
                                +        if vlan_nic in cfg['spec']['uplink']['nics']:
                                +            nodes = json.loads(cfg['metadata']['annotations'][node_key])
                                +            cnet_nodes.setdefault(cfg['spec']['clusterNetwork'], []).extend(nodes)
                                +
                                +    code, data = api_client.hosts.get()
                                +    assert 200 == code, (code, data)
                                +    all_nodes = set(n['id'] for n in data['data'])
                                +    try:
+        # vlan_nic configured on specific cluster network, reuse it
                                +        yield next(cnet for cnet, nodes in cnet_nodes.items() if all_nodes == set(nodes))
                                +        return None
                                +    except StopIteration:
                                +        configured_nodes = reduce(add, cnet_nodes.values(), [])
                                +        if any(n in configured_nodes for n in all_nodes):
                                +            raise AssertionError(
                                +                "Not all nodes' VLAN NIC {vlan_nic} are available.\n"
                                +                f"VLAN NIC configured nodes: {configured_nodes}\n"
                                +                f"All nodes: {all_nodes}\n"
                                +            )
                                +
                                +    # Create cluster network
                                +    cnet = f"cnet-{datetime.strptime(unique_name, '%Hh%Mm%Ss%f-%m-%d').strftime('%H%M%S')}"
                                +    created = []
                                +    code, data = api_client.clusternetworks.create(cnet)
                                +    assert 201 == code, (code, data)
                                +    while all_nodes:
                                +        node = all_nodes.pop()
                                +        code, data = api_client.clusternetworks.create_config(node, cnet, vlan_nic, hostname=node)
                                +        assert 201 == code, (
                                +            f"Failed to create cluster config for {node}\n"
                                +            f"Created: {created}\t Remaining: {all_nodes}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +        created.append(node)
                                +
                                +    yield cnet
                                +
                                +    # Teardown
                                +    deleted = {name: api_client.clusternetworks.delete_config(name) for name in created}
                                +    failed = [(name, code, data) for name, (code, data) in deleted.items() if 200 != code]
                                +    if failed:
                                +        fmt = "Unable to delete VLAN Config {} with error ({}): {}"
                                +        raise AssertionError(
                                +            "\n".join(fmt.format(name, code, data) for (name, code, data) in failed)
                                +        )
                                +
                                +    code, data = api_client.clusternetworks.delete(cnet)
                                +    assert 200 == code, (code, data)
                                +
                                def create_image_url(api_client, display_name, image_url, wait_timeout)
                                +
                                + +Expand source code + +
                                def create_image_url(api_client, display_name, image_url, wait_timeout):
                                +    code, data = api_client.images.create_by_url(display_name, image_url)
                                +
                                +    assert 201 == code, (code, data)
                                +    image_spec = data.get('spec')
                                +
                                +    assert display_name == image_spec.get('displayName')
                                +    assert "download" == image_spec.get('sourceType')
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.images.get(display_name)
                                +        image_status = data.get('status', {})
                                +
                                +        assert 200 == code, (code, data)
                                +        if image_status.get('progress') == 100:
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to download image {display_name} with {wait_timeout} timed out\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                def vm_network(api_client, unique_name, wait_timeout, cluster_network, vlan_id)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def vm_network(api_client, unique_name, wait_timeout, cluster_network, vlan_id):
                                +    code, data = api_client.networks.create(
                                +        unique_name, vlan_id, cluster_network=cluster_network
                                +    )
                                +    assert 201 == code, (code, data)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.networks.get(unique_name)
                                +        annotations = data['metadata'].get('annotations', {})
                                +        if 200 == code and annotations.get('network.harvesterhci.io/route'):
                                +            route = json.loads(annotations['network.harvesterhci.io/route'])
                                +            if route['cidr']:
                                +                break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            "VM network created but route info not available\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                +    yield dict(name=unique_name, cidr=route['cidr'], namespace=data['metadata']['namespace'])
                                +
                                +    code, data = api_client.networks.delete(unique_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.networks.get(unique_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to remote VM network {unique_name} after {wait_timeout}s\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +

                                @@ -78,7 +270,6 @@

                                Classes

                                class TestBackendNetwork
                                -
                                Expand source code @@ -853,6 +1044,7 @@

                                Classes

                                shlex.quote(part) for part in split_command)) return _stdout, _stderr
                                +

                                Class variables

                                var pytestmark
                                @@ -863,21 +1055,209 @@

                                Class variables

                                Methods

                                -def ssh_client(self, client, dest_ip, username, password, command, timeout, allow_agent=False, look_for_keys=False) +def ssh_client(self,
                                client,
                                dest_ip,
                                username,
                                password,
                                command,
                                timeout,
                                allow_agent=False,
                                look_for_keys=False)
                                +
                                + +Expand source code + +
                                def ssh_client(self, client, dest_ip, username, password, command, timeout,
                                +               allow_agent=False, look_for_keys=False):
                                +    client.connect(dest_ip, username=username, password=password,
                                +                   allow_agent=allow_agent, look_for_keys=look_for_keys,
                                +                   timeout=timeout)
                                +
                                +    split_command = shlex.split(command)
                                +    _stdin, _stdout, _stderr = client.exec_command(' '.join(
                                +        shlex.quote(part) for part in split_command), get_pty=True)
                                +    return _stdout, _stderr
                                +
                                -def ssh_jumpstart(self, client, dest_ip, client_ip, client_user, client_password, dest_user, dest_password, command, allow_agent=False, look_for_keys=False) +def ssh_jumpstart(self,
                                client,
                                dest_ip,
                                client_ip,
                                client_user,
                                client_password,
                                dest_user,
                                dest_password,
                                command,
                                allow_agent=False,
                                look_for_keys=False)
                                +
                                + +Expand source code + +
                                def ssh_jumpstart(self, client, dest_ip, client_ip, client_user, client_password,
                                +                  dest_user, dest_password, command, allow_agent=False, look_for_keys=False):
                                +    client.connect(client_ip, username=client_user, password=client_password,
                                +                   allow_agent=allow_agent, look_for_keys=look_for_keys)
                                +
                                +    client_transport = client.get_transport()
                                +    dest_addr = (dest_ip, 22)
                                +    client_addr = (client_ip, 22)
                                +    client_channel = client_transport.open_channel("direct-tcpip", dest_addr, client_addr)
                                +
                                +    jumpstart = paramiko.SSHClient()
                                +    jumpstart.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                                +    jumpstart.connect(dest_ip, username=dest_user, password=dest_password, sock=client_channel)
                                +
                                +    split_command = shlex.split(command)
                                +    _stdin, _stdout, _stderr = jumpstart.exec_command(' '.join(
                                +        shlex.quote(part) for part in split_command))
                                +    return _stdout, _stderr
                                +
                                -def test_delete_vlan_from_multiple(self, api_client, request, client, unique_name, image_opensuse, vm_network, wait_timeout, host_shell, vm_checker) +def test_delete_vlan_from_multiple(self,
                                api_client,
                                request,
                                client,
                                unique_name,
                                image_opensuse,
                                vm_network,
                                wait_timeout,
                                host_shell,
                                vm_checker)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.networks
                                +def test_delete_vlan_from_multiple(
                                +    self, api_client, request, client, unique_name, image_opensuse, vm_network, wait_timeout,
                                +    host_shell, vm_checker
                                +):
                                +    """
                                +    Manual test plan reference:
                                +    https://harvester.github.io/tests/manual/network/delete-vlan-network-form/
                                +
                                +    Steps:
                                +    1. Create an external VLAN network
                                +    2. Make sure that the network is set to the management network with masquerade as the type
+    3. Add another external VLAN network to the VM
                                +    4. Create VM
                                +    5. Wait until the VM boot in running state
                                +    6. Delete the external VLAN from VM
                                +    7. Check can ping the VM on the management network
                                +    8. Check can't SSH to VM with management network from external host
                                +    """
                                +
                                +    vip = request.config.getoption('--endpoint').strip('https://')
                                +
                                +    # Check image exists
                                +    code, data = api_client.images.get(image_opensuse.name)
                                +
                                +    if code == 404:
                                +        create_image_url(api_client, image_opensuse.name, image_opensuse.url, wait_timeout)
                                +
                                +    spec = api_client.vms.Spec(1, 2)
                                +    spec.user_data += cloud_user_data.format(password=vm_credential["password"])
                                +
                                +    # Add network data to trigger DHCP on multiple NICs
                                +    spec.network_data += cloud_network_data
                                +
                                +    vm_name = unique_name + "-delete-vlan"
                                +
                                +    # Add image
                                +    spec.add_image(image_opensuse.name, "default/" + image_opensuse.name)
                                +
                                +    # Add external vlan network
                                +    spec.add_network("nic-1", f"{vm_network['namespace']}/{vm_network['name']}")
                                +
                                +    # Create VM
                                +    code, data = api_client.vms.create(vm_name, spec)
                                +    assert 201 == code, (f"Failed to create vm with error: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check have 2 NICs and wait until all ip address exists
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(vm_name)
                                +        assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +        if len(data['status']['interfaces']) == 2:
                                +            if 'ipAddress' in data['status']['interfaces'][0]:
                                +                if 'ipAddress' in data['status']['interfaces'][1]:
                                +                    break
+        sleep(5)
                                +
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to get multiple IPs on VM: {vm_name}, exceed the given timed out\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                +    # get data from running VM and transfer to spec
                                +    code, data = api_client.vms.get(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +    spec = spec.from_dict(data)
                                +
                                +    spec.networks = []
                                +    spec.mgmt_network = True
                                +
                                +    code, data = api_client.vms.update(vm_name, spec)
                                +    assert 200 == code, (f"Failed to update specific vm with spec: {code}, {data}")
                                +
                                +    code, data = api_client.vms.restart(vm_name)
                                +    assert 204 == code, (f"Failed to restart specific vm: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    ip_addresses = []
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(vm_name)
                                +        assert 200 == code, (f"Failed to get specific vm status: {code}, {data}")
                                +        if 'interfaces' in data['status']:
                                +            interfaces_data = data['status']['interfaces']
                                +            ip_addresses = []
                                +
                                +            interfaces = data['status']['interfaces']
                                +
                                +            if len(interfaces) == 1 and 'ipAddress' in interfaces[0]:
                                +                ip_addresses.append(interfaces_data[0]['ipAddress'])
                                +
                                +                if 'default' in interfaces_data[0]['name']:
                                +                    break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to get VM {vm_name} IP address, exceed the given timed out\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                +    # Ping management ip address
                                +    mgmt_ip = ip_addresses[0]
                                +
                                +    ping_command = "ping -c 50 {0}".format(mgmt_ip)
                                +
                                +    with host_shell.login(vip) as sh:
                                +        stdout, stderr = sh.exec_command(ping_command)
                                +
                                +    assert stdout.find(f"64 bytes from {mgmt_ip}") > 0, (
                                +        f"Failed to ping VM management IP {mgmt_ip} "
                                +        f"on management interface from Harvester node: {code}, {data}")
                                +
+    # Check should not SSH to management ip address from external host
                                +    command = ['/usr/bin/ssh', '-o', 'ConnectTimeout=5', mgmt_ip]
                                +
                                +    with pytest.raises(subprocess.CalledProcessError) as ex:
                                +        subprocess.check_output(command, stderr=subprocess.STDOUT,
                                +                                shell=False, encoding="utf-8")
                                +
                                +    # OpenSSH returns the return code of the program that was executed on
                                +    # the remote, unless there was an error for SSH itself, in which case
                                +    # it returns 255
                                +    assert ex.value.returncode == 255, ("Failed: should not be able to SSH"
                                +                                        " to VM on management interface"
                                +                                        f" {mgmt_ip} from external host")
                                +
                                +    # teardown
                                +    code, data = api_client.vms.get(vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_checker.wait_deleted(vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +

                                Manual test plan reference: https://harvester.github.io/tests/manual/network/delete-vlan-network-form/

                                Steps: @@ -891,9 +1271,110 @@

                                Methods

                                8. Check can't SSH to VM with management network from external host
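The crux of steps 7-8 above is the pair of checks at the end of the source shown; condensed into a standalone sketch using the same fixture and variable names (`host_shell`, `vip`, `mgmt_ip`):

    import subprocess
    import pytest

    # Step 7: the management address must answer pings from a Harvester node.
    with host_shell.login(vip) as sh:
        stdout, _ = sh.exec_command(f"ping -c 5 {mgmt_ip}")
    assert f"64 bytes from {mgmt_ip}" in stdout

    # Step 8: SSH from an external host must fail; OpenSSH exits with 255 when
    # the connection itself fails.
    with pytest.raises(subprocess.CalledProcessError) as ex:
        subprocess.check_output(["/usr/bin/ssh", "-o", "ConnectTimeout=5", mgmt_ip],
                                stderr=subprocess.STDOUT, encoding="utf-8")
    assert ex.value.returncode == 255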

                                -def test_mgmt_network_connection(self, api_client, request, client, image_opensuse, unique_name, wait_timeout, host_shell, vm_shell_from_host, vm_checker) +def test_mgmt_network_connection(self,
                                api_client,
                                request,
                                client,
                                image_opensuse,
                                unique_name,
                                wait_timeout,
                                host_shell,
                                vm_shell_from_host,
                                vm_checker)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.networks
                                +def test_mgmt_network_connection(
                                +    self, api_client, request, client, image_opensuse, unique_name, wait_timeout,
                                +    host_shell, vm_shell_from_host, vm_checker
                                +):
                                +    """
                                +    Manual test plan reference:
                                +    https://harvester.github.io/tests/manual/network/validate-network-management-network/
                                +
                                +
                                +    Steps:
                                +    1. Create a new VM
                                +    2. Make sure that the network is set to the management network with masquerade as the type
                                +    3. Wait until the VM boot in running state
                                +    4. Check can ping VM with management network from Harvester node
                                +    5. Check can SSH to VM with management network from Harvester node
                                +    6. Check can't SSH to VM with management network from external host
                                +    """
                                +    vip = request.config.getoption('--endpoint').strip('https://')
                                +    vm_user, vm_passwd = vm_credential['user'], vm_credential['password']
                                +
                                +    # Check image exists
                                +    code, data = api_client.images.get(image_opensuse.name)
                                +
                                +    if code == 404:
                                +        create_image_url(api_client, image_opensuse.name, image_opensuse.url, wait_timeout)
                                +
                                +    # Update AllowTcpForwarding for ssh jumpstart
                                +
                                +    spec = api_client.vms.Spec(1, 2)
                                +
                                +    spec.user_data += cloud_user_data.format(password=vm_passwd)
                                +
                                +    vm_name = unique_name + "-mgmt"
                                +    # Create VM
                                +    spec.add_image(image_opensuse.name, "default/" + image_opensuse.name)
                                +
                                +    code, data = api_client.vms.create(vm_name, spec)
                                +    assert 201 == code, (f"Failed to create vm with error: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # Get VM interface ipAddresses
                                +    code, data = api_client.vms.get_status(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +
                                +    interfaces_data = data['status']['interfaces']
                                +
                                +    assert 1 == len(interfaces_data), (
                                +        f"Failed: get more than one interface: {interfaces_data}"
                                +    )
                                +
                                +    mgmt_ip = interfaces_data[0]['ipAddress']
                                +
                                +    # Ping management ip address from Harvester node
                                +    with host_shell.login(vip) as sh:
                                +        stdout, stderr = sh.exec_command(f"ping -c 50 {mgmt_ip}")
                                +
                                +    assert stdout.find(f"64 bytes from {mgmt_ip}") > 0, (
                                +        f"Failed to ping VM management IP {mgmt_ip} "
                                +        f"on management interface from Harvester node")
                                +
                                +    # SSH to management ip address and execute command from Harvester node
                                +    with vm_shell_from_host(vip, mgmt_ip, vm_user, vm_passwd) as sh:
                                +        stdout, stderr = sh.exec_command("ls")
                                +
                                +    assert stdout.find("bin") == 0, (
                                +        f"Failed to ssh to VM management IP {mgmt_ip} "
                                +        f"on management interface from Harvester node")
                                +
                                +    # Check should not SSH to management ip address from external host
                                +    command = ['/usr/bin/ssh', '-o', 'ConnectTimeout=5', mgmt_ip]
                                +
                                +    with pytest.raises(subprocess.CalledProcessError) as ex:
                                +        subprocess.check_output(command, stderr=subprocess.STDOUT,
                                +                                shell=False, encoding="utf-8")
                                +
                                +    # OpenSSH returns the return code of the program that was executed on
                                +    # the remote, unless there was an error for SSH itself, in which case
                                +    # it returns 255
                                +    assert ex.value.returncode == 255, ("Failed: should not be able to SSH"
                                +                                        " to VM on management interface"
                                +                                        f" {mgmt_ip} from external host")
                                +
                                +    # teardown
                                +    code, data = api_client.vms.get(vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_checker.wait_deleted(vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +

                                Manual test plan reference: https://harvester.github.io/tests/manual/network/validate-network-management-network/

                                Steps: @@ -905,9 +1386,144 @@

                                Methods

                                6. Check can't SSH to VM with management network from external host
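Steps 4-6 boil down to where each check runs: ping and SSH must succeed from a Harvester node, while SSH must fail from the outside. A condensed sketch with the fixtures used above (`host_shell`, `vm_shell_from_host`); the external-host check is identical to the one shown for the previous test:

    # Step 4: ping the masqueraded management IP from a Harvester node.
    with host_shell.login(vip) as sh:
        stdout, _ = sh.exec_command(f"ping -c 5 {mgmt_ip}")
    assert f"64 bytes from {mgmt_ip}" in stdout

    # Step 5: SSH into the VM through the node and run a command in the guest.
    with vm_shell_from_host(vip, mgmt_ip, vm_user, vm_passwd) as sh:
        stdout, _ = sh.exec_command("ls")
    assert "bin" in stdout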

                                -def test_mgmt_to_vlan_connection(self, api_client, request, client, unique_name, image_opensuse, vm_network, wait_timeout, vm_checker) +def test_mgmt_to_vlan_connection(self,
                                api_client,
                                request,
                                client,
                                unique_name,
                                image_opensuse,
                                vm_network,
                                wait_timeout,
                                vm_checker)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.networks
                                +def test_mgmt_to_vlan_connection(self, api_client, request, client, unique_name,
                                +                                 image_opensuse, vm_network, wait_timeout, vm_checker):
                                +    """
                                +    Manual test plan reference:
                                +    https://harvester.github.io/tests/manual/network/edit-network-form-change-management-to-vlan/
                                +
                                +
                                +    Steps:
                                +    1. Create an external VLAN network
                                +    2. Create a new VM
                                +    3. Make sure that the network is set to the management network with masquerade as the type
                                +    4. Wait until the VM boot in running state
                                +    5. Edit VM and change management network to external VLAN with bridge type
                                +    6. Check VM should save and reboot
                                +    7. Check can ping the VM from an external network host
                                +    8. Check can ssh to the VM from an external network host
                                +    """
                                +
                                +    # Check image exists
                                +    code, data = api_client.images.get(image_opensuse.name)
                                +
                                +    if code == 404:
                                +        create_image_url(api_client, image_opensuse.name, image_opensuse.url, wait_timeout)
                                +
                                +    spec = api_client.vms.Spec(1, 2)
                                +    spec.user_data += cloud_user_data.format(password=vm_credential["password"])
                                +    vm_name = unique_name + "-mgmt-vlan"
                                +    # Create VM
                                +    spec.add_image(image_opensuse.name, "default/" + image_opensuse.name)
                                +    code, data = api_client.vms.create(vm_name, spec)
                                +    assert 201 == code, (f"Failed to create vm with error: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # get data from running VM and transfer to spec
                                +    code, data = api_client.vms.get(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +
                                +    spec = spec.from_dict(data)
                                +
                                +    # Switch to vlan network
                                +    spec.mgmt_network = False
                                +
                                +    spec.add_network("nic-1", f"{vm_network['namespace']}/{vm_network['name']}")
                                +
                                +    # Update VM spec
                                +    code, data = api_client.vms.update(vm_name, spec)
                                +    assert 200 == code, (f"Failed to update specific vm with spec: {code}, {data}")
                                +
                                +    code, data = api_client.vms.restart(vm_name)
                                +    assert 204 == code, (f"Failed to restart specific vm: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # Get VM interface ipAddresses
                                +    code, data = api_client.vms.get_status(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +
                                +    interfaces_data = data['status']['interfaces']
                                +
                                +    assert 1 == len(interfaces_data), (
                                +        f"Failed: get more than one interface: {interfaces_data}"
                                +    )
                                +
                                +    # Determine by vlan network Name
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    ip_addresses = []
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(vm_name)
                                +        assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +        if 'interfaces' in data['status']:
                                +            interfaces_data = data['status']['interfaces']
                                +            ip_addresses = []
                                +            for interface in interfaces_data:
+                # Check the ipAddress is in dotted-decimal (numeric) format
                                +                if (
                                +                    'ipAddress' in interface and
                                +                    interface['ipAddress'].replace('.', '').isdigit()
                                +                ):
                                +                    ip_addresses.append(interface['ipAddress'])
                                +
                                +            if len(ip_addresses) > 0:
                                +                if 'nic-1' in interface['name']:
                                +                    break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to get VM {vm_name} IP address, exceed given timeout\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                +    # Ping vlan ip address
                                +    vlan_ip = ip_addresses[0]
                                +
                                +    command = ['/usr/bin/ping', '-c', '50', vlan_ip]
                                +
                                +    result = subprocess.check_output(command, shell=False, encoding="utf-8")
                                +
                                +    assert result.find(f"64 bytes from {vlan_ip}") > 0, (
                                +        f"Failed to ping VM external vlan IP {vlan_ip} "
                                +        f"on vlan interface from external host")
                                +
                                +    # SSH to vlan ip address and execute command
                                +    _stdout, _stderr = self.ssh_client(
                                +        client, vlan_ip, vm_credential["user"], vm_credential["password"], 'ls', wait_timeout)
                                +
                                +    stdout = _stdout.read().decode('ascii').strip("\n")
                                +
                                +    assert stdout.find("bin") == 0, (
                                +        f"Failed to ssh to VM external vlan IP {vlan_ip}"
                                +        f"on vlan interface from external host")
                                +
                                +    # teardown
                                +    code, data = api_client.vms.get(vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_checker.wait_deleted(vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +

                                Manual test plan reference: https://harvester.github.io/tests/manual/network/edit-network-form-change-management-to-vlan/

                                Steps: @@ -921,9 +1537,160 @@

                                Methods

                                8. Check can ssh to the VM from an external network host
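The switch in steps 5-6 is done by rebuilding the spec from the running VM, dropping the management NIC and attaching the VLAN, then restarting; condensed from the test body above:

    # Rebuild the spec from the live VM object.
    code, data = api_client.vms.get(vm_name)
    spec = api_client.vms.Spec.from_dict(data)
    # Replace the masqueraded management NIC with the external VLAN (bridge) NIC.
    spec.mgmt_network = False
    spec.add_network("nic-1", f"{vm_network['namespace']}/{vm_network['name']}")
    code, data = api_client.vms.update(vm_name, spec)
    assert 200 == code, (code, data)
    # Reboot so the new interface takes effect before re-reading the VM status.
    code, data = api_client.vms.restart(vm_name)
    assert 204 == code, (code, data)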

                                -def test_reboot_vlan_connection(self, api_client, request, unique_name, image_opensuse, vm_network, wait_timeout, vm_checker) +def test_reboot_vlan_connection(self,
                                api_client,
                                request,
                                unique_name,
                                image_opensuse,
                                vm_network,
                                wait_timeout,
                                vm_checker)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.networks
                                +@pytest.mark.dependency(name="reboot_vlan_connection",
                                +                        depends=["vlan_network_connection"])
                                +def test_reboot_vlan_connection(self, api_client, request, unique_name,
                                +                                image_opensuse, vm_network, wait_timeout, vm_checker):
                                +    """
                                +    Manual test plan reference:
                                +    https://harvester.github.io/tests/manual/network/negative-vlan-after-reboot/
                                +
                                +
                                +    Steps:
                                +    1. Create an external VLAN network
                                +    2. Create a new VM and add the external vlan network
                                +    3. Check can ping external VLAN IP
                                +    4. Reboot VM
                                +    5. Ping VM during reboot
                                +    6. Check can't ping VM during reboot
                                +    7. Check the VM should reboot
                                +    8. Ping VM after reboot
                                +    9. Check can ping VM
                                +    """
                                +    vm_name = unique_name + "-reboot-vlan"
                                +
                                +    # Check image exists
                                +    code, data = api_client.images.get(image_opensuse.name)
                                +
                                +    if code == 404:
                                +        create_image_url(api_client, image_opensuse.name, image_opensuse.url, wait_timeout)
                                +
                                +    spec = api_client.vms.Spec(1, 2, mgmt_network=False)
                                +    spec.user_data += cloud_user_data.format(password=vm_credential["password"])
                                +
                                +    # Create VM
                                +    spec.add_image(image_opensuse.name, "default/" + image_opensuse.name)
                                +
                                +    spec.add_network("nic-1", f"{vm_network['namespace']}/{vm_network['name']}")
                                +
                                +    code, data = api_client.vms.create(vm_name, spec)
                                +    assert 201 == code, (f"Failed to create vm with error: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # Get VM interface ipAddresses
                                +    code, data = api_client.vms.get_status(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +
                                +    interfaces_data = data['status']['interfaces']
                                +
                                +    assert 1 == len(interfaces_data), (
                                +        f"Failed: get more than one interface: {interfaces_data}"
                                +    )
                                +
                                +    assert "nic-1" == interfaces_data[0]['name'], (
                                +        f"Failed: Network name did not match to added vlan: {interfaces_data}"
                                +    )
                                +
                                +    vlan_ip = interfaces_data[0]['ipAddress']
                                +
                                +    # Check can ping vlan ip
                                +
                                +    command = ['/usr/bin/ping', '-c', '10', vlan_ip]
                                +
                                +    result = subprocess.check_output(command, shell=False, encoding="utf-8")
                                +
                                +    assert result.find(f"64 bytes from {vlan_ip}") > 0, (
                                +        f"Failed to ping VM external vlan IP {vlan_ip} "
                                +        f"on vlan interface from external host")
                                +
                                +    # Restart VM
                                +    code, data = api_client.vms.restart(vm_name)
                                +    assert 204 == code, (f"Failed to reboot vm with error: {code}, {data}")
                                +
                                +    # Check VM start in Starting state
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(vm_name)
                                +        assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +        vm_fields = data['metadata']['fields']
                                +
                                +        if vm_fields[2] == 'Starting':
                                +            break
                                +        sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to restart VM {vm_name} in Starting status, exceed given timeout\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                +    # Check can't ping vlan ip during reboot
                                +
                                +    command = ['/usr/bin/ping', '-c', '10', vlan_ip]
                                +
                                +    process = subprocess.run(command, stdout=subprocess.PIPE,
                                +                             stderr=subprocess.PIPE, universal_newlines=True)
                                +
                                +    result = process.stdout
                                +
                                +    assert result.find(f"64 bytes from {vlan_ip}") < 0, (
                                +        f"Failed: since can ping VM external vlan IP {vlan_ip} "
                                +        f"on vlan interface from external host during reboot")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # Get VM interface ipAddresses
                                +    code, data = api_client.vms.get_status(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +
                                +    interfaces_data = data['status']['interfaces']
                                +
                                +    assert 1 == len(interfaces_data), (
                                +        f"Failed: get more than one interface: {interfaces_data}"
                                +    )
                                +
                                +    assert "nic-1" == interfaces_data[0]['name'], (
                                +        f"Failed: Network name did not match to added vlan: {interfaces_data}"
                                +    )
                                +
                                +    vlan_ip = interfaces_data[0]['ipAddress']
                                +
                                +    # Ping vlan ip address
                                +
                                +    command = ['/usr/bin/ping', '-c', '10', vlan_ip]
                                +
                                +    result = subprocess.check_output(command, shell=False, encoding="utf-8")
                                +
                                +    assert result.find(f"64 bytes from {vlan_ip}") > 0, (
                                +        f"Failed to ping VM external vlan IP {vlan_ip} "
                                +        f"on vlan interface from external host")
                                +
                                +    # teardown
                                +    code, data = api_client.vms.get(vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_checker.wait_deleted(vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +

                                Manual test plan reference: https://harvester.github.io/tests/manual/network/negative-vlan-after-reboot/

                                Steps: @@ -938,9 +1705,97 @@

                                Methods

                                9. Check can ping VM
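Steps 4-6 depend on catching the VM while it is actually down: the test waits for the 'Starting' phase before sending the ping burst. Condensed from the test body above (`vlan_ip` is the address read from the VM status before the reboot):

    import subprocess

    # Trigger the reboot, then wait until the VM reports the 'Starting' phase
    # (wait loop omitted here) before probing.
    code, _ = api_client.vms.restart(vm_name)
    assert 204 == code

    # While the VM is rebooting, no echo replies are expected.
    proc = subprocess.run(["/usr/bin/ping", "-c", "10", vlan_ip],
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                          universal_newlines=True)
    assert f"64 bytes from {vlan_ip}" not in proc.stdout

    # Once the VM is Running and reports an IP again, the same burst must succeed.
    out = subprocess.check_output(["/usr/bin/ping", "-c", "10", vlan_ip], encoding="utf-8")
    assert f"64 bytes from {vlan_ip}" in out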

                                -def test_vlan_network_connection(self, api_client, request, client, unique_name, image_opensuse, vm_network, wait_timeout, vm_checker) +def test_vlan_network_connection(self,
                                api_client,
                                request,
                                client,
                                unique_name,
                                image_opensuse,
                                vm_network,
                                wait_timeout,
                                vm_checker)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.networks
                                +@pytest.mark.dependency(name="vlan_network_connection")
                                +def test_vlan_network_connection(self, api_client, request, client, unique_name,
                                +                                 image_opensuse, vm_network, wait_timeout, vm_checker):
                                +    """
                                +    Manual test plan reference:
                                +    https://harvester.github.io/tests/manual/network/validate-network-external-vlan/
                                +
                                +
                                +    Steps:
                                +    1. Create an external VLAN network
                                +    2. Create a new VM and set the external vlan network to it
                                +    3. Check can ping external VLAN IP from external host
                                +    4. Check can SSH to VM from external IP from external host
                                +    """
                                +    vm_name = unique_name + "-vlan"
                                +
                                +    # Check image exists
                                +    code, data = api_client.images.get(image_opensuse.name)
                                +
                                +    if code == 404:
                                +        create_image_url(api_client, image_opensuse.name, image_opensuse.url, wait_timeout)
                                +
                                +    spec = api_client.vms.Spec(1, 2, mgmt_network=False)
                                +    spec.user_data += cloud_user_data.format(password=vm_credential["password"])
                                +
                                +    # Create VM
                                +    spec.add_image(image_opensuse.name, "default/" + image_opensuse.name)
                                +
                                +    spec.add_network("nic-1", f"{vm_network['namespace']}/{vm_network['name']}")
                                +
                                +    code, data = api_client.vms.create(vm_name, spec)
                                +    assert 201 == code, (f"Failed to create vm with error: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # Get VM interface ipAddresses
                                +    code, data = api_client.vms.get_status(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +
                                +    interfaces_data = data['status']['interfaces']
                                +
                                +    assert 1 == len(interfaces_data), (
                                +        f"Failed: get more than one interface: {interfaces_data}"
                                +    )
                                +
                                +    assert "nic-1" == interfaces_data[0]['name'], (
                                +        f"Failed: Network name did not match to added vlan: {interfaces_data}"
                                +    )
                                +
                                +    vlan_ip = interfaces_data[0]['ipAddress']
                                +
                                +    # Ping vlan ip address from external host
                                +    command = ['/usr/bin/ping', '-c', '50', vlan_ip]
                                +
                                +    result = subprocess.check_output(command, shell=False, encoding="utf-8")
                                +
                                +    assert result.find(f"64 bytes from {vlan_ip}") > 0, (
                                +        f"Failed to ping VM external vlan IP {vlan_ip} "
                                +        f"on vlan interface from external host")
                                +
                                +    # SSH to vlan ip address and execute command from external host
                                +    _stdout, _stderr = self.ssh_client(
                                +        client, vlan_ip, vm_credential["user"], vm_credential["password"], 'ls', wait_timeout)
                                +
                                +    stdout = _stdout.read().decode('ascii').strip("\n")
                                +
                                +    assert stdout.find("bin") == 0, (
                                +        f"Failed to ssh to VM external vlan IP {vlan_ip}"
                                +        f"on vlan interface from external host")
                                +
                                +    # teardown
                                +    code, data = api_client.vms.get(vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_checker.wait_deleted(vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
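Note: the source above formats a module-level cloud_user_data template with the password from vm_credential; neither constant appears in this hunk. A minimal sketch of what those definitions could look like, purely for orientation (names and values are assumptions, not the suite's real constants):

# Illustrative only: the real module-level constants are not part of this patch.
vm_credential = {
    "user": "opensuse",      # assumed default user of the openSUSE image
    "password": "password",  # assumed throw-away test password
}

# Appended to the "#cloud-config" user data already carried by the VM Spec,
# so it only needs the password-related keys.
cloud_user_data = """
password: {password}
chpasswd:
  expire: false
ssh_pwauth: true
"""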

                                Manual test plan reference: https://harvester.github.io/tests/manual/network/validate-network-external-vlan/

Steps:

@@ -950,9 +1805,144 @@

                                Methods

                                4. Check can SSH to VM from external IP from external host

-def test_vlan_to_mgmt_connection(self, api_client, request, client, unique_name, image_opensuse, vm_network, wait_timeout, host_shell, vm_shell_from_host, vm_checker)
+def test_vlan_to_mgmt_connection(self,
                                api_client,
                                request,
                                client,
                                unique_name,
                                image_opensuse,
                                vm_network,
                                wait_timeout,
                                host_shell,
                                vm_shell_from_host,
                                vm_checker)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.networks
                                +def test_vlan_to_mgmt_connection(
                                +    self, api_client, request, client, unique_name, image_opensuse, vm_network, wait_timeout,
                                +    host_shell, vm_shell_from_host, vm_checker
                                +):
                                +    """
                                +    Manual test plan reference:
                                +    https://harvester.github.io/tests/manual/network/edit-network-form-change-management-to-vlan/
                                +
                                +
                                +    Steps:
                                +    1. Create an external VLAN network
                                +    2. Create a new VM
                                +    3. Make sure that the network is set to the vlan network with bridge as the type
                                +    4. Wait until the VM boot in running state
                                +    5. Edit VM and change from external VLAN to management network
                                +    6. Check VM should save and reboot
                                +    7. Check can ping VM with management network from Harvester node
                                +    8. Check can SSH to VM with management network from Harvester node
                                +    9. Check can't SSH to VM with management network from external host
                                +    """
                                +
                                +    vip = request.config.getoption('--endpoint').strip('https://')
                                +    vm_user, vm_passwd = vm_credential['user'], vm_credential['password']
                                +
                                +    # Check image exists
                                +    code, data = api_client.images.get(image_opensuse.name)
                                +
                                +    if code == 404:
                                +        create_image_url(api_client, image_opensuse.name, image_opensuse.url, wait_timeout)
                                +
                                +    spec = api_client.vms.Spec(1, 2, mgmt_network=False)
                                +    spec.user_data += cloud_user_data.format(password=vm_passwd)
                                +    vm_name = unique_name + "-vlan-mgmt"
                                +
                                +    # Create VM
                                +    spec.add_image(image_opensuse.name, "default/" + image_opensuse.name)
                                +    spec.add_network("default", f"{vm_network['namespace']}/{vm_network['name']}")
                                +
                                +    code, data = api_client.vms.create(vm_name, spec)
                                +    assert 201 == code, (f"Failed to create vm with error: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    # get data from running VM and transfer to spec
                                +    code, data = api_client.vms.get(vm_name)
                                +    assert 200 == code, (f"Failed to get specific vm content: {code}, {data}")
                                +    spec = spec.from_dict(data)
                                +
                                +    spec.networks = []
                                +    spec.mgmt_network = True
                                +
                                +    code, data = api_client.vms.update(vm_name, spec)
                                +    assert 200 == code, (f"Failed to update specific vm with spec: {code}, {data}")
                                +
                                +    code, data = api_client.vms.restart(vm_name)
                                +    assert 204 == code, (f"Failed to restart specific vm: {code}, {data}")
                                +
                                +    # Check VM start in running state
                                +    check_vm_running(api_client, vm_name, wait_timeout)
                                +
                                +    # Check until VM ip address exists
                                +    check_vm_ip_exists(api_client, vm_name, wait_timeout)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(vm_name)
                                +        assert 200 == code, (f"Failed to get specific vm status: {code}, {data}")
                                +
                                +        if 'interfaces' in data['status']:
                                +            interfaces_data = data['status']['interfaces']
                                +            ip_addresses = []
                                +            if 'ipAddress' in data['status']['interfaces'][0]:
                                +
                                +                if 'default' in interfaces_data[0]['name']:
                                +                    if 'domain, guest-agent' not in interfaces_data[0]['infoSource']:
                                +                        ip_addresses.append(interfaces_data[0]['ipAddress'])
                                +                        break
                                +            sleep(5)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to get VM {vm_name} IP address, exceed the given timed out\n"
                                +            f"Still got {code} with {data}"
                                +        )
                                +
                                +    # Check can ping management ip address from Harvester node
                                +    mgmt_ip = ip_addresses[0]
                                +
                                +    with host_shell.login(vip) as sh:
                                +        stdout, stderr = sh.exec_command(f"ping -c 50 {mgmt_ip}")
                                +
                                +    assert stdout.find(f"64 bytes from {mgmt_ip}") > 0, (
                                +        f"Failed to ping VM management IP {mgmt_ip} "
                                +        f"on management interface from Harvester node")
                                +
                                +    # Check can ssh to host and execute command from Harvester node
                                +    with vm_shell_from_host(vip, mgmt_ip, vm_user, vm_passwd) as sh:
                                +        stdout, stderr = sh.exec_command("ls")
                                +
                                +    assert stdout.find("bin") == 0, (
                                +        f"Failed to ssh to VM management IP {mgmt_ip} "
                                +        f"on management interface from Harvester node")
                                +
                                +    # Check should not SSH to management ip address from external host
                                +    command = ['/usr/bin/ssh', '-o', 'ConnectTimeout=5', mgmt_ip]
                                +
                                +    with pytest.raises(subprocess.CalledProcessError) as ex:
                                +        subprocess.check_output(command, stderr=subprocess.STDOUT,
                                +                                shell=False, encoding="utf-8")
                                +
                                +    # OpenSSH returns the return code of the program that was executed on
                                +    # the remote, unless there was an error for SSH itself, in which case
                                +    # it returns 255
                                +    assert ex.value.returncode == 255, ("Failed: should not be able to SSH"
                                +                                        " to VM on management interface"
                                +                                        f" {mgmt_ip} from external host")
                                +
                                +    # teardown
                                +    code, data = api_client.vms.get(vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +    vm_checker.wait_deleted(vm_name)
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
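Note: test_vlan_to_mgmt_connection relies on the host_shell and vm_shell_from_host fixtures (defined in the fixtures package, not in this hunk) to run commands on a Harvester node and, through it, on the guest VM. As a rough idea of the kind of helper such fixtures might wrap, here is a paramiko-based sketch; the function name, credentials and behaviour are assumptions, not the suite's actual implementation:

# Purely illustrative sketch; the real fixtures live under harvester_e2e_tests/fixtures
# and may differ (e.g. they return only (stdout, stderr), while raw paramiko returns
# three streams). Assumes password authentication to the Harvester node.
from contextlib import contextmanager

import paramiko


@contextmanager
def node_shell(host, username="rancher", password="secret"):
    """Open an SSH session to a Harvester node and yield the client."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=username, password=password)
    try:
        yield client
    finally:
        client.close()


# Usage resembling the test body above:
# with node_shell(vip) as sh:
#     _, stdout, _ = sh.exec_command(f"ping -c 4 {mgmt_ip}")
#     print(stdout.read().decode())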

                                Manual test plan reference: https://harvester.github.io/tests/manual/network/edit-network-form-change-management-to-vlan/

Steps:

@@ -1013,7 +2003,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

diff --git a/backend/integrations/test_9_rancher_integration.html b/backend/integrations/test_9_rancher_integration.html
index 6351b9adf..5d85ab733 100644
--- a/backend/integrations/test_9_rancher_integration.html
+++ b/backend/integrations/test_9_rancher_integration.html
@@ -3,19 +3,30 @@

                                Functions

                                def csi_deployment(unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='class')
                                +def csi_deployment(unique_name):
                                +    yield {
                                +        "namespace": "default",
                                +        "name": f"csi-{unique_name}",
                                +        "image": "nginx:latest",
                                +        "pvc": f"pvc-{unique_name}"
                                +    }
                                +
                                def harvester_cloud_credential(api_client, rancher_api_client, harvester_mgmt_cluster, unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def harvester_cloud_credential(api_client, rancher_api_client,
                                +                               harvester_mgmt_cluster, unique_name):
                                +    code, data = rancher_api_client.clusters.generate_kubeconfig(
                                +        harvester_mgmt_cluster['id']
                                +    )
                                +    assert 200 == code, (
                                +        f"Failed to create kubconfig with error: {code}, {data}"
                                +    )
                                +    harvester_kubeconfig = data['config']
                                +
                                +    code, data = rancher_api_client.cloud_credentials.create(
                                +        unique_name,
                                +        harvester_kubeconfig,
                                +        harvester_mgmt_cluster['id']
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create cloud credential with error: {code}, {data}"
                                +    )
                                +
                                +    code, data = rancher_api_client.cloud_credentials.get(data['id'])
                                +    assert 200 == code, (
                                +        f"Failed to get cloud credential {data['id']} with error: {code}, {data}"
                                +    )
                                +
                                +    yield data
                                +
                                +    rancher_api_client.cloud_credentials.delete(data['id'])
                                +
                                def harvester_mgmt_cluster(api_client, rancher_api_client, unique_name, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def harvester_mgmt_cluster(api_client, rancher_api_client, unique_name, polling_for):
                                +    """ Rancher creates Harvester entry (Import Existing)
                                +    """
                                +    cluster_name = f"hvst-{unique_name}"
                                +
                                +    code, data = rancher_api_client.mgmt_clusters.create_harvester(cluster_name)
                                +    assert 201 == code, (
                                +         f"Failed to create Harvester entry {cluster_name} with error: {code}, {data}"
                                +    )
                                +
                                +    code, data = polling_for(
                                +        f"finding clusterName in MgmtCluster {cluster_name}",
                                +        lambda code, data: data.get('status', {}).get('clusterName'),
                                +        rancher_api_client.mgmt_clusters.get, cluster_name
                                +    )
                                +
                                +    yield {
                                +        "name": cluster_name,
                                +        "id": data['status']['clusterName']     # e.g. c-m-n6bsktxb
                                +    }
                                +
                                +    rancher_api_client.mgmt_clusters.delete(cluster_name)
                                +    updates = dict(value="")
                                +    api_client.settings.update("cluster-registration-url", updates)
                                +

                                Rancher creates Harvester entry (Import Existing)
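Note: the fixtures and tests in this module call a polling_for helper that comes from conftest rather than this file. Judging by the call sites (a description, a predicate over (code, data), the API callable plus its arguments, and an optional timeout), a simplified sketch could look roughly like the following; the defaults and sleep interval are assumptions:

# Simplified sketch of a polling helper matching the call sites in this module.
# The real helper is provided by conftest and may differ in defaults and behaviour.
from datetime import datetime, timedelta
from time import sleep


def polling_for(subject, checker, api_call, *args, timeout=300, interval=5):
    """Poll api_call(*args) until checker(code, data) is truthy, or fail on timeout."""
    code = data = None
    endtime = datetime.now() + timedelta(seconds=timeout)
    while endtime > datetime.now():
        code, data = api_call(*args)
        if checker(code, data):
            return code, data
        sleep(interval)
    raise AssertionError(f"Timed out ({timeout}s) waiting for {subject}: {code}, {data}")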

                                def ip_pool(request, api_client, unique_name, vlan_network)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def ip_pool(request, api_client, unique_name, vlan_network):
                                +    name = f"ippool-{unique_name}"
                                +    ip_pool_subnet = request.config.getoption('--ip-pool-subnet')
                                +    ip_pool_start = request.config.getoption('--ip-pool-start')
                                +    ip_pool_end = request.config.getoption('--ip-pool-end')
                                +
                                +    code, data = api_client.ippools.create(
                                +        name, ip_pool_subnet, ip_pool_start, ip_pool_end, vlan_network["id"]
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create ip pool {name} with error: {code}, {data}"
                                +    )
                                +
                                +    yield {
                                +        "name": name,
                                +        "subnet": ip_pool_subnet
                                +    }
                                +
                                +    api_client.ippools.delete(name)
                                +
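Note: the ip_pool fixture above (and the vlan_network fixture later in this module) read --ip-pool-subnet, --ip-pool-start, --ip-pool-end, --vlan-id and --vlan-nic from the pytest command line; those options are registered in conftest, outside this hunk. A minimal sketch of how such options are typically registered via pytest_addoption (defaults and help text here are placeholders, not the suite's real values):

# Hypothetical conftest.py excerpt; option names match the getoption() calls above,
# but defaults and help text are illustrative.
def pytest_addoption(parser):
    parser.addoption("--ip-pool-subnet", default="192.168.100.0/24",
                     help="Subnet used by the Harvester load-balancer IP pool")
    parser.addoption("--ip-pool-start", default="192.168.100.120",
                     help="First address of the IP pool range")
    parser.addoption("--ip-pool-end", default="192.168.100.126",
                     help="Last address of the IP pool range")
    parser.addoption("--vlan-id", type=int, default=-1,
                     help="VLAN id for the external network (-1 disables VLAN tests)")
    parser.addoption("--vlan-nic", default="eth1",
                     help="Physical NIC used to build the cluster network")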
                                def lb_service(request, api_client, unique_name, nginx_deployment, ip_pool)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='function', params=["dhcp", "pool"])
                                +def lb_service(request, api_client, unique_name, nginx_deployment, ip_pool):
                                +    namespace = "default"
                                +    name = f"lb-{unique_name}-{request.param}"
                                +    data = {
                                +        "type": "service",
                                +        "metadata": {
                                +            "namespace": namespace,
                                +            "name": name,
                                +            "annotations": {
                                +                "cloudprovider.harvesterhci.io/ipam": request.param
                                +            }
                                +        },
                                +        "spec": {
                                +            "type": "LoadBalancer",
                                +            "sessionAffinity": None,
                                +            "ports": [
                                +                {
                                +                    "name": "http",
                                +                    "port": 8080,
                                +                    "protocol": "TCP",
                                +                    "targetPort": 80
                                +                }
                                +            ],
                                +            "selector": {
                                +                "name": nginx_deployment["name"]
                                +            }
                                +        }
                                +    }
                                +
                                +    yield {
                                +        "namespace": namespace,
                                +        "name": name,
                                +        "data": data
                                +    }
                                +
                                +    code, data = api_client.loadbalancers.get()
                                +    assert 200 == code, (code, data)
                                +    lbs = data["data"]
                                +    for lb in lbs:
                                +        if name in lb["id"]:
                                +            api_client.loadbalancers.delete(lb["id"])
                                +            break
                                +
                                def machine_count(request)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module",
                                +                params=[1,
                                +                        pytest.param(3, marks=pytest.mark.skip(reason="Skip for low I/O env."))])
                                +def machine_count(request):
                                +    return request.param
                                +
                                def nginx_deployment(unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='class')
                                +def nginx_deployment(unique_name):
                                +    return {
                                +        "namespace": "default",
                                +        "name": f"nginx-{unique_name}",
                                +        "image": "nginx:latest"
                                +    }
                                +
                                def rke1_cluster(unique_name, rancher_api_client, machine_count, rke1_version)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='class')
                                +def rke1_cluster(unique_name, rancher_api_client, machine_count, rke1_version):
                                +    name = f"rke1-{unique_name}-{machine_count}"
                                +    yield {
                                +        "name": name,
                                +        "id": "",    # set in Test_RKE1::test_create_rke1, e.g. c-m-n6bsktxb
                                +        "machine_count": machine_count,
                                +        "k8s_version": rke1_version
                                +    }
                                +
                                +    rancher_api_client.mgmt_clusters.delete(name)
                                +
                                def rke2_cluster(unique_name, rancher_api_client, machine_count, rke2_version)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='class')
                                +def rke2_cluster(unique_name, rancher_api_client, machine_count, rke2_version):
                                +    name = f"rke2-{unique_name}-{machine_count}"
                                +    yield {
                                +        "name": name,
                                +        "id": "",    # set in Test_RKE2::test_create_rke2, e.g. c-m-n6bsktxb
                                +        "machine_count": machine_count,
                                +        "k8s_version": rke2_version
                                +    }
                                +
                                +    rancher_api_client.mgmt_clusters.delete(name)
                                +
                                def test_add_project_owner_user(api_client, rancher_api_client, unique_name, wait_timeout, harvester_mgmt_cluster)
                                +
                                + +Expand source code + +
                                @pytest.mark.p1
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(depends=["import_harvester"])
                                +def test_add_project_owner_user(api_client, rancher_api_client, unique_name, wait_timeout,
                                +                                harvester_mgmt_cluster):
                                +    cluster_id = harvester_mgmt_cluster['id']
                                +    username, password = f"user-{unique_name}", unique_name
                                +
                                +    spec = rancher_api_client.users.Spec(password)
                                +    # create user
                                +    code, data = rancher_api_client.users.create(username, spec)
                                +    assert 201 == code, (
                                +        f"Failed to create user {username!r}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +    uid, upids = data['id'], data['principalIds']
                                +
                                +    # add role `user` to user
                                +    code, data = rancher_api_client.users.add_role(uid, 'user')
                                +    assert 201 == code, (
                                +        f"Failed to add role 'user' for user {username!r}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    # Get `Default` project's uid
                                +    cluster_api = rancher_api_client.clusters.explore(cluster_id)
                                +    code, data = cluster_api.projects.get_by_name('Default')
                                +    assert 200 == code, (code, data)
                                +    project_id = data['id']
                                +    # add user to `Default` project as *project-owner*
                                +    code, data = cluster_api.project_members.create(project_id, upids[0], "project-owner")
                                +    assert 201 == code, (code, data)
                                +    proj_muid = data['id']
                                +
                                +    # Login as the user
                                +    endpoint = rancher_api_client.endpoint
                                +    user_rapi = rancher_api_client.login(endpoint, username, password, ssl_verify=False)
                                +    user_capi = user_rapi.clusters.explore(cluster_id)
                                +    # Check user can only view the project he joined
                                +    code, data = user_capi.projects.get()
                                +    assert 200 == code, (code, data)
                                +    assert 1 == len(data['data']), (code, data)
                                +
                                +    # teardown
                                +    cluster_api.project_members.delete(proj_muid)
                                +    rancher_api_client.users.delete(uid)
                                +
                                def test_import_harvester(api_client, rancher_api_client, harvester_mgmt_cluster, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="import_harvester")
                                +def test_import_harvester(api_client, rancher_api_client, harvester_mgmt_cluster, polling_for):
                                +    # Get cluster registration URL in Rancher's Virtualization Management
                                +    code, data = polling_for(
                                +        f"registration URL for the imported harvester {harvester_mgmt_cluster['name']}",
                                +        lambda code, data: 200 == code and data.get('manifestUrl'),
                                +        rancher_api_client.cluster_registration_tokens.get, harvester_mgmt_cluster['id']
                                +    )
                                +
                                +    # Set cluster-registration-url on Harvester
                                +    updates = dict(value=data['manifestUrl'])
                                +    code, data = api_client.settings.update("cluster-registration-url", updates)
                                +    assert 200 == code, (
                                +        f"Failed to update Harvester's settings `cluster-registration-url`"
                                +        f" with error: {code}, {data}"
                                +    )
                                +
                                +    # Check Cluster becomes `active` in Rancher's Virtualization Management
                                +    polling_for(
                                +        "harvester to be ready",
                                +        lambda code, data:
                                +            "active" == data['metadata']['state']['name'] and
                                +            "Ready" in data['metadata']['state']['message'],
                                +        rancher_api_client.mgmt_clusters.get, harvester_mgmt_cluster['name']
                                +    )
                                +
                                def ubuntu_image(api_client, unique_name, image_ubuntu, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def ubuntu_image(api_client, unique_name, image_ubuntu, polling_for):
                                +    name = f"ubuntu-{unique_name}"
                                +
                                +    code, data = api_client.images.create_by_url(name, image_ubuntu.url)
                                +    assert 201 == code, (
                                +        f"Failed to upload ubuntu image with error: {code}, {data}"
                                +    )
                                +
                                +    code, data = polling_for(
                                +        f"image {name} to be ready",
                                +        lambda code, data: data.get('status', {}).get('progress', None) == 100,
                                +        api_client.images.get, name
                                +    )
                                +    namespace = data['metadata']['namespace']
                                +    name = data['metadata']['name']
                                +
                                +    yield {
                                +        "ssh_user": "ubuntu",
                                +        "id": f"{namespace}/{name}"
                                +    }
                                +
                                +    api_client.images.delete(name)
                                +
                                def vlan_network(request, api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def vlan_network(request, api_client):
                                +    vlan_nic = request.config.getoption('--vlan-nic')
                                +    vlan_id = request.config.getoption('--vlan-id')
                                +    assert -1 != vlan_id, "Rancher integration test needs VLAN"
                                +
                                +    api_client.clusternetworks.create(vlan_nic)
                                +    api_client.clusternetworks.create_config(vlan_nic, vlan_nic, vlan_nic)
                                +
                                +    network_name = f'vlan-network-{vlan_id}'
                                +    code, data = api_client.networks.get(network_name)
                                +    if code != 200:
                                +        code, data = api_client.networks.create(network_name, vlan_id, cluster_network=vlan_nic)
                                +        assert 201 == code, (
                                +            f"Failed to create network-attachment-definition {network_name} \
                                +                with error {code}, {data}"
                                +        )
                                +    namespace = data['metadata']['namespace']
                                +    name = data['metadata']['name']
                                +
                                +    yield {
                                +        "name": name,
                                +        "id": f"{namespace}/{name}"
                                +    }
                                +
                                +    api_client.networks.delete(network_name)
                                +

                                @@ -120,7 +476,6 @@

                                Classes

                                class TestRKE1
                                -
Expand source code

@@ -473,6 +828,7 @@

                                Classes

                                timeout=rancher_wait_timeout )
                                +

                                Class variables

                                var pytestmark
                                @@ -486,54 +842,431 @@

                                Methods

                                def test_cloud_provider_chart(self, rancher_api_client, rke1_cluster, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke1"], name="cloud_provider_chart")
                                +def test_cloud_provider_chart(self, rancher_api_client, rke1_cluster, polling_for):
                                +    chart, deployment = "harvester-cloud-provider", "harvester-cloud-provider"
                                +    polling_for(
                                +        f"chart {chart} to be create",
                                +        lambda code, data:
                                +            201 == code,
                                +        rancher_api_client.charts.create,
                                +            rke1_cluster['id'], "kube-system", chart,
                                +        timeout=60
                                +    )
                                +    # Polling on creation for possible 500 error in Rancher Apps
                                +    # * https://github.com/rancher/rancher/issues/37610
                                +    # * https://github.com/rancher/rancher/issues/43036
                                +
                                +    polling_for(
                                +        f"chart {chart} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "deployed" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.charts.get,
                                +            rke1_cluster['id'], "kube-system", chart
                                +    )
                                +    polling_for(
                                +        f"deployment {deployment} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke1_cluster['id'], "kube-system", deployment
                                +    )
                                +
                                def test_create_pvc(self, rancher_api_client, harvester_mgmt_cluster, unique_name, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke1"])
                                +def test_create_pvc(self, rancher_api_client, harvester_mgmt_cluster,
                                +                    unique_name, polling_for):
                                +    cluster_id = harvester_mgmt_cluster['id']
                                +    capi = rancher_api_client.clusters.explore(cluster_id)
                                +
                                +    # Create PVC
                                +    size = "1Gi"
                                +    spec = capi.pvcs.Spec(size)
                                +    code, data = capi.pvcs.create(unique_name, spec)
                                +    assert 201 == code, (code, data)
                                +
                                +    # Verify PVC is created
                                +    code, data = polling_for(
                                +        f"PVC {unique_name} to be in Bound phase",
                                +        lambda code, data: "Bound" == data['status'].get('phase'),
                                +        capi.pvcs.get, unique_name
                                +    )
                                +
                                +    # Verify the PV for created PVC
                                +    pv_code, pv_data = capi.pvs.get(data['spec']['volumeName'])
                                +    assert 200 == pv_code, (
                                +        f"Relevant PV is NOT available for created PVC's PV({data['spec']['volumeName']})\n"
                                +        f"Response data of PV: {data}"
                                +    )
                                +
                                +    # Verify size of the PV is aligned to requested size of PVC
                                +    assert size == pv_data['spec']['capacity']['storage'], (
                                +        "Size of the PV is NOT aligned to requested size of PVC,"
                                +        f" expected: {size}, PV's size: {pv_data['spec']['capacity']['storage']}\n"
                                +        f"Response data of PV: {data}"
                                +    )
                                +
                                +    # Verify PVC's size
                                +    created_spec = capi.pvcs.Spec.from_dict(data)
+    assert size == created_spec.size, (
+        f"Size is NOT correct in created PVC, expected: {size}, created: {created_spec.size}\n"
                                +        f"Response data: {data}"
                                +    )
                                +
                                +    # Verify the storage class exists
                                +    sc_code, sc_data = capi.scs.get(created_spec.storage_cls)
                                +    assert 200 == sc_code, (
                                +        f"Storage Class is NOT exists for created PVC\n"
                                +        f"Created PVC Spec: {data}\n"
                                +        f"SC Status({sc_code}): {sc_data}"
                                +    )
                                +
                                +    # verify the storage class is marked `default`
                                +    assert 'true' == sc_data['metadata']['annotations'][capi.scs.DEFAULT_KEY], (
                                +        f"Storage Class is NOT the DEFAULT for created PVC\n"
                                +        f"Requested Storage Class: {spec.storage_cls!r}"
                                +        f"Created PVC Spec: {data}\n"
                                +        f"SC Status({sc_code}): {sc_data}"
                                +    )
                                +
                                +    # teardown
                                +    capi.pvcs.delete(unique_name)
                                +
-def test_create_rke1(self, rancher_api_client, unique_name, harvester_mgmt_cluster, rancher_wait_timeout, rke1_cluster, harvester_cloud_credential, ubuntu_image, vlan_network, polling_for)
+def test_create_rke1(self,
                                rancher_api_client,
                                unique_name,
                                harvester_mgmt_cluster,
                                rancher_wait_timeout,
                                rke1_cluster,
                                harvester_cloud_credential,
                                ubuntu_image,
                                vlan_network,
                                polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["import_harvester"], name="create_rke1")
                                +def test_create_rke1(self, rancher_api_client, unique_name, harvester_mgmt_cluster,
                                +                     rancher_wait_timeout,
                                +                     rke1_cluster, harvester_cloud_credential,
                                +                     ubuntu_image, vlan_network, polling_for):
                                +    code, data = rancher_api_client.kube_configs.create(
                                +        rke1_cluster['name'],
                                +        harvester_mgmt_cluster['id']
                                +    )
                                +    assert 200 == code, f"Failed to create harvester kubeconfig with error: {code}, {data}"
                                +    assert data.strip(), f"Harvester kubeconfig should not be empty: {code}, {data}"
                                +    kubeconfig = data
                                +
                                +    code, data = rancher_api_client.node_templates.create(
                                +        name=unique_name,
                                +        cpus=2,
                                +        mems=4,
                                +        disks=40,
                                +        image_id=ubuntu_image['id'],
                                +        network_id=vlan_network['name'],
                                +        ssh_user=ubuntu_image['ssh_user'],
                                +        cloud_credential_id=harvester_cloud_credential['id'],
                                +        user_data=(
                                +            "#cloud-config\n"
                                +            "password: test\n"
                                +            "chpasswd:\n"
                                +            "    expire: false\n"
                                +            "ssh_pwauth: true\n"
                                +        ),
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create NodeTemplate {unique_name} with error: {code}, {data}"
                                +    )
                                +
                                +    node_template_id = data['id']
                                +
                                +    code, data = rancher_api_client.clusters.create(
                                +        rke1_cluster['name'], rke1_cluster['k8s_version'], kubeconfig
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create cluster {rke1_cluster['name']} with error: {code}, {data}"
                                +    )
                                +
                                +    # update fixture value
                                +    rke1_cluster['id'] = data['id']
                                +
                                +    # check cluster created and ready for use
                                +    polling_for(
                                +        f"cluster {rke1_cluster['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "RKESecretsMigrated" in [c['type'] for c in data['conditions']],
                                +        rancher_api_client.clusters.get, rke1_cluster['id'],
                                +        timeout=rancher_wait_timeout
                                +    )
                                +
                                +    code, data = rancher_api_client.node_pools.create(
                                +        cluster_id=rke1_cluster['id'],
                                +        node_template_id=node_template_id,
                                +        hostname_prefix=f"{rke1_cluster['name']}-",
                                +        quantity=rke1_cluster['machine_count']
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create NodePools for cluster {rke1_cluster['name']}\n"
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"MgmtCluster {rke1_cluster['name']} to be ready",
                                +        lambda code, data: code == 200 and data.get('status', {}).get('ready', False),
                                +        rancher_api_client.mgmt_clusters.get, rke1_cluster['id'],
                                +        timeout=rancher_wait_timeout
                                +    )
                                +
                                def test_csi_deployment(self, rancher_api_client, rke1_cluster, csi_deployment, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["csi_driver_chart"], name="csi_deployment")
                                +def test_csi_deployment(self, rancher_api_client, rke1_cluster, csi_deployment, polling_for):
                                +    # create pvc
                                +    code, data = rancher_api_client.pvcs.create(rke1_cluster['id'], csi_deployment['pvc'])
                                +    assert 201 == code, (
                                +        f"Fail to create {csi_deployment['pvc']} on {rke1_cluster['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"PVC {csi_deployment['pvc']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "bound" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.pvcs.get, rke1_cluster['id'], csi_deployment['pvc']
                                +    )
                                +
                                +    # deployment with csi
                                +    code, data = rancher_api_client.cluster_deployments.create(
                                +        rke1_cluster['id'], csi_deployment['namespace'],
                                +        csi_deployment['name'], csi_deployment['image'], csi_deployment['pvc']
                                +    )
                                +    assert 201 == code, (
                                +        f"Fail to deploy {csi_deployment['name']} on {rke1_cluster['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"deployment {csi_deployment['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke1_cluster['id'], csi_deployment['namespace'], csi_deployment['name']
                                +    )
                                +
                                def test_csi_driver_chart(self, rancher_api_client, rke1_cluster, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke1"], name="csi_driver_chart")
                                +def test_csi_driver_chart(self, rancher_api_client, rke1_cluster, polling_for):
                                +    chart, deployment = "harvester-csi-driver", "harvester-csi-driver-controllers"
                                +    polling_for(
                                +        f"chart {chart} to be create",
                                +        lambda code, data:
                                +            201 == code,
                                +        rancher_api_client.charts.create,
                                +            rke1_cluster['id'], "kube-system", chart,
                                +        timeout=60
                                +    )
                                +    # Polling on creation for possible 500 error in Rancher Apps
                                +    # * https://github.com/rancher/rancher/issues/37610
                                +    # * https://github.com/rancher/rancher/issues/43036
                                +
                                +    polling_for(
                                +        f"chart {chart} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "deployed" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.charts.get,
                                +            rke1_cluster['id'], "kube-system", chart
                                +    )
                                +    polling_for(
                                +        f"deployment {deployment} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke1_cluster['id'], "kube-system", deployment
                                +    )
                                +
                                def test_delete_deployment(self, rancher_api_client, rke1_cluster, csi_deployment, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["csi_deployment"])
                                +def test_delete_deployment(self, rancher_api_client, rke1_cluster, csi_deployment,
                                +                           polling_for):
                                +    code, data = rancher_api_client.cluster_deployments.delete(
                                +        rke1_cluster['id'], csi_deployment['namespace'], csi_deployment['name']
                                +    )
                                +    assert 204 == code, (
                                +        f"Failed to delete deployment {csi_deployment['name']} with error: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"deployment {csi_deployment['name']} to be deleted",
                                +        lambda code, data:
                                +            code == 404,
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke1_cluster['id'], csi_deployment['namespace'], csi_deployment['name']
                                +    )
                                +
                                +    # teardown
                                +    rancher_api_client.pvcs.delete(rke1_cluster['id'], csi_deployment['pvc'])
                                +
                                def test_delete_rke1(self, api_client, rancher_api_client, rke1_cluster, rancher_wait_timeout, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke1"])
                                +def test_delete_rke1(self, api_client, rancher_api_client, rke1_cluster,
                                +                     rancher_wait_timeout, polling_for):
                                +    code, data = rancher_api_client.mgmt_clusters.delete(rke1_cluster['id'])
                                +    assert 200 == code, (
                                +        f"Failed to delete RKE2 MgmtCluster {rke1_cluster['name']} with error: {code}, {data}"
                                +    )
                                +
                                +    def _remaining_vm_cnt() -> int:
+        # in RKE1, when the cluster is deleted, VMs may still be in Terminating status
                                +        code, data = api_client.vms.get()
                                +        remaining_vm_cnt = 0
                                +        for d in data.get('data', []):
                                +            vm_name = d.get('metadata', {}).get('name', "")
                                +            if vm_name.startswith(f"{rke1_cluster['name']}-"):
                                +                remaining_vm_cnt += 1
                                +        return remaining_vm_cnt
                                +
                                +    polling_for(
                                +        f"cluster {rke1_cluster['name']} to be deleted",
                                +        lambda code, data: code == 404 and _remaining_vm_cnt() == 0,
                                +        rancher_api_client.clusters.get, rke1_cluster['id'],
                                +        timeout=rancher_wait_timeout
                                +    )
                                +
                                def test_deploy_nginx(self, rancher_api_client, rke1_cluster, nginx_deployment, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["cloud_provider_chart"], name="deploy_nginx")
                                +def test_deploy_nginx(self, rancher_api_client, rke1_cluster, nginx_deployment, polling_for):
                                +    code, data = rancher_api_client.cluster_deployments.create(
                                +        rke1_cluster['id'], nginx_deployment['namespace'],
                                +        nginx_deployment['name'], nginx_deployment['image']
                                +    )
                                +    assert 201 == code, (
                                +        f"Fail to deploy {nginx_deployment['name']} on {rke1_cluster['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"deployment {nginx_deployment['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke1_cluster['id'], nginx_deployment['namespace'], nginx_deployment['name']
                                +    )
                                +
                                def test_load_balancer_service(self, rancher_api_client, rke1_cluster, nginx_deployment, lb_service, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["deploy_nginx"])
                                +def test_load_balancer_service(self, rancher_api_client, rke1_cluster, nginx_deployment,
                                +                               lb_service, polling_for):
                                +    # create LB service
                                +    code, data = rancher_api_client.cluster_services.create(
                                +        rke1_cluster['id'], lb_service["data"]
                                +    )
                                +    assert 201 == code, (
                                +        f"Fail to create {lb_service['name']} for {nginx_deployment['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    # check service active
                                +    code, data = polling_for(
                                +        f"service {lb_service['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_services.get, rke1_cluster['id'], lb_service['name']
                                +    )
                                +
+    # check Nginx can be queried via LB
                                +    try:
                                +        ingress_ip = data["status"]["loadBalancer"]["ingress"][0]['ip']
                                +        ingress_port = data['spec']['ports'][0]['port']
                                +        ingress_url = f"http://{ingress_ip}:{ingress_port}"
                                +    except Exception as e:
                                +        raise AssertionError(
                                +            f"Fail to get ingress info from {lb_service['name']}\n"
                                +            f"Got error: {e}\n"
                                +            f"Service data: {data}"
                                +        )
                                +    resp = rancher_api_client.session.get(ingress_url)
                                +    assert resp.ok and "Welcome to nginx" in resp.text, (
                                +        f"Fail to query load balancer {lb_service['name']}\n"
                                +        f"Got error: {resp.status_code}, {resp.text}\n"
                                +        f"Service data: {data}"
                                +    )
                                +
                                +    # teardown
                                +    rancher_api_client.cluster_services.delete(rke1_cluster['id'], lb_service["name"])
                                +
                                @@ -542,7 +1275,6 @@

                                Methods

                                class TestRKE2
                                -
                                Expand source code @@ -843,6 +1575,7 @@

                                Methods

                                remaining_vm_cnt += 1 assert 0 == remaining_vm_cnt, (f"Still have {remaining_vm_cnt} RKE2 VMs")
                                +

                                Class variables

                                var pytestmark
                                @@ -856,42 +1589,361 @@

                                Methods

                                def test_create_pvc(self, rancher_api_client, harvester_mgmt_cluster, unique_name, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke2"])
                                +def test_create_pvc(self, rancher_api_client, harvester_mgmt_cluster,
                                +                    unique_name, polling_for):
                                +    cluster_id = harvester_mgmt_cluster['id']
                                +    capi = rancher_api_client.clusters.explore(cluster_id)
                                +
                                +    # Create PVC
                                +    size = "1Gi"
                                +    spec = capi.pvcs.Spec(size)
                                +    code, data = capi.pvcs.create(unique_name, spec)
                                +    assert 201 == code, (code, data)
                                +
                                +    # Verify PVC is created
                                +    code, data = polling_for(
                                +        f"PVC {unique_name} to be in Bound phase",
                                +        lambda code, data: "Bound" == data['status'].get('phase'),
                                +        capi.pvcs.get, unique_name
                                +    )
                                +
                                +    # Verify the PV for created PVC
                                +    pv_code, pv_data = capi.pvs.get(data['spec']['volumeName'])
                                +    assert 200 == pv_code, (
                                +        f"Relevant PV is NOT available for created PVC's PV({data['spec']['volumeName']})\n"
                                +        f"Response data of PV: {data}"
                                +    )
                                +
                                +    # Verify size of the PV is aligned to requested size of PVC
                                +    assert size == pv_data['spec']['capacity']['storage'], (
                                +        "Size of the PV is NOT aligned to requested size of PVC,"
                                +        f" expected: {size}, PV's size: {pv_data['spec']['capacity']['storage']}\n"
                                +        f"Response data of PV: {data}"
                                +    )
                                +
                                +    # Verify PVC's size
                                +    created_spec = capi.pvcs.Spec.from_dict(data)
+    assert size == created_spec.size, (
+        f"Size is NOT correct in created PVC, expected: {size}, created: {created_spec.size}\n"
                                +        f"Response data: {data}"
                                +    )
                                +
                                +    # Verify the storage class exists
                                +    sc_code, sc_data = capi.scs.get(created_spec.storage_cls)
                                +    assert 200 == sc_code, (
                                +        f"Storage Class is NOT exists for created PVC\n"
                                +        f"Created PVC Spec: {data}\n"
                                +        f"SC Status({sc_code}): {sc_data}"
                                +    )
                                +
                                +    # verify the storage class is marked `default`
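+    # (DEFAULT_KEY presumably maps to the standard
+    #  `storageclass.kubernetes.io/is-default-class` annotation)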
                                +    assert 'true' == sc_data['metadata']['annotations'][capi.scs.DEFAULT_KEY], (
                                +        f"Storage Class is NOT the DEFAULT for created PVC\n"
                                +        f"Requested Storage Class: {spec.storage_cls!r}"
                                +        f"Created PVC Spec: {data}\n"
                                +        f"SC Status({sc_code}): {sc_data}"
                                +    )
                                +
                                +    # teardown
                                +    capi.pvcs.delete(unique_name)
                                +
                                -def test_create_rke2(self, rancher_api_client, unique_name, harvester_mgmt_cluster, harvester_cloud_credential, rke2_cluster, ubuntu_image, vlan_network, rancher_wait_timeout, polling_for) +def test_create_rke2(self,
                                rancher_api_client,
                                unique_name,
                                harvester_mgmt_cluster,
                                harvester_cloud_credential,
                                rke2_cluster,
                                ubuntu_image,
                                vlan_network,
                                rancher_wait_timeout,
                                polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["import_harvester"], name="create_rke2")
                                +def test_create_rke2(self, rancher_api_client, unique_name, harvester_mgmt_cluster,
                                +                     harvester_cloud_credential, rke2_cluster, ubuntu_image, vlan_network,
                                +                     rancher_wait_timeout, polling_for):
                                +    # Create Harvester kubeconfig for this RKE2 cluster
                                +    code, data = rancher_api_client.kube_configs.create(
                                +        rke2_cluster['name'],
                                +        harvester_mgmt_cluster['id']
                                +    )
                                +    assert 200 == code, (
                                +        f"Failed to create harvester kubeconfig for rke2 with error: {code}, {data}"
                                +    )
                                +    assert "" != data, (
                                +        f"Harvester kubeconfig for rke2 should not be empty: {code}, {data}"
                                +    )
                                +    kubeconfig = data
                                +
                                +    # Create credential for this RKE2 cluster
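+    # the kubeconfig comes back as a JSON-quoted string; strip the surrounding quotes
+    # and unescape the newlines before storing it as the secret's credential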
                                +    code, data = rancher_api_client.secrets.create(
                                +        name=unique_name,
                                +        data={
                                +            "credential": kubeconfig[1:-1].replace("\\n", "\n")
                                +        },
                                +        annotations={
                                +            "v2prov-secret-authorized-for-cluster": rke2_cluster['name'],
                                +            "v2prov-authorized-secret-deletes-on-cluster-removal": "true"
                                +        }
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create secret with error: {code}, {data}"
                                +    )
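+    # reference the secret as "<namespace>:<name>" when creating the cluster below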
                                +    cloud_provider_config_id = f"{data['metadata']['namespace']}:{data['metadata']['name']}"
                                +
                                +    # Create RKE2 cluster spec
                                +    code, data = rancher_api_client.harvester_configs.create(
                                +        name=unique_name,
                                +        cpus="2",
                                +        mems="4",
                                +        disks="40",
                                +        image_id=ubuntu_image['id'],
                                +        network_id=vlan_network['name'],
                                +        ssh_user=ubuntu_image['ssh_user'],
                                +        user_data=(
                                +            "#cloud-config\n"
                                +            "password: test\n"
                                +            "chpasswd:\n"
                                +            "    expire: false\n"
                                +            "ssh_pwauth: true\n"
                                +        ),
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create harvester config with error: {code}, {data}"
                                +    )
                                +
                                +    # Create RKE2 cluster
                                +    code, data = rancher_api_client.mgmt_clusters.create(
                                +        name=rke2_cluster['name'],
                                +        cloud_provider_config_id=cloud_provider_config_id,
                                +        hostname_prefix=f"{rke2_cluster['name']}-",
                                +        harvester_config_name=unique_name,
                                +        k8s_version=rke2_cluster['k8s_version'],
                                +        cloud_credential_id=harvester_cloud_credential['id'],
                                +        quantity=rke2_cluster['machine_count']
                                +    )
                                +    assert 201 == code, (
                                +        f"Failed to create RKE2 MgmtCluster {unique_name} with error: {code}, {data}"
                                +    )
                                +
                                +    code, data = polling_for(
                                +        f"cluster {rke2_cluster['name']} to be ready",
                                +        lambda code, data:
                                +            "active" == data['metadata']['state']['name'] and
                                +            "Ready" in data['metadata']['state']['message'],
                                +        rancher_api_client.mgmt_clusters.get, rke2_cluster['name'],
                                +        timeout=rancher_wait_timeout
                                +    )
                                +
                                +    # update fixture value
                                +    rke2_cluster['id'] = data["status"]["clusterName"]
                                +
                                +    # Check deployments
                                +    testees = ["harvester-cloud-provider", "harvester-csi-driver-controllers"]
                                +    polling_for(
                                +        f"harvester deployments on {rke2_cluster['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke2_cluster['id'], "kube-system", testees
                                +    )
                                +
                                def test_csi_deployment(self, rancher_api_client, rke2_cluster, csi_deployment, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke2"], name="csi_deployment")
                                +def test_csi_deployment(self, rancher_api_client, rke2_cluster, csi_deployment, polling_for):
                                +    # create pvc
                                +    code, data = rancher_api_client.pvcs.create(rke2_cluster['id'], csi_deployment['pvc'])
                                +    assert 201 == code, (
                                +        f"Fail to create {csi_deployment['pvc']} on {rke2_cluster['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"PVC {csi_deployment['pvc']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "bound" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.pvcs.get, rke2_cluster['id'], csi_deployment['pvc']
                                +    )
                                +
                                +    # deployment with csi
                                +    code, data = rancher_api_client.cluster_deployments.create(
                                +        rke2_cluster['id'], csi_deployment['namespace'],
                                +        csi_deployment['name'], csi_deployment['image'], csi_deployment['pvc']
                                +    )
                                +    assert 201 == code, (
                                +        f"Fail to deploy {csi_deployment['name']} on {rke2_cluster['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"deployment {csi_deployment['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke2_cluster['id'], csi_deployment['namespace'], csi_deployment['name']
                                +    )
                                +
                                def test_delete_deployment(self, rancher_api_client, rke2_cluster, csi_deployment, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["csi_deployment"])
                                +def test_delete_deployment(self, rancher_api_client, rke2_cluster, csi_deployment,
                                +                           polling_for):
                                +    code, data = rancher_api_client.cluster_deployments.delete(
                                +        rke2_cluster['id'], csi_deployment['namespace'], csi_deployment['name']
                                +    )
                                +    assert 204 == code, (
                                +        f"Failed to delete deployment {csi_deployment['name']} with error: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"deployment {csi_deployment['name']} to be deleted",
                                +        lambda code, data:
                                +            code == 404,
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke2_cluster['id'], csi_deployment['namespace'], csi_deployment['name']
                                +    )
                                +
                                +    # teardown
                                +    rancher_api_client.pvcs.delete(rke2_cluster['id'], csi_deployment['pvc'])
                                +
                                def test_delete_rke2(self, api_client, rancher_api_client, rke2_cluster, rancher_wait_timeout, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke2"])
                                +def test_delete_rke2(self, api_client, rancher_api_client, rke2_cluster,
                                +                     rancher_wait_timeout, polling_for):
                                +    code, data = rancher_api_client.mgmt_clusters.delete(rke2_cluster['name'])
                                +    assert 200 == code, (
                                +        f"Failed to delete RKE2 MgmtCluster {rke2_cluster['name']} with error: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"cluster {rke2_cluster['name']} to be deleted",
                                +        lambda code, data: 404 == code,
                                +        rancher_api_client.mgmt_clusters.get, rke2_cluster['name'],
                                +        timeout=rancher_wait_timeout
                                +    )
                                +
                                +    code, data = api_client.vms.get()
                                +    remaining_vm_cnt = 0
                                +    for d in data.get('data', []):
                                +        vm_name = d.get('metadata', {}).get('name', "")
                                +        if vm_name.startswith(f"{rke2_cluster['name']}-"):
                                +            remaining_vm_cnt += 1
                                +    assert 0 == remaining_vm_cnt, (f"Still have {remaining_vm_cnt} RKE2 VMs")
                                +
                                def test_deploy_nginx(self, rancher_api_client, rke2_cluster, nginx_deployment, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_rke2"], name="deploy_nginx")
                                +def test_deploy_nginx(self, rancher_api_client, rke2_cluster, nginx_deployment, polling_for):
                                +    code, data = rancher_api_client.cluster_deployments.create(
                                +        rke2_cluster['id'], nginx_deployment['namespace'],
                                +        nginx_deployment['name'], nginx_deployment['image']
                                +    )
                                +    assert 201 == code, (
                                +        f"Fail to deploy {nginx_deployment['name']} on {rke2_cluster['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    polling_for(
                                +        f"deployment {nginx_deployment['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_deployments.get,
                                +            rke2_cluster['id'], nginx_deployment['namespace'], nginx_deployment['name']
                                +    )
                                +
                                def test_load_balancer_service(self, rancher_api_client, rke2_cluster, nginx_deployment, lb_service, polling_for)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["deploy_nginx"])
                                +def test_load_balancer_service(self, rancher_api_client, rke2_cluster, nginx_deployment,
                                +                               lb_service, polling_for):
                                +    # create LB service
                                +    code, data = rancher_api_client.cluster_services.create(
                                +        rke2_cluster['id'], lb_service["data"]
                                +    )
                                +    assert 201 == code, (
                                +        f"Fail to create {lb_service['name']} for {nginx_deployment['name']}\n"
                                +        f"API Response: {code}, {data}"
                                +    )
                                +
                                +    # check service active
                                +    code, data = polling_for(
                                +        f"service {lb_service['name']} to be ready",
                                +        lambda code, data:
                                +            200 == code and
                                +            "active" == data.get("metadata", {}).get("state", {}).get("name"),
                                +        rancher_api_client.cluster_services.get, rke2_cluster['id'], lb_service['name']
                                +    )
                                +
+    # check Nginx can be queried via LB
                                +    try:
                                +        ingress = data["status"]["loadBalancer"]["ingress"][0]
                                +        ingress_url = f"http://{ingress['ip']}:{ingress['ports'][0]['port']}"
                                +    except Exception as e:
                                +        raise AssertionError(
                                +            f"Fail to get ingress info from {lb_service['name']}\n"
                                +            f"Got error: {e}\n"
                                +            f"Service data: {data}"
                                +        )
                                +    resp = rancher_api_client.session.get(ingress_url)
                                +    assert resp.ok and "Welcome to nginx" in resp.text, (
                                +        f"Fail to query load balancer {lb_service['name']}\n"
                                +        f"Got error: {resp.status_code}, {resp.text}\n"
                                +        f"Service data: {data}"
                                +    )
                                +
                                +    # teardown
                                +    rancher_api_client.cluster_services.delete(rke2_cluster['id'], lb_service["name"])
                                +
                                @@ -962,7 +2014,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_upgrade.html b/backend/integrations/test_upgrade.html index 90c0f7c74..2a30168a0 100644 --- a/backend/integrations/test_upgrade.html +++ b/backend/integrations/test_upgrade.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_upgrade API documentation - + @@ -37,66 +48,467 @@

                                Functions

                                def cluster_network(vlan_nic, api_client, unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def cluster_network(vlan_nic, api_client, unique_name):
                                +    code, data = api_client.clusternetworks.get_config()
                                +    assert 200 == code, (code, data)
                                +
                                +    node_key = 'network.harvesterhci.io/matched-nodes'
                                +    cnet_nodes = dict()  # cluster_network: items
                                +    for cfg in data['items']:
                                +        if vlan_nic in cfg['spec']['uplink']['nics']:
                                +            nodes = json.loads(cfg['metadata']['annotations'][node_key])
                                +            cnet_nodes.setdefault(cfg['spec']['clusterNetwork'], []).extend(nodes)
                                +
                                +    code, data = api_client.hosts.get()
                                +    assert 200 == code, (code, data)
                                +    all_nodes = set(n['id'] for n in data['data'])
                                +    try:
+        # vlan_nic is configured on a specific cluster network, reuse it
                                +        yield next(cnet for cnet, nodes in cnet_nodes.items() if all_nodes == set(nodes))
                                +        return None
                                +    except StopIteration:
                                +        configured_nodes = reduce(add, cnet_nodes.values(), [])
                                +        if any(n in configured_nodes for n in all_nodes):
                                +            raise AssertionError(
                                +                "Not all nodes' VLAN NIC {vlan_nic} are available.\n"
                                +                f"VLAN NIC configured nodes: {configured_nodes}\n"
                                +                f"All nodes: {all_nodes}\n"
                                +            )
                                +
                                +    # Create cluster network
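+    # compress the timestamp-based unique_name into a short suffix, presumably to keep
+    # the cluster network name within Kubernetes' name length limits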
                                +    cnet = f"cnet-{datetime.strptime(unique_name, '%Hh%Mm%Ss%f-%m-%d').strftime('%H%M%S')}"
                                +    created = []
                                +    code, data = api_client.clusternetworks.create(cnet)
                                +    assert 201 == code, (code, data)
                                +    while all_nodes:
                                +        node = all_nodes.pop()
                                +        code, data = api_client.clusternetworks.create_config(node, cnet, vlan_nic, hostname=node)
                                +        assert 201 == code, (
                                +            f"Failed to create cluster config for {node}\n"
                                +            f"Created: {created}\t Remaining: {all_nodes}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +        created.append(node)
                                +
                                +    yield cnet
                                +
                                +    # Teardown
                                +    deleted = {name: api_client.clusternetworks.delete_config(name) for name in created}
                                +    failed = [(name, code, data) for name, (code, data) in deleted.items() if 200 != code]
                                +    if failed:
                                +        fmt = "Unable to delete VLAN Config {} with error ({}): {}"
                                +        raise AssertionError(
                                +            "\n".join(fmt.format(name, code, data) for (name, code, data) in failed)
                                +        )
                                +
                                +    code, data = api_client.clusternetworks.delete(cnet)
                                +    assert 200 == code, (code, data)
                                +
                                def cluster_state(request, unique_name, api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def cluster_state(request, unique_name, api_client):
                                +    class ClusterState:
                                +        vm1 = None
                                +        vm2 = None
                                +        vm3 = None
                                +        pass
                                +
                                +    state = ClusterState()
                                +
                                +    if request.config.getoption('--upgrade-target-version'):
                                +        state.version_verify = True
                                +        state.version = request.config.getoption('--upgrade-target-version')
                                +    else:
                                +        state.version_verify = False
                                +        state.version = f"version-{unique_name}"
                                +
                                +    return state
                                +
                                def config_backup_target(request, api_client, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="class")
                                +def config_backup_target(request, api_client, wait_timeout):
                                +    # multiple fixtures from `vm_backup_restore`
                                +    conflict_retries = 5
                                +    nfs_endpoint = request.config.getoption('--nfs-endpoint')
                                +    assert nfs_endpoint, f"NFS endpoint not configured: {nfs_endpoint}"
                                +    assert nfs_endpoint.startswith("nfs://"), (
                                +        f"NFS endpoint should starts with `nfs://`, not {nfs_endpoint}"
                                +    )
                                +    backup_type, config = ("NFS", dict(endpoint=nfs_endpoint))
                                +
                                +    code, data = api_client.settings.get('backup-target')
                                +    origin_spec = api_client.settings.BackupTargetSpec.from_dict(data)
                                +
                                +    spec = getattr(api_client.settings.BackupTargetSpec, backup_type)(**config)
+    # ???: when switching S3 -> NFS, updating backup-target can easily hit a resource conflict,
                                +    # so we would need retries to apply the change.
                                +    for _ in range(conflict_retries):
                                +        code, data = api_client.settings.update('backup-target', spec)
                                +        if 409 == code and "Conflict" == data['reason']:
                                +            sleep(3)
                                +        else:
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            f"Unable to update backup-target after {conflict_retries} retried."
                                +            f"API Status({code}): {data}"
                                +        )
                                +    assert 200 == code, (
                                +        f'Failed to update backup target to {backup_type} with {config}\n'
                                +        f"API Status({code}): {data}"
                                +    )
                                +
                                +    yield spec
                                +
                                +    # remove unbound LH backupVolumes
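+    # a Longhorn backup volume with no lastBackupName has no backups attached,
+    # so it can be removed during cleanup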
                                +    code, data = api_client.lhbackupvolumes.get()
                                +    assert 200 == code, "Failed to list lhbackupvolumes"
                                +
                                +    check_names = []
                                +    for volume_data in data["items"]:
                                +        volume_name = volume_data["metadata"]["name"]
                                +        backup_name = volume_data["status"]["lastBackupName"]
                                +        if not backup_name:
                                +            api_client.lhbackupvolumes.delete(volume_name)
                                +            check_names.append(volume_name)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        for name in check_names[:]:
                                +            code, data = api_client.lhbackupvolumes.get(name)
                                +            if 404 == code:
                                +                check_names.remove(name)
                                +        if not check_names:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete unbound lhbackupvolumes: {check_names}\n"
                                +            f"Last API Status({code}): {data}"
                                +            )
                                +
+    # restore to the original backup-target and remove backups that do not belong to it
                                +    code, data = api_client.settings.update('backup-target', origin_spec)
                                +    code, data = api_client.backups.get()
                                +    assert 200 == code, "Failed to list backups"
                                +
                                +    check_names = []
                                +    for backup in data['data']:
                                +        endpoint = backup['status']['backupTarget'].get('endpoint')
                                +        if endpoint != origin_spec.value.get('endpoint'):
                                +            api_client.backups.delete(backup['metadata']['name'])
                                +            check_names.append(backup['metadata']['name'])
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        for name in check_names[:]:
                                +            code, data = api_client.backups.get(name)
                                +            if 404 == code:
                                +                check_names.remove(name)
                                +        if not check_names:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to delete backups: {check_names}\n"
                                +            f"Last API Status({code}): {data}"
                                +            )
                                +
                                def config_storageclass(request, api_client, unique_name, cluster_state)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def config_storageclass(request, api_client, unique_name, cluster_state):
                                +    replicas = request.config.getoption('--upgrade-sc-replicas') or 3
                                +
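+    # remember the current default storage class so it can be restored on teardown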
                                +    code, default_sc = api_client.scs.get_default()
                                +    assert 200 == code, (code, default_sc)
                                +
                                +    sc_name = f"new-sc-{replicas}-{unique_name}"
                                +    code, data = api_client.scs.create(sc_name, replicas)
                                +    assert 201 == code, (code, data)
                                +
                                +    code, data = api_client.scs.set_default(sc_name)
                                +    assert 200 == code, (code, data)
                                +
                                +    cluster_state.scs = (default_sc, data)
                                +    yield default_sc, data
                                +
                                +    code, data = api_client.scs.set_default(default_sc['metadata']['name'])
                                +    assert 200 == code, (code, data)
                                +
                                def harvester_crds(api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def harvester_crds(api_client):
                                +    crds = {
                                +        "addons.harvesterhci.io": False,
                                +        "blockdevices.harvesterhci.io": False,
                                +        "keypairs.harvesterhci.io": False,
                                +        "preferences.harvesterhci.io": False,
                                +        "settings.harvesterhci.io": False,
                                +        "supportbundles.harvesterhci.io": False,
                                +        "upgrades.harvesterhci.io": False,
                                +        "versions.harvesterhci.io": False,
                                +        "virtualmachinebackups.harvesterhci.io": False,
                                +        "virtualmachineimages.harvesterhci.io": False,
                                +        "virtualmachinerestores.harvesterhci.io": False,
                                +        "virtualmachinetemplates.harvesterhci.io": False,
                                +        "virtualmachinetemplateversions.harvesterhci.io": False,
                                +
                                +        "clusternetworks.network.harvesterhci.io": False,
                                +        "linkmonitors.network.harvesterhci.io": False,
                                +        "nodenetworks.network.harvesterhci.io": False,
                                +        "vlanconfigs.network.harvesterhci.io": False,
                                +        "vlanstatuses.network.harvesterhci.io": False,
                                +
                                +        "ksmtuneds.node.harvesterhci.io": False,
                                +        "loadbalancers.loadbalancer.harvesterhci.io": False,
                                +    }
                                +
                                +    if api_client.cluster_version.release >= (1, 2, 0):
                                +        # removed after `v1.2.0` (network-controller v0.3.3)
                                +        # ref: https://github.com/harvester/network-controller-harvester/pull/85
                                +        crds.pop("nodenetworks.network.harvesterhci.io")
                                +
                                +    return crds
                                +
                                def image(api_client, image_ubuntu, unique_name, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def image(api_client, image_ubuntu, unique_name, wait_timeout):
                                +    unique_image_id = f'image-{unique_name}'
                                +    code, data = api_client.images.create_by_url(
                                +        unique_image_id, image_ubuntu.url, display_name=f"{unique_name}-{image_ubuntu.name}"
                                +    )
                                +
                                +    assert 201 == code, (code, data)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.images.get(unique_image_id)
                                +        if 100 == data.get('status', {}).get('progress', 0):
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            "Failed to create Image with error:\n"
                                +            f"Status({code}): {data}"
                                +        )
                                +
                                +    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
                                +               user=image_ubuntu.ssh_user)
                                +
                                +    code, data = api_client.images.delete(unique_image_id)
                                +
                                def interceptor(api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def interceptor(api_client):
                                +    from inspect import getmembers, ismethod
                                +
                                +    class Interceptor:
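+        # collects bound `intercept_*` methods so tests can attach extra checks;
+        # `check(data)` runs every registered check against the given resource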
                                +
                                +        def intercepts(self):
                                +            meths = getmembers(self, predicate=ismethod)
                                +            return [m for name, m in meths if name.startswith("intercept_")]
                                +
                                +        def check(self, data):
                                +            for func in self.intercepts():
                                +                func(data)
                                +
                                +    return Interceptor()
                                +
                                def logging_addon()
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def logging_addon():
                                +    return SimpleNamespace(
                                +        namespace="cattle-logging-system",
                                +        name="rancher-logging",
                                +        enable_statuses=("deployed", "AddonDeploySuccessful"),
                                +        enable_toggled=False
                                +    )
                                +
                                def stopped_vm(request, api_client, ssh_keypair, wait_timeout, unique_name, image)
                                +
                                + +Expand source code + +
                                @pytest.fixture
                                +def stopped_vm(request, api_client, ssh_keypair, wait_timeout, unique_name, image):
                                +    unique_vm_name = f"{request.node.name.lstrip('test_').replace('_', '-')}-{unique_name}"
                                +    cpu, mem = 1, 2
                                +    pub_key, pri_key = ssh_keypair
                                +    vm_spec = api_client.vms.Spec(cpu, mem)
                                +    vm_spec.add_image("disk-0", image['id'])
                                +    vm_spec.run_strategy = "Halted"
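+    # "Halted" keeps the VM powered off after creation, so the fixture yields a Stopped VM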
                                +
                                +    userdata = yaml.safe_load(vm_spec.user_data)
                                +    userdata['ssh_authorized_keys'] = [pub_key]
                                +    vm_spec.user_data = yaml.dump(userdata)
                                +
                                +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                +    assert 201 == code, (code, data)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get(unique_vm_name)
                                +        if "Stopped" == data.get('status', {}).get('printableStatus'):
                                +            break
                                +        sleep(1)
                                +
                                +    yield unique_vm_name, image['user'], pri_key
                                +
                                +    code, data = api_client.vms.get(unique_vm_name)
                                +    vm_spec = api_client.vms.Spec.from_dict(data)
                                +
                                +    api_client.vms.delete(unique_vm_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.vms.get_status(unique_vm_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +
                                +    for vol in vm_spec.volumes:
                                +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                +        api_client.volumes.delete(vol_name)
                                +
                                def upgrade_target(request, unique_name)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def upgrade_target(request, unique_name):
                                +    version = request.config.getoption('--upgrade-target-version')
                                +    version = version or f"upgrade-{unique_name}"
                                +    iso_url = request.config.getoption('--upgrade-iso-url')
                                +    assert iso_url, "Target ISO URL should not be empty"
                                +    checksum = request.config.getoption("--upgrade-iso-checksum")
                                +    assert checksum, "Checksum for Target ISO should not be empty"
                                +
                                +    return version, iso_url, checksum
                                +
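The three `request.config.getoption` calls above imply CLI options registered elsewhere, most likely in a conftest that is not part of this page. A hypothetical registration sketch, shown only to clarify where the fixture's inputs come from:

    # Hypothetical conftest sketch; option names are taken from the fixture above,
    # defaults and help texts are illustrative only.
    def pytest_addoption(parser):
        parser.addoption("--upgrade-target-version", default=None,
                         help="Harvester version to upgrade to")
        parser.addoption("--upgrade-iso-url", default=None,
                         help="URL of the target Harvester ISO")
        parser.addoption("--upgrade-iso-checksum", default=None,
                         help="Checksum of the target Harvester ISO")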
                                def vm_network(api_client, unique_name, wait_timeout, cluster_network, vlan_id, cluster_state)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def vm_network(api_client, unique_name, wait_timeout, cluster_network, vlan_id, cluster_state):
                                +    code, data = api_client.networks.create(
                                +        unique_name, vlan_id, cluster_network=cluster_network
                                +    )
                                +    assert 201 == code, (code, data)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.networks.get(unique_name)
                                +        annotations = data['metadata'].get('annotations', {})
                                +        if 200 == code and annotations.get('network.harvesterhci.io/route'):
                                +            route = json.loads(annotations['network.harvesterhci.io/route'])
                                +            if route['cidr']:
                                +                break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            "VM network created but route info not available\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                +    cluster_state.network = data
                                +    yield dict(name=unique_name, cidr=route['cidr'], namespace=data['metadata']['namespace'])
                                +
                                +    code, data = api_client.networks.delete(unique_name)
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.networks.get(unique_name)
                                +        if 404 == code:
                                +            break
                                +        sleep(3)
                                +    else:
                                +        raise AssertionError(
                                +            f"Failed to remove VM network {unique_name} after {wait_timeout}s\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
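For illustration only, a sketch of attaching the yielded network to a VM spec; it mirrors the `add_network` usage in `test_preq_setup_vms` further down this page and adds nothing beyond it:

    def test_example_vm_on_vlan(api_client, unique_name, vm_network, image):
        vm_spec = api_client.vms.Spec(1, 2, mgmt_network=False)
        vm_spec.add_image("disk-0", image['id'])
        # the fixture yields dict(name=..., cidr=..., namespace=...)
        vm_spec.add_network("nic-1", f"{vm_network['namespace']}/{vm_network['name']}")
        code, data = api_client.vms.create(unique_name, vm_spec)
        assert 201 == code, (code, data)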

                                @@ -108,7 +520,6 @@

                                Classes

                                class TestAnyNodesUpgrade
                                -
                                Expand source code @@ -734,6 +1145,7 @@

                                Classes

                                else: raise AssertionError(f"Upgrade related image(s) still available:\n{upgrade_images}")
                                +

                                Class variables

                                var pytestmark
                                @@ -747,6 +1159,54 @@

                                Methods

                                def test_perform_upgrade(self, api_client, unique_name, upgrade_target, upgrade_timeout, interceptor)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="any_nodes_upgrade")
                                +def test_perform_upgrade(
                                +    self, api_client, unique_name, upgrade_target, upgrade_timeout, interceptor
                                +):
                                +    """
                                +    - perform upgrade
                                +    - check all nodes upgraded
                                +    """
                                +    # Check nodes counts
                                +    code, data = api_client.hosts.get()
                                +    assert code == 200, (code, data)
                                +    nodes = len(data['data'])
                                +
                                +    # create Upgrade version and start
                                +    skip_version_check = {"harvesterhci.io/skip-version-check": True}  # for test purpose
                                +    version, url, checksum = upgrade_target
                                +    version = f"{version}-{unique_name}"
                                +    code, data = api_client.versions.create(version, url, checksum)
                                +    assert 201 == code, f"Failed to create upgrade for {version}"
                                +    code, data = api_client.upgrades.create(version, annotations=skip_version_check)
                                +    assert 201 == code, f"Failed to start upgrade for {version}"
                                +    upgrade_name = data['metadata']['name']
                                +
                                +    # Check upgrade status
                                +    # TODO: check every upgrade stages
                                +    endtime = datetime.now() + timedelta(seconds=upgrade_timeout * nodes)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.upgrades.get(upgrade_name)
                                +        if 200 != code:
                                +            continue
                                +        interceptor.check(data)
                                +        conds = dict((c['type'], c) for c in data.get('status', {}).get('conditions', []))
                                +        state = data.get('metadata', {}).get('labels', {}).get('harvesterhci.io/upgradeState')
                                +        if "Succeeded" == state and "True" == conds.get('Completed', {}).get('status'):
                                +            break
                                +        if any("False" == c['status'] for c in conds.values()):
                                +            raise AssertionError(f"Upgrade failed with conditions: {conds.values()}")
                                +        sleep(30)
                                +    else:
                                +        raise AssertionError(
                                +            f"Upgrade timed out with conditions: {conds.values()}\n"
                                +            f"API Status({code}): {data}"
                                +        )
                                +
                                • perform upgrade
                                • check all nodes upgraded
                                • @@ -756,54 +1216,358 @@

                                  Methods

                                  def test_preq_logging_pods(self, api_client, logging_addon, wait_timeout, sleep_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(name="preq_setup_logging")
                                  +def test_preq_logging_pods(self, api_client, logging_addon, wait_timeout, sleep_timeout):
                                  +    # logging is an addon instead of built-in since v1.2.0
                                  +    if api_client.cluster_version.release >= (1, 2, 0):
                                  +        addon = "/".join([logging_addon.namespace, logging_addon.name])
                                  +        code, data = api_client.addons.get(addon)
                                  +        assert 200 == code, (code, data)
                                  +
+        if data.get('status', {}).get('status') not in logging_addon.enable_statuses:
                                  +            code, data = api_client.addons.enable(addon)
                                  +            assert 200 == code, (code, data)
                                  +            assert data.get('spec', {}).get('enabled', False), (code, data)
                                  +            logging_addon.enable_toggled = True
                                  +
                                  +            endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +            while endtime > datetime.now():
                                  +                code, data = api_client.addons.get(addon)
                                  +                if data.get('status', {}).get('status') in logging_addon.enable_statuses:
                                  +                    break
                                  +                sleep(sleep_timeout)
                                  +            else:
                                  +                raise AssertionError(
                                  +                    f"Failed to enable addon {addon} with {wait_timeout} timed out\n"
                                  +                    f"API Status({code}): {data}"
                                  +                )
                                  +
                                  +    code, pods = api_client.get_pods(namespace=logging_addon.namespace)
                                  +    assert code == 200 and len(pods['data']) > 0, "No logging pods found"
                                  +
                                  +    fails = []
                                  +    for pod in pods['data']:
                                  +        phase = pod["status"]["phase"]
                                  +        if phase not in ("Running", "Succeeded"):
                                  +            fails.append((pod['metadata']['name'], phase))
                                  +    else:
                                  +        assert not fails, (
                                  +            "\n".join(f"Pod({n})'s phase({p}) is not expected." for n, p in fails)
                                  +        )
                                  +
                                  def test_preq_setup_storageclass(self, config_storageclass)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(name="preq_setup_storageclass")
                                  +def test_preq_setup_storageclass(self, config_storageclass):
                                  +    """ Be used to trigger the fixture to setup storageclass"""
                                  +

Used to trigger the fixture that sets up the storage class

                                  def test_preq_setup_vmnetwork(self, vm_network)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(name="preq_setup_vmnetwork")
                                  +def test_preq_setup_vmnetwork(self, vm_network):
+    ''' Used to trigger the fixture that sets up the VM network '''
                                  +

Used to trigger the fixture that sets up the VM network

-def test_preq_setup_vms(self, api_client, ssh_keypair, unique_name, vm_checker, vm_shell, vm_network, image, config_storageclass, config_backup_target, wait_timeout, cluster_state)
+def test_preq_setup_vms(self,
                                  api_client,
                                  ssh_keypair,
                                  unique_name,
                                  vm_checker,
                                  vm_shell,
                                  vm_network,
                                  image,
                                  config_storageclass,
                                  config_backup_target,
                                  wait_timeout,
                                  cluster_state)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(name="preq_setup_vms")
                                  +def test_preq_setup_vms(
                                  +    self, api_client, ssh_keypair, unique_name, vm_checker, vm_shell, vm_network, image,
                                  +    config_storageclass, config_backup_target, wait_timeout, cluster_state
                                  +):
                                  +    # create new storage class, make it default
                                  +    # create 3 VMs:
                                  +    # - having the new storage class
                                  +    # - the VM that have some data written, take backup
                                  +    # - the VM restored from the backup
                                  +    pub_key, pri_key = ssh_keypair
                                  +    old_sc, new_sc = config_storageclass
                                  +    unique_vm_name = f"ug-vm-{unique_name}"
                                  +
                                  +    cpu, mem, size = 1, 2, 5
                                  +    vm_spec = api_client.vms.Spec(cpu, mem, mgmt_network=False)
                                  +    vm_spec.add_image('disk-0', image['id'], size=size)
                                  +    vm_spec.add_network('nic-1', f"{vm_network['namespace']}/{vm_network['name']}")
                                  +    userdata = yaml.safe_load(vm_spec.user_data)
                                  +    userdata['ssh_authorized_keys'] = [pub_key]
                                  +    vm_spec.user_data = yaml.dump(userdata)
                                  +
                                  +    code, data = api_client.vms.create(unique_vm_name, vm_spec)
                                  +    assert 201 == code, (code, data)
                                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ["nic-1"])
                                  +    assert vm_got_ips, (
                                  +        f"Failed to Start VM({unique_vm_name}) with errors:\n"
                                  +        f"Status: {data.get('status')}\n"
                                  +        f"API Status({code}): {data}"
                                  +    )
                                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                  +                 if iface['name'] == 'nic-1')
                                  +    # write data into VM
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        try:
                                  +            with vm_shell.login(vm_ip, image['user'], pkey=pri_key) as sh:
                                  +                cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                  +                assert cloud_inited and not err, (out, err)
                                  +                out, err = sh.exec_command(
                                  +                    "dd if=/dev/urandom of=./generate_file bs=1M count=1024; sync"
                                  +                )
                                  +                assert not out, (out, err)
                                  +                vm1_md5, err = sh.exec_command(
                                  +                    "md5sum ./generate_file > ./generate_file.md5; cat ./generate_file.md5"
                                  +                )
                                  +                assert not err, (vm1_md5, err)
                                  +                break
                                  +        except (SSHException, NoValidConnectionsError, TimeoutError):
                                  +            sleep(5)
                                  +    else:
                                  +        raise AssertionError("Timed out while writing data into VM")
                                  +
                                  +    # Take backup then check it's ready
                                  +    code, data = api_client.vms.backup(unique_vm_name, unique_vm_name)
                                  +    assert 204 == code, (code, data)
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, backup = api_client.backups.get(unique_vm_name)
                                  +        if 200 == code and backup.get('status', {}).get('readyToUse'):
                                  +            break
                                  +        sleep(3)
                                  +    else:
                                  +        raise AssertionError(
                                  +            f'Timed-out waiting for the backup \'{unique_vm_name}\' to be ready.'
                                  +        )
                                  +    # restore into new VM
                                  +    restored_vm_name = f"r-{unique_vm_name}"
                                  +    spec = api_client.backups.RestoreSpec.for_new(restored_vm_name)
                                  +    code, data = api_client.backups.restore(unique_vm_name, spec)
                                  +    assert 201 == code, (code, data)
                                  +    # Check restore VM is created
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.vms.get(restored_vm_name)
                                  +        if 200 == code:
                                  +            break
                                  +        sleep(3)
                                  +    else:
                                  +        raise AssertionError(
                                  +            f"restored VM {restored_vm_name} is not created"
                                  +        )
                                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(restored_vm_name, ["nic-1"])
                                  +    assert vm_got_ips, (
                                  +        f"Failed to Start VM({restored_vm_name}) with errors:\n"
                                  +        f"Status: {data.get('status')}\n"
                                  +        f"API Status({code}): {data}"
                                  +    )
                                  +    # Check data consistency
                                  +    r_vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                  +                   if iface['name'] == 'nic-1')
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        try:
                                  +            with vm_shell.login(r_vm_ip, image['user'], pkey=pri_key) as sh:
                                  +                cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                  +                assert cloud_inited and not err, (out, err)
                                  +                out, err = sh.exec_command("md5sum -c ./generate_file.md5")
                                  +                assert not err, (out, err)
                                  +                vm2_md5, err = sh.exec_command("cat ./generate_file.md5")
                                  +                assert not err, (vm2_md5, err)
                                  +                assert vm1_md5 == vm2_md5
                                  +                out, err = sh.exec_command(
                                  +                    f"ping -c1 {vm_ip} > /dev/null && echo -n success || echo -n fail"
                                  +                )
                                  +                assert "success" == out and not err
                                  +                break
                                  +        except (SSHException, NoValidConnectionsError, ConnectionResetError, TimeoutError):
                                  +            sleep(5)
                                  +    else:
                                  +        raise AssertionError("Unable to login to restored VM to check data consistency")
                                  +
                                  +    # Create VM having additional volume with new storage class
                                  +    vm_spec.add_volume("vol-1", 5, storage_cls=new_sc['metadata']['name'])
                                  +    code, data = api_client.vms.create(f"sc-{unique_vm_name}", vm_spec)
                                  +    assert 201 == code, (code, data)
                                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(f"sc-{unique_vm_name}", ["nic-1"])
                                  +    assert vm_got_ips, (
                                  +        f"Failed to Start VM(sc-{unique_vm_name}) with errors:\n"
                                  +        f"Status: {data.get('status')}\n"
                                  +        f"API Status({code}): {data}"
                                  +    )
                                  +
                                  +    # store into cluster's state
                                  +    names = [unique_vm_name, f"r-{unique_vm_name}", f"sc-{unique_vm_name}"]
                                  +    cluster_state.vms = dict(md5=vm1_md5, names=names, ssh_user=image['user'], pkey=pri_key)
                                  +
                                  def test_upgrade_image_deleted(self, api_client, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_upgrade_image_deleted(self, api_client, wait_timeout):
+    # wait up to 300s for the upgrade-related images to be deleted
                                  +    endtime = datetime.now() + timedelta(seconds=min(wait_timeout / 5, 300))
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.images.get(namespace='harvester-system')
                                  +        upgrade_images = [image for image in data['items']
                                  +                          if 'upgrade' in image['spec']['displayName']]
+        if not upgrade_images:
+            break
+        sleep(3)
                                  +    else:
                                  +        raise AssertionError(f"Upgrade related image(s) still available:\n{upgrade_images}")
                                  +
                                  def test_upgrade_vm_deleted(self, api_client, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_upgrade_vm_deleted(self, api_client, wait_timeout):
+    # wait up to 300s for the upgrade-related VMs to be deleted
                                  +    endtime = datetime.now() + timedelta(seconds=min(wait_timeout / 5, 300))
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.vms.get(namespace='harvester-system')
                                  +        upgrade_vms = [vm for vm in data['data'] if 'upgrade' in vm['id']]
+        if not upgrade_vms:
+            break
+        sleep(3)
                                  +    else:
                                  +        raise AssertionError(f"Upgrade related VM still available:\n{upgrade_vms}")
                                  +
                                  def test_upgrade_volume_deleted(self, api_client, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_upgrade_volume_deleted(self, api_client, wait_timeout):
+    # wait up to 300s for the upgrade-related volumes to be deleted
                                  +    endtime = datetime.now() + timedelta(seconds=min(wait_timeout / 5, 300))
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.volumes.get(namespace='harvester-system')
                                  +        upgrade_vols = [vol for vol in data['data']
                                  +                        if 'upgrade' in vol['id'] and not vol['id'].endswith('log-archive')]
+        if not upgrade_vols:
+            break
+        sleep(3)
                                  +    else:
                                  +        raise AssertionError(f"Upgrade related volume(s) still available:\n{upgrade_vols}")
                                  +
                                  def test_verify_audit_log(self, api_client, host_shell, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_verify_audit_log(self, api_client, host_shell, wait_timeout):
                                  +    code, data = api_client.hosts.get()
                                  +    assert 200 == code, (code, data)
                                  +    label_main = "node-role.kubernetes.io/control-plane"
                                  +    masters = [n for n in data['data'] if n['metadata']['labels'].get(label_main) == "true"]
                                  +    assert len(masters) > 0, "No master nodes found"
                                  +
                                  +    script = ("sudo tail /var/lib/rancher/rke2/server/logs/audit.log | awk 'END{print}' "
                                  +              "| jq .requestReceivedTimestamp "
                                  +              "| xargs -I {} date -d \"{}\" +%s")
                                  +
                                  +    node_ips = [n["metadata"]["annotations"][NODE_INTERNAL_IP_ANNOTATION] for n in masters]
                                  +    cmp = dict()
                                  +    done = set()
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        for ip in done.symmetric_difference(node_ips):
                                  +            try:
                                  +                with host_shell.login(ip) as shell:
                                  +                    out, err = shell.exec_command(script)
                                  +                    timestamp = int(out)
                                  +                    if not err and ip not in cmp:
                                  +                        cmp[ip] = timestamp
                                  +                        continue
                                  +                    if not err and cmp[ip] < timestamp:
                                  +                        done.add(ip)
                                  +            except (SSHException, NoValidConnectionsError, ConnectionResetError, TimeoutError):
                                  +                continue
                                  +
                                  +        if not done.symmetric_difference(node_ips):
                                  +            break
                                  +        sleep(5)
                                  +    else:
                                  +        raise AssertionError(
                                  +            "\n".join("Node {ip} audit log is not updated." for ip in set(node_ips) ^ done)
                                  +        )
                                  +
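The shell pipeline stored in `script` above chains three steps: take the last audit entry, extract `requestReceivedTimestamp`, and convert it to epoch seconds. A rough Python equivalent, offered as a sketch assuming the audit log holds one JSON object per line:

    import json
    from datetime import datetime

    def last_audit_epoch(audit_log_text):
        last_line = audit_log_text.strip().splitlines()[-1]        # awk 'END{print}'
        ts = json.loads(last_line)["requestReceivedTimestamp"]     # jq .requestReceivedTimestamp
        # date -d "<ts>" +%s : RFC3339 timestamp -> epoch seconds
        return int(datetime.fromisoformat(ts.replace("Z", "+00:00")).timestamp())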
                                  def test_verify_crds_existed(self, api_client, harvester_crds)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_verify_crds_existed(self, api_client, harvester_crds):
                                  +    """ Verify crds existed
                                  +    Criteria:
                                  +    - crds should be existed
                                  +    """
                                  +    not_existed_crds = []
                                  +    exist_crds = True
                                  +    for crd in harvester_crds:
                                  +        code, _ = api_client.get_crds(name=crd)
                                  +
                                  +        if code != 200:
                                  +            exist_crds = False
                                  +            not_existed_crds.append(crd)
                                  +
                                  +    if not exist_crds:
                                  +        raise AssertionError(f"CRDs {not_existed_crds} are not existed")
                                  +

Verify CRDs exist Criteria: - CRDs should exist

                                  @@ -812,6 +1576,67 @@

                                  Methods

                                  def test_verify_deployed_components_version(self, api_client)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_verify_deployed_components_version(self, api_client):
                                  +    """ Verify deployed kubevirt and longhorn version
                                  +    Criteria:
                                  +    - except version(get from apps.catalog.cattle.io/harvester) should be equal to the version
                                  +      of kubevirt and longhorn
                                  +    """
                                  +
                                  +    kubevirt_version_existed = False
                                  +    engine_image_version_existed = False
                                  +    longhorn_manager_version_existed = False
                                  +
+    # Get expected versions from apps.catalog.cattle.io/harvester
                                  +    code, apps = api_client.get_apps_catalog(name="harvester",
                                  +                                             namespace=DEFAULT_HARVESTER_NAMESPACE)
                                  +    assert code == 200 and apps['type'] != "error", (
                                  +        f"Failed to get apps.catalog.cattle.io/harvester: {apps['message']}")
                                  +
+    # Get expected images of kubevirt and longhorn
                                  +    kubevirt_operator = (
                                  +        apps['spec']['chart']['values']['kubevirt-operator']['containers']['operator'])
                                  +    kubevirt_operator_image = (
                                  +        f"{kubevirt_operator['image']['repository']}:{kubevirt_operator['image']['tag']}")
                                  +
                                  +    longhorn = apps['spec']['chart']['values']['longhorn']['image']['longhorn']
                                  +    longhorn_images = {
                                  +        "engine-image": f"{longhorn['engine']['repository']}:{longhorn['engine']['tag']}",
                                  +        "longhorn-manager": f"{longhorn['manager']['repository']}:{longhorn['manager']['tag']}"
                                  +    }
                                  +
                                  +    # Verify kubevirt version
                                  +    code, pods = api_client.get_pods(namespace=DEFAULT_HARVESTER_NAMESPACE)
                                  +    assert code == 200 and len(pods['data']) > 0, (
                                  +        f"Failed to get pods in namespace {DEFAULT_HARVESTER_NAMESPACE}")
                                  +
                                  +    for pod in pods['data']:
                                  +        if "virt-operator" in pod['metadata']['name']:
                                  +            kubevirt_version_existed = (
                                  +                kubevirt_operator_image == pod['spec']['containers'][0]['image'])
                                  +
                                  +    # Verify longhorn version
                                  +    code, pods = api_client.get_pods(namespace=DEFAULT_LONGHORN_NAMESPACE)
                                  +    assert code == 200 and len(pods['data']) > 0, (
                                  +        f"Failed to get pods in namespace {DEFAULT_LONGHORN_NAMESPACE}")
                                  +
                                  +    for pod in pods['data']:
                                  +        if "longhorn-manager" in pod['metadata']['name']:
                                  +            longhorn_manager_version_existed = (
                                  +              longhorn_images["longhorn-manager"] == pod['spec']['containers'][0]['image'])
                                  +        elif "engine-image" in pod['metadata']['name']:
                                  +            engine_image_version_existed = (
                                  +                longhorn_images["engine-image"] == pod['spec']['containers'][0]['image'])
                                  +
                                  +    assert kubevirt_version_existed, "kubevirt version is not correct"
                                  +    assert engine_image_version_existed, "longhorn engine image version is not correct"
                                  +    assert longhorn_manager_version_existed, "longhorn manager version is not correct"
                                  +

Verify deployed kubevirt and longhorn versions Criteria: - the expected versions (from apps.catalog.cattle.io/harvester) should equal the deployed versions of kubevirt and longhorn @@ -821,6 +1646,38 @@

                                  Methods

                                  def test_verify_logging_pods(self, api_client, logging_addon)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade", "preq_setup_logging"])
                                  +def test_verify_logging_pods(self, api_client, logging_addon):
                                  +    """ Verify logging pods and logs
                                  +    Criteria: https://github.com/harvester/tests/issues/535
                                  +    """
                                  +    # logging is an addon instead of built-in since v1.2.0
                                  +    if api_client.cluster_version.release >= (1, 2, 0):
                                  +        addon = "/".join([logging_addon.namespace, logging_addon.name])
                                  +        code, data = api_client.addons.get(addon)
                                  +        assert data.get('status', {}).get('status') in logging_addon.enable_statuses
                                  +
                                  +    code, pods = api_client.get_pods(namespace=logging_addon.namespace)
                                  +    assert code == 200 and len(pods['data']) > 0, "No logging pods found"
                                  +
                                  +    fails = []
                                  +    for pod in pods['data']:
                                  +        phase = pod["status"]["phase"]
                                  +        if phase not in ("Running", "Succeeded"):
                                  +            fails.append((pod['metadata']['name'], phase))
                                  +    else:
                                  +        assert not fails, (
                                  +            "\n".join(f"Pod({n})'s phase({p}) is not expected." for n, p in fails)
                                  +        )
                                  +
                                  +    # teardown
                                  +    if logging_addon.enable_toggled:
                                  +        api_client.addons.disable(addon)
                                  +

                                  Verify logging pods and logs Criteria: https://github.com/harvester/tests/issues/535

                                  @@ -828,6 +1685,34 @@

                                  Methods

                                  def test_verify_network(self, api_client, cluster_state)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade", "preq_setup_vmnetwork"])
                                  +def test_verify_network(self, api_client, cluster_state):
                                  +    """ Verify cluster and VLAN networks
                                  +    - cluster network `mgmt` should exists
                                  +    - Created VLAN should exists
                                  +    """
                                  +
                                  +    code, cnets = api_client.clusternetworks.get()
                                  +    assert code == 200, (
                                  +        "Failed to get Networks: %d, %s" % (code, cnets))
                                  +
                                  +    assert len(cnets["items"]) > 0, ("No Networks found")
                                  +
                                  +    assert any(n['metadata']['name'] == "mgmt" for n in cnets['items']), (
                                  +        "Cluster network mgmt not found")
                                  +
                                  +    code, vnets = api_client.networks.get()
                                  +    assert code == 200, (f"Failed to get VLANs: {code}, {vnets}" % (code, vnets))
                                  +    assert len(vnets["items"]) > 0, ("No VLANs found")
                                  +
                                  +    used_vlan = cluster_state.network['metadata']['name']
                                  +    assert any(used_vlan == n['metadata']['name'] for n in vnets['items']), (
                                  +        f"VLAN {used_vlan} not found")
                                  +

Verify cluster and VLAN networks - cluster network mgmt should exist - the created VLAN should exist

                                  @@ -836,12 +1721,106 @@

                                  Methods

                                  def test_verify_os_version(self, request, api_client, cluster_state, host_shell)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_verify_os_version(self, request, api_client, cluster_state, host_shell):
                                  +    # Verify /etc/os-release on all nodes
                                  +    script = "cat /etc/os-release"
                                  +    if not cluster_state.version_verify:
                                  +        pytest.skip("skip verify os version")
                                  +
                                  +    # Get all nodes
                                  +    code, data = api_client.hosts.get()
                                  +    assert 200 == code, (code, data)
                                  +    for node in data['data']:
                                  +        node_ip = node["metadata"]["annotations"][NODE_INTERNAL_IP_ANNOTATION]
                                  +
                                  +        with host_shell.login(node_ip) as sh:
                                  +            lines, stderr = sh.exec_command(script, get_pty=True, splitlines=True)
                                  +            assert not stderr, (
                                  +                f"Failed to execute {script} on {node_ip}: {stderr}")
                                  +
                                  +            # eg: PRETTY_NAME="Harvester v1.1.0"
                                  +            assert cluster_state.version == re.findall(r"Harvester (.+?)\"", lines[3])[0], (
                                  +                "OS version is not correct")
                                  +
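To make the regex in the assertion concrete, here is a small worked example (standalone, sample value only):

    import re

    line = 'PRETTY_NAME="Harvester v1.1.0"'        # sample /etc/os-release line
    assert re.findall(r"Harvester (.+?)\"", line) == ["v1.1.0"]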
                                  def test_verify_restore_vm(self, api_client, cluster_state, vm_shell, vm_checker, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade", "preq_setup_vms"])
                                  +def test_verify_restore_vm(
                                  +    self, api_client, cluster_state, vm_shell, vm_checker, wait_timeout
                                  +):
                                  +    """ Verify VM restored from the backup
                                  +    Criteria:
                                  +    - VM should able to start
                                  +    - data in VM should not lost
                                  +    """
                                  +
                                  +    backup_name = cluster_state.vms['names'][0]
                                  +    restored_vm_name = f"new-r-{backup_name}"
                                  +
                                  +    # Restore VM from backup and check networking is good
                                  +    restore_spec = api_client.backups.RestoreSpec.for_new(restored_vm_name)
                                  +    code, data = api_client.backups.restore(backup_name, restore_spec)
                                  +    assert code == 201, "Unable to restore backup {backup_name} after upgrade"
                                  +    # Check restore VM is created
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.vms.get(restored_vm_name)
                                  +        if 200 == code:
                                  +            break
                                  +        sleep(3)
                                  +    else:
                                  +        raise AssertionError(
                                  +            f"restored VM {restored_vm_name} is not created"
                                  +        )
                                  +    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(restored_vm_name, ["nic-1"])
                                  +    assert vm_got_ips, (
                                  +        f"Failed to Start VM({restored_vm_name}) with errors:\n"
                                  +        f"Status: {data.get('status')}\n"
                                  +        f"API Status({code}): {data}"
                                  +    )
                                  +
                                  +    # Check data in restored VM is consistent
                                  +    pri_key, ssh_user = cluster_state.vms['pkey'], cluster_state.vms['ssh_user']
                                  +    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                  +                 if iface['name'] == 'nic-1')
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        try:
                                  +            with vm_shell.login(vm_ip, ssh_user, pkey=pri_key) as sh:
                                  +                cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
                                  +                assert cloud_inited and not err, (out, err)
                                  +                out, err = sh.exec_command("md5sum -c ./generate_file.md5")
                                  +                assert not err, (out, err)
                                  +                md5, err = sh.exec_command("cat ./generate_file.md5")
                                  +                assert not err, (md5, err)
                                  +                assert md5 == cluster_state.vms['md5']
                                  +                break
                                  +        except (SSHException, NoValidConnectionsError, ConnectionResetError, TimeoutError):
                                  +            sleep(5)
                                  +    else:
                                  +        raise AssertionError("Unable to login to restored VM to check data consistency")
                                  +
                                  +    # teardown: remove the VM
                                  +    code, data = api_client.vms.get(restored_vm_name)
                                  +    spec = api_client.vms.Spec.from_dict(data)
                                  +    _ = vm_checker.wait_deleted(restored_vm_name)
                                  +    for vol in spec.volumes:
                                  +        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                  +        api_client.volumes.delete(vol_name)
                                  +

Verify VM restored from the backup Criteria: - VM should be able to start - data in VM should not be lost @@ -851,12 +1830,70 @@

                                  Methods

                                  def test_verify_rke2_version(self, api_client, host_shell)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade"])
                                  +def test_verify_rke2_version(self, api_client, host_shell):
                                  +    # Verify node version on all nodes
                                  +    script = "cat /etc/harvester-release.yaml"
                                  +
                                  +    label_main = "node-role.kubernetes.io/control-plane"
                                  +    code, data = api_client.hosts.get()
                                  +    assert 200 == code, (code, data)
                                  +    masters = [n for n in data['data'] if n['metadata']['labels'].get(label_main) == "true"]
                                  +
                                  +    # Verify rke2 version
                                  +    except_rke2_version = ""
                                  +    for node in masters:
                                  +        node_ip = node["metadata"]["annotations"][NODE_INTERNAL_IP_ANNOTATION]
                                  +
                                  +        # Get except rke2 version
                                  +        if except_rke2_version == "":
                                  +            with host_shell.login(node_ip) as sh:
                                  +                lines, stderr = sh.exec_command(script, get_pty=True, splitlines=True)
                                  +                assert not stderr, (
                                  +                    f"Failed to execute {script} on {node_ip}: {stderr}")
                                  +
                                  +                for line in lines:
                                  +                    if "kubernetes" in line:
                                  +                        except_rke2_version = re.findall(r"kubernetes: (.*)", line.strip())[0]
                                  +                        break
                                  +
                                  +                assert except_rke2_version != "", ("Failed to get except rke2 version")
                                  +
                                  +        assert node.get('status', {}).get('nodeInfo', {}).get(
                                  +               "kubeletVersion", "") == except_rke2_version, (
                                  +               "rke2 version is not correct")
                                  +
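Similarly, a worked example of the `kubernetes:` line parsed from /etc/harvester-release.yaml (the version string is a sample, not taken from this page):

    import re

    line = "kubernetes: v1.25.9+rke2r1"            # sample line from harvester-release.yaml
    assert re.findall(r"kubernetes: (.*)", line)[0] == "v1.25.9+rke2r1"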
                                  def test_verify_storage_class(self, api_client, cluster_state)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade", "preq_setup_storageclass"])
                                  +def test_verify_storage_class(self, api_client, cluster_state):
                                  +    """ Verify StorageClasses and defaults
                                  +    - `new_sc` should be settle as default
                                  +    - `longhorn` should exists
                                  +    """
                                  +    code, scs = api_client.scs.get()
                                  +    assert code == 200, ("Failed to get StorageClasses: %d, %s" % (code, scs))
                                  +    assert len(scs["items"]) > 0, ("No StorageClasses found")
                                  +
                                  +    created_sc = cluster_state.scs[-1]['metadata']['name']
                                  +    names = {sc['metadata']['name']: sc['metadata'].get('annotations') for sc in scs['items']}
                                  +    assert "longhorn" in names
                                  +    assert created_sc in names
                                  +    assert "storageclass.kubernetes.io/is-default-class" in names[created_sc]
                                  +    assert "true" == names[created_sc]["storageclass.kubernetes.io/is-default-class"]
                                  +

Verify StorageClasses and defaults - new_sc should be set as the default - longhorn should exist

                                  @@ -865,6 +1902,71 @@

                                  Methods

                                  def test_verify_vms(self, api_client, cluster_state, vm_shell, vm_checker, wait_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.dependency(depends=["any_nodes_upgrade", "preq_setup_vms"])
                                  +def test_verify_vms(self, api_client, cluster_state, vm_shell, vm_checker, wait_timeout):
                                  +    """ Verify VMs' state and data
                                  +    Criteria:
                                  +    - VMs should keep in running state
                                  +    - data in VMs should not lost
                                  +    """
                                  +
                                  +    code, vmis = api_client.vms.get_status()
                                  +    assert code == 200 and len(vmis['data']), (code, vmis)
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        fails, ips = list(), dict()
                                  +        for name in cluster_state.vms['names']:
                                  +            code, data = api_client.vms.get_status(name)
                                  +            try:
                                  +                assert 200 == code
                                  +                assert "Running" == data['status']['phase']
                                  +                assert data['status']['nodeName']
                                  +                ips[name] = next(iface['ipAddress'] for iface in data['status']['interfaces']
                                  +                                 if iface['name'] == 'nic-1')
                                  +            except (AssertionError, TypeError, StopIteration, KeyError) as ex:
                                  +                fails.append((name, (ex, code, data)))
                                  +        if not fails:
                                  +            break
                                  +    else:
                                  +        raise AssertionError("\n".join(
                                  +            f"VM {name} is not in expected state.\nException: {ex}\nAPI Status({code}): {data}"
                                  +            for (name, (ex, code, data)) in fails)
                                  +        )
                                  +
                                  +    pri_key, ssh_user = cluster_state.vms['pkey'], cluster_state.vms['ssh_user']
                                  +    for name in cluster_state.vms['names'][:-1]:
                                  +        vm_ip = ips[name]
                                  +        endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +        while endtime > datetime.now():
                                  +            try:
                                  +                with vm_shell.login(vm_ip, ssh_user, pkey=pri_key) as sh:
                                  +                    out, err = sh.exec_command("md5sum -c ./generate_file.md5")
                                  +                    assert not err, (out, err)
                                  +                    md5, err = sh.exec_command("cat ./generate_file.md5")
                                  +                    assert not err, (md5, err)
                                  +                    assert md5 == cluster_state.vms['md5']
                                  +                    break
                                  +            except (SSHException, NoValidConnectionsError, ConnectionResetError, TimeoutError):
                                  +                sleep(5)
                                  +        else:
                                  +            fails.append(f"Data in VM({name}, {vm_ip}) is inconsistent.")
                                  +
                                  +    assert not fails, "\n".join(fails)
                                  +
                                  +    # Teardown: remove all VMs
                                  +    for name in cluster_state.vms['names']:
                                  +        code, data = api_client.vms.get(name)
                                  +        spec = api_client.vms.Spec.from_dict(data)
                                  +        _ = vm_checker.wait_deleted(name)
                                  +        for vol in spec.volumes:
                                  +            vol_name = vol['volume']['persistentVolumeClaim']['claimName']
                                  +            api_client.volumes.delete(vol_name)
                                  +
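The checksum comparison above presumes each VM already holds a data file and its recorded digest. A hedged sketch (not the actual `preq_setup_vms` fixture) of how that file could have been produced inside each VM before the upgrade started, reusing the same shell helper and variables as the test body:

    # Hypothetical pre-upgrade setup: write random data and record its md5 for later comparison
    with vm_shell.login(vm_ip, ssh_user, pkey=pri_key) as sh:
        sh.exec_command("dd if=/dev/urandom of=./generate_file bs=1M count=512; sync")
        md5, err = sh.exec_command("md5sum ./generate_file > ./generate_file.md5; cat ./generate_file.md5")
        # the returned `md5` string would then be stored as cluster_state.vms['md5']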

Verify VMs' state and data Criteria: - VMs should keep in running state
@@ -876,7 +1978,6 @@

                                  Methods

                                  class TestInvalidUpgrade
                                  -
                                  Expand source code @@ -1052,6 +2153,7 @@

                                  Methods

                                  # Teardown invalid upgrade api_client.versions.delete(version)
                                  +

                                  Class variables

                                  var pytestmark
                                  @@ -1065,12 +2167,125 @@

                                  Methods

                                  def test_checksum(self, api_client, unique_name, upgrade_target, upgrade_timeout, resort)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.parametrize(
                                  +    "resort", [slice(None, None, -1), slice(None, None, 2)], ids=("mismatched", "invalid")
                                  +)
                                  +def test_checksum(self, api_client, unique_name, upgrade_target, upgrade_timeout, resort):
                                  +    version, url, checksum = upgrade_target
                                  +    version = f"{version}-{unique_name}"
                                  +
                                  +    if resort.step == 2:
                                  +        # ref: https://github.com/harvester/harvester/issues/5480
                                  +        code, data = api_client.versions.create(version, url, checksum[resort])
                                  +        try:
                                  +            assert 400 == code, (code, data)
                                  +        finally:
                                  +            return api_client.versions.delete(version)
                                  +
                                  +    code, data = api_client.versions.create(version, url, checksum[resort])
                                  +    assert 201 == code, f"Failed to create upgrade for {version}"
                                  +    code, data = api_client.upgrades.create(version)
                                  +    assert 201 == code, f"Failed to start upgrade for {version}"
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=upgrade_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.upgrades.get(data['metadata']['name'])
                                  +        conds = dict((c['type'], c) for c in data.get('status', {}).get('conditions', []))
                                  +        verified = [
                                  +            "False" == conds.get('Completed', {}).get('status'),
                                  +            "False" == conds.get('ImageReady', {}).get('status'),
                                  +            "n't match the file actual check" in conds.get('ImageReady', {}).get('message', "")
                                  +        ]
                                  +        if all(verified):
                                  +            break
                                  +    else:
                                  +        raise AssertionError(f"Upgrade NOT failed in expected conditions: {conds}")
                                  +
                                  +    # teardown
                                  +    api_client.upgrades.delete(data['metadata']['name'])
                                  +    api_client.versions.delete(version)
                                  +
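The `resort` parameter above is a slice applied to the valid checksum from `upgrade_target`: reversing it yields a well-formed but mismatched digest, while taking every second character yields an outright invalid one (rejected with HTTP 400, per harvester/harvester#5480). A minimal illustration with a made-up digest value:

    # Illustrative only: how the two parametrized slices mangle a valid checksum string
    checksum = "abc123" * 10                      # hypothetical digest value
    mismatched = checksum[slice(None, None, -1)]  # reversed: right length, wrong value
    invalid = checksum[slice(None, None, 2)]      # every 2nd char: not a valid digest length
    assert len(mismatched) == len(checksum)
    assert len(invalid) == len(checksum) // 2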
-def test_degraded_volume(self, api_client, wait_timeout, vm_shell_from_host, vm_checker, upgrade_target, stopped_vm)
+def test_degraded_volume(self,
                                  api_client,
                                  wait_timeout,
                                  vm_shell_from_host,
                                  vm_checker,
                                  upgrade_target,
                                  stopped_vm)
                                  +
                                  + +Expand source code + +
                                  def test_degraded_volume(self, api_client, wait_timeout, vm_shell_from_host,
                                  +                         vm_checker, upgrade_target, stopped_vm):
                                  +    """
+    Criteria: creating an upgrade should fail if there are any degraded volumes
                                  +    Steps:
                                  +    1. Create a VM using a volume with 3 replicas.
                                  +    2. Delete one replica of the volume. Let the volume stay in
                                  +       degraded state.
                                  +    3. Immediately upgrade Harvester.
                                  +    4. Upgrade should fail.
                                  +    """
                                  +    # https://github.com/harvester/harvester/issues/6425
                                  +    code, data = api_client.hosts.get()
                                  +    assert 200 == code, (code, data)
                                  +    if (cluster_size := len(data['data'])) < 3:
                                  +        pytest.skip(
                                  +            f"Degraded volumes only checked on 3+ nodes cluster, skip on {cluster_size}."
                                  +        )
                                  +
                                  +    vm_name, ssh_user, pri_key = stopped_vm
                                  +    vm_started, (code, vmi) = vm_checker.wait_started(vm_name)
                                  +    assert vm_started, (code, vmi)
                                  +
+    # Write data into the VM
                                  +    vm_ip = next(iface['ipAddress'] for iface in vmi['status']['interfaces']
                                  +                 if iface['name'] == 'default')
                                  +    code, data = api_client.hosts.get(vmi['status']['nodeName'])
                                  +    host_ip = next(addr['address'] for addr in data['status']['addresses']
                                  +                   if addr['type'] == 'InternalIP')
                                  +    with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
                                  +        stdout, stderr = sh.exec_command(
                                  +            "dd if=/dev/urandom of=./generate_file bs=1M count=1024; sync"
                                  +        )
                                  +        assert not stdout, (stdout, stderr)
                                  +
                                  +    # Get pv name of the volume
                                  +    claim_name = vmi["spec"]["volumes"][0]["persistentVolumeClaim"]["claimName"]
                                  +    code, data = api_client.volumes.get(name=claim_name)
                                  +    assert code == 200, f"Failed to get volume {claim_name}: {data}"
                                  +    pv_name = data["spec"]["volumeName"]
                                  +
+    # Make the volume become degraded
                                  +    code, data = api_client.lhreplicas.get()
                                  +    assert code == 200 and data['items'], f"Failed to get longhorn replicas ({code}): {data}"
                                  +    replica = next(r for r in data["items"] if pv_name == r['spec']['volumeName'])
                                  +    api_client.lhreplicas.delete(name=replica['metadata']['name'])
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.lhvolumes.get(pv_name)
                                  +        if 200 == code and "degraded" == data['status']['robustness']:
                                  +            break
                                  +    else:
                                  +        raise AssertionError(
                                  +            f"Unable to make the Volume {pv_name} degraded\n"
                                  +            f"API Status({code}): {data}"
                                  +        )
                                  +
                                  +    # create upgrade and verify it is not allowed
                                  +    version, url, checksum = upgrade_target
                                  +    code, data = api_client.versions.create(version, url, checksum)
                                  +    assert code == 201, f"Failed to create version {version}: {data}"
                                  +    code, data = api_client.upgrades.create(version)
                                  +    assert code == 400, f"Failed to verify degraded volume: {code}, {data}"
                                  +
                                  +    # Teardown invalid upgrade
                                  +    api_client.versions.delete(version)
                                  +

Criteria: creating an upgrade should fail if there are any degraded volumes Steps: 1. Create a VM using a volume with 3 replicas.
@@ -1083,6 +2298,46 @@

                                  Methods

                                  def test_iso_url(self, api_client, unique_name, upgrade_timeout)
                                  +
                                  + +Expand source code + +
                                  def test_iso_url(self, api_client, unique_name, upgrade_timeout):
                                  +    """
                                  +    Steps:
                                  +    1. Create an invalid manifest.
                                  +    2. Try to upgrade with the invalid manifest.
+    3. The upgrade should not start and should eventually fail.
                                  +    """
                                  +    version, url = unique_name, "https://invalid_iso_url"
                                  +    checksum = sha512(b'not_a_valid_checksum').hexdigest()
                                  +
                                  +    code, data = api_client.versions.get(version)
                                  +    if code != 200:
                                  +        code, data = api_client.versions.create(version, url, checksum)
                                  +        assert code == 201, f"Failed to create invalid version: {data}"
                                  +
                                  +    code, data = api_client.upgrades.create(version)
                                  +    assert code == 201, f"Failed to create invalid upgrade: {data}"
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=upgrade_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.upgrades.get(data['metadata']['name'])
                                  +        conds = dict((c['type'], c) for c in data.get('status', {}).get('conditions', []))
                                  +        verified = [
                                  +            "False" == conds.get('Completed', {}).get('status'),
                                  +            "False" == conds.get('ImageReady', {}).get('status'),
                                  +            "retry limit" in conds.get('ImageReady', {}).get('message', "")
                                  +        ]
                                  +        if all(verified):
                                  +            break
                                  +    else:
                                  +        raise AssertionError(f"Upgrade NOT failed in expected conditions: {conds}")
                                  +
                                  +    # teardown
                                  +    api_client.upgrades.delete(data['metadata']['name'])
                                  +    api_client.versions.delete(version)
                                  +
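The version created here pairs an unreachable ISO URL with a digest that is syntactically valid (128 hex characters) but meaningless, so the expected failure comes from the download retry limit rather than from checksum validation. A small sketch of the helper expression used above:

    from hashlib import sha512

    fake_checksum = sha512(b'not_a_valid_checksum').hexdigest()
    assert len(fake_checksum) == 128  # well-formed sha512 hex digest, but not of any real ISO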

Steps: 1. Create an invalid manifest. 2. Try to upgrade with the invalid manifest. 3. The upgrade should not start and should eventually fail.
@@ -1092,6 +2347,36 @@

                                  Methods

                                  def test_version_compatibility(self, api_client, unique_name, upgrade_target, upgrade_timeout)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.skip("https://github.com/harvester/harvester/issues/5494")
                                  +def test_version_compatibility(
                                  +    self, api_client, unique_name, upgrade_target, upgrade_timeout
                                  +):
                                  +    version, url, checksum = upgrade_target
                                  +    version = f"{version}-{unique_name}"
                                  +
                                  +    code, data = api_client.versions.create(version, url, checksum)
                                  +    assert 201 == code, f"Failed to create upgrade for {version}"
                                  +    code, data = api_client.upgrades.create(version)
                                  +    assert 201 == code, f"Failed to start upgrade for {version}"
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=upgrade_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.upgrades.get(data['metadata']['name'])
                                  +        conds = dict((c['type'], c) for c in data.get('status', {}).get('conditions', []))
                                  +        verified = []  # TODO
                                  +        if all(verified):
                                  +            break
                                  +    else:
                                  +        raise AssertionError(f"Upgrade NOT failed in expected conditions: {conds}")
                                  +
                                  +    # teardown
                                  +    api_client.upgrades.delete(data['metadata']['name'])
                                  +    api_client.versions.delete(version)
                                  +
                                  @@ -1166,7 +2451,7 @@

                                  -

                                  Generated by pdoc 0.11.1.

                                  +

                                  Generated by pdoc 0.11.5.

                                  diff --git a/backend/integrations/test_z_terraform.html b/backend/integrations/test_z_terraform.html index e6232ffeb..d48654534 100644 --- a/backend/integrations/test_z_terraform.html +++ b/backend/integrations/test_z_terraform.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_z_terraform API documentation - + @@ -37,78 +48,298 @@

                                  Functions

                                  def clusternetwork_resource(unique_name, tf_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def clusternetwork_resource(unique_name, tf_resource):
                                  +    name = f"cnet-{datetime.strptime(unique_name, '%Hh%Mm%Ss%f-%m-%d').strftime('%H%M%S')}"
                                  +    spec = tf_resource.cluster_network(f"tf_{unique_name}", name)
                                  +    return spec, name
                                  +
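The fixture derives a short cluster-network name from the suite's `unique_name` timestamp. A sketch with a hypothetical `unique_name` value matching the `'%Hh%Mm%Ss%f-%m-%d'` convention assumed above:

    from datetime import datetime

    unique_name = "10h30m05s123456-01-10"  # hypothetical value in the suite's format
    short = datetime.strptime(unique_name, '%Hh%Mm%Ss%f-%m-%d').strftime('%H%M%S')
    print(f"cnet-{short}")  # -> cnet-103005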
                                  def image_resource(unique_name, image_opensuse, tf_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def image_resource(unique_name, image_opensuse, tf_resource):
                                  +    spec = tf_resource.image_download(
                                  +        f"tf_{unique_name}", unique_name, unique_name, image_opensuse.url
                                  +    )
                                  +    return spec, unique_name, image_opensuse
                                  +
                                  def ssh_key_resource(ssh_keypair, unique_name, tf_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def ssh_key_resource(ssh_keypair, unique_name, tf_resource):
                                  +    pub_key, _ = ssh_keypair
                                  +    spec = tf_resource.ssh_key(f"tf_{unique_name}", unique_name, pub_key)
                                  +    return spec, unique_name, pub_key
                                  +
                                  def test_create_image(api_client, tf_harvester, image_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +@pytest.mark.dependency(name="create_image")
                                  +def test_create_image(api_client, tf_harvester, image_resource):
                                  +    spec, unique_name, img_info = image_resource
                                  +    tf_harvester.save_as(spec.ctx, "images")
                                  +
                                  +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                  +    assert not err and 0 == code
                                  +
                                  +    code, data = api_client.images.get(unique_name)
                                  +    assert 200 == code
                                  +    assert img_info.url == data['spec']['url']
                                  +
                                  def test_create_ssh_key(api_client, tf_harvester, ssh_key_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +@pytest.mark.dependency(name="create_ssh_key")
                                  +def test_create_ssh_key(api_client, tf_harvester, ssh_key_resource):
                                  +    spec, unique_name, _ = ssh_key_resource
                                  +    tf_harvester.save_as(spec.ctx, "ssh_key")
                                  +
                                  +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                  +    assert not err and 0 == code
                                  +
                                  +    code, data = api_client.keypairs.get(unique_name)
                                  +    assert 200 == code
                                  +
                                  def test_create_volume(api_client, tf_harvester, volume_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +@pytest.mark.dependency(name="create_volume")
                                  +def test_create_volume(api_client, tf_harvester, volume_resource):
                                  +    spec, unique_name, size = volume_resource
                                  +    tf_harvester.save_as(spec.ctx, "volumes")
                                  +
                                  +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                  +    assert not err and 0 == code
                                  +
                                  +    code, data = api_client.volumes.get(unique_name)
                                  +    assert 200 == code
                                  +    vol_spec = api_client.volumes.Spec.from_dict(data)
                                  +    assert size == vol_spec.size
                                  +
                                  def test_create_volume_from_image(api_client, tf_harvester, tf_resource, image_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +def test_create_volume_from_image(api_client, tf_harvester, tf_resource, image_resource):
                                  +    spec, unique_name, img_info = image_resource
                                  +    tf_harvester.save_as(spec.ctx, "images")
                                  +
                                  +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                  +    assert not err and 0 == code
                                  +
                                  +    code, data = api_client.images.get(unique_name)
                                  +
                                  +    vol_name, size = f"vol-from-img-{unique_name}", "10Gi"
                                  +    spec = tf_resource.volume(
                                  +        f"tf_{vol_name}", vol_name, size, image=f"{data['metadata']['namespace']}/{unique_name}"
                                  +    )
                                  +    tf_harvester.save_as(spec.ctx, "vol_from_img")
                                  +
                                  +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                  +    assert not err and 0 == code
                                  +
                                  +    code, data = api_client.volumes.get(vol_name)
                                  +    assert 200 == code
                                  +    vol_spec = api_client.volumes.Spec.from_dict(data)
                                  +    assert size == vol_spec.size
                                  +
                                  +    # Teardown
                                  +    api_client.volumes.delete(vol_name)
                                  +
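The `image` argument passed to `tf_resource.volume` above references the image as `<namespace>/<name>`, the same shape used by the Rancher fixtures later in this patch. A tiny illustration with placeholder values:

    # Hypothetical values: Harvester references images as "<namespace>/<image name>"
    namespace, image_name = "default", "my-image"
    image_ref = f"{namespace}/{image_name}"  # passed to tf_resource.volume(..., image=image_ref)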
                                  def test_delete_image(api_client, wait_timeout, tf_harvester, image_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +@pytest.mark.dependency(depends=["create_image"])
                                  +def test_delete_image(api_client, wait_timeout, tf_harvester, image_resource):
                                  +    spec, unique_name, _ = image_resource
                                  +
                                  +    out, err, rc = tf_harvester.destroy_resource(spec.type, spec.name)
                                  +    assert not err and 0 == rc
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.images.get(unique_name)
                                  +        if 404 == code:
                                  +            break
                                  +    else:
                                  +        raise AssertionError(
                                  +            "Terraform destroy image fail\n"
                                  +            f"stdout: {out}\nstderr: {err}, code: {rc}"
                                  +        )
                                  +
                                  def test_delete_ssh_key(api_client, tf_harvester, ssh_key_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +@pytest.mark.dependency(depends=["create_ssh_key"])
                                  +def test_delete_ssh_key(api_client, tf_harvester, ssh_key_resource):
                                  +    spec, unique_name, _ = ssh_key_resource
                                  +
                                  +    out, err, code = tf_harvester.destroy_resource(spec.type, spec.name)
                                  +    assert not err and 0 == code
                                  +
                                  +    code, data = api_client.keypairs.get(unique_name)
                                  +    assert 404 == code
                                  +
                                  def test_delete_volume(api_client, wait_timeout, tf_harvester, volume_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.mark.p0
                                  +@pytest.mark.terraform
                                  +@pytest.mark.dependency(depends=["create_volume"])
                                  +def test_delete_volume(api_client, wait_timeout, tf_harvester, volume_resource):
                                  +    spec, unique_name, _ = volume_resource
                                  +
                                  +    out, err, rc = tf_harvester.destroy_resource(spec.type, spec.name)
                                  +    assert not err and 0 == rc
                                  +
                                  +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                  +    while endtime > datetime.now():
                                  +        code, data = api_client.volumes.get(unique_name)
                                  +        if 404 == code:
                                  +            break
                                  +    else:
                                  +        raise AssertionError(
                                  +            "Terraform destroy volume fail\n"
                                  +            f"stdout: {out}\nstderr:{err}, code: {rc}"
                                  +        )
                                  +
                                  def vlanconfig_resource(request, unique_name, tf_resource, clusternetwork_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def vlanconfig_resource(request, unique_name, tf_resource, clusternetwork_resource):
                                  +    vlan_nic = request.config.getoption('--vlan-nic')
                                  +    assert vlan_nic, f"VLAN NIC {vlan_nic} not configured correctly."
                                  +
                                  +    _, clusternetwork_name = clusternetwork_resource
                                  +    name, nics = f"{clusternetwork_name}-{vlan_nic}".lower(), [vlan_nic]
                                  +    spec = tf_resource.vlanconfig(f"tf_{unique_name}", name, clusternetwork_name, nics)
                                  +
                                  +    return spec, name, clusternetwork_name, nics
                                  +
                                  def vmnetwork_resource(request, unique_name, tf_resource, clusternetwork_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def vmnetwork_resource(request, unique_name, tf_resource, clusternetwork_resource):
                                  +    vlan_id = request.config.getoption('--vlan-id')
+    assert 4095 > vlan_id > 0, (f"VLAN ID should be in range 1-4094, not {vlan_id}")
                                  +
                                  +    _, clusternetwork_name = clusternetwork_resource
                                  +    spec = tf_resource.network(f"tf_{unique_name}", unique_name, vlan_id, clusternetwork_name)
                                  +
                                  +    return spec, unique_name, vlan_id, clusternetwork_name
                                  +
                                  def volume_resource(unique_name, tf_resource)
                                  +
                                  + +Expand source code + +
                                  @pytest.fixture(scope="module")
                                  +def volume_resource(unique_name, tf_resource):
                                  +    size = "2Gi"
                                  +    spec = tf_resource.volume(f"tf_{unique_name}", unique_name, size)
                                  +    return spec, unique_name, size
                                  +

                                @@ -120,7 +351,6 @@

                                Classes

                                class TestNetworking
                                -
                                Expand source code @@ -214,6 +444,7 @@

                                Classes

                                f"stdout: {out}\nstderr: {err}, code: {rc}" )
                                +

                                Class variables

                                var pytestmark
                                @@ -227,36 +458,146 @@

                                Methods

                                def test_create_clusternetwork(self, api_client, tf_harvester, clusternetwork_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="create_clusternetwork")
                                +def test_create_clusternetwork(self, api_client, tf_harvester, clusternetwork_resource):
                                +    spec, unique_name = clusternetwork_resource
                                +    tf_harvester.save_as(spec.ctx, "clusternetwork")
                                +
                                +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    code, data = api_client.clusternetworks.get(unique_name)
                                +    assert 200 == code
                                +
                                def test_create_vlanconfig(self, api_client, tf_harvester, vlanconfig_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="create_vlanconfig", depends=["create_clusternetwork"])
                                +def test_create_vlanconfig(self, api_client, tf_harvester, vlanconfig_resource):
                                +    spec, unique_name, cnet_name, nics = vlanconfig_resource
                                +    tf_harvester.save_as(spec.ctx, "vlanconfig")
                                +
                                +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    code, data = api_client.clusternetworks.get_config(unique_name)
                                +    assert 200 == code
                                +    assert cnet_name == data['spec']['clusterNetwork']
                                +    assert nics == data['spec']['uplink']['nics']
                                +
                                def test_create_vm_network(self, api_client, tf_harvester, vmnetwork_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(name="create_vm_network", depends=["create_vlanconfig"])
                                +def test_create_vm_network(self, api_client, tf_harvester, vmnetwork_resource):
                                +    spec, unique_name, vlan_id, cnet_name = vmnetwork_resource
                                +    tf_harvester.save_as(spec.ctx, "vmnetwork")
                                +
                                +    out, err, code = tf_harvester.apply_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    code, data = api_client.networks.get(unique_name)
                                +    assert 200 == code
                                +    config = json.loads(data['spec'].get('config', ""))
                                +    assert vlan_id == config['vlan']
                                +    assert cnet_name in config['bridge']
                                +
                                def test_delete_clusternetwork(self, api_client, wait_timeout, tf_harvester, clusternetwork_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_clusternetwork"])
                                +def test_delete_clusternetwork(
                                +    self, api_client, wait_timeout, tf_harvester, clusternetwork_resource
                                +):
                                +    spec, unique_name, *_ = clusternetwork_resource
                                +
                                +    out, err, rc = tf_harvester.destroy_resource(spec.type, spec.name)
                                +    assert not err and 0 == rc
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.clusternetworks.get(unique_name)
                                +        if 404 == code:
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            "Terraform destroy clusternetwork fail\n"
                                +            f"stdout: {out}\nstderr: {err}, code: {rc}"
                                +        )
                                +
                                def test_delete_vlanconfig(self, api_client, wait_timeout, tf_harvester, vlanconfig_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_vlanconfig"])
                                +def test_delete_vlanconfig(self, api_client, wait_timeout, tf_harvester, vlanconfig_resource):
                                +    spec, unique_name, *_ = vlanconfig_resource
                                +
                                +    out, err, rc = tf_harvester.destroy_resource(spec.type, spec.name)
                                +    assert not err and 0 == rc
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        code, data = api_client.clusternetworks.get_config(unique_name)
                                +        if 404 == code:
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            "Terraform destroy vlanconfig fail\n"
                                +            f"stdout: {out}\nstderr: {err}, code: {rc}"
                                +        )
                                +
                                def test_delete_vm_network(self, api_client, tf_harvester, vmnetwork_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.dependency(depends=["create_vm_network"])
                                +def test_delete_vm_network(self, api_client, tf_harvester, vmnetwork_resource):
                                +    spec, unique_name, *_ = vmnetwork_resource
                                +
                                +    out, err, code = tf_harvester.destroy_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    code, data = api_client.networks.get(unique_name)
                                +    assert 404 == code
                                +
                                @@ -311,7 +652,7 @@

                                -

                                Generated by pdoc 0.11.1.

                                +

                                Generated by pdoc 0.11.5.

                                diff --git a/backend/integrations/test_z_terraform_rancher.html b/backend/integrations/test_z_terraform_rancher.html index 459002150..58a328c9a 100644 --- a/backend/integrations/test_z_terraform_rancher.html +++ b/backend/integrations/test_z_terraform_rancher.html @@ -3,19 +3,30 @@ - + harvester_e2e_tests.integrations.test_z_terraform_rancher API documentation - + @@ -37,90 +48,391 @@

                                Functions

                                def cluster_resource(tf_rancher_resource, rke2_cluster, harvester, credential_resource)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def cluster_resource(tf_rancher_resource, rke2_cluster, harvester, credential_resource):
                                +    return tf_rancher_resource.cluster_config(
                                +        rke2_cluster["name"], rke2_cluster["k8s_version"], harvester["name"],
                                +        credential_resource.name
                                +    )
                                +
                                def credential_resource(unique_name, tf_rancher_resource, harvester)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def credential_resource(unique_name, tf_rancher_resource, harvester):
                                +    return tf_rancher_resource.cloud_credential(
                                +        f"cc-{unique_name}", harvester["name"]
                                +    )
                                +
                                def harvester(api_client, rancher_api_client, unique_name, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def harvester(api_client, rancher_api_client, unique_name, wait_timeout):
                                +    """ Rancher creates Harvester entry (Import Existing)
                                +    """
                                +    name = f"hvst-{unique_name}"
                                +
                                +    rc, data = rancher_api_client.mgmt_clusters.create_harvester(name)
                                +    assert 201 == rc, (rc, data)
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        rc, data = rancher_api_client.mgmt_clusters.get(name)
                                +        if data.get('status', {}).get('clusterName'):
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            f"Fail to get MgmtCluster with clusterName {name}\n"
                                +            f"rc: {rc}, data:\n{data}"
                                +        )
                                +    namespace = data['metadata']['namespace']
                                +
                                +    yield {
                                +        "name": name,
                                +        "namespace": namespace,
                                +        "id": data['status']['clusterName'],
                                +        "kubeconfig": api_client.generate_kubeconfig()
                                +    }
                                +
                                +    rancher_api_client.mgmt_clusters.delete(name, namespace)
                                +    updates = dict(value="")
                                +    api_client.settings.update("cluster-registration-url", updates)
                                +

                                Rancher creates Harvester entry (Import Existing)

                                def machine_resource(tf_rancher_resource, rke2_cluster, vlan_network, ubuntu_image)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def machine_resource(tf_rancher_resource, rke2_cluster, vlan_network, ubuntu_image):
                                +    return tf_rancher_resource.machine_config(
                                +        rke2_cluster["name"], vlan_network["id"], ubuntu_image["id"], ubuntu_image["ssh_user"]
                                +    )
                                +
                                def rancher(rancher_api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def rancher(rancher_api_client):
                                +    access_key, secret_key = rancher_api_client.token.split(":")
                                +    yield {
                                +        "endpoint": rancher_api_client.endpoint,
                                +        "token": rancher_api_client.token,
                                +        "access_key": access_key,
                                +        "secret_key": secret_key
                                +    }
                                +
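The split above assumes the Rancher bearer token follows the usual `<access key>:<secret key>` shape. A minimal illustration with a made-up token:

    token = "token-abc12:ll48bnxs"            # hypothetical Rancher API token
    access_key, secret_key = token.split(":")
    # access_key -> "token-abc12", secret_key -> "ll48bnxs"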
                                def rke2_cluster(unique_name, rke2_version)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def rke2_cluster(unique_name, rke2_version):
                                +    return {
                                +        "name": f"rke2-{unique_name}",
                                +        "id": "",                         # set in test_create_rke2_cluster
                                +        "k8s_version": rke2_version
                                +    }
                                +
                                def test_create_cloud_credential(rancher_api_client, tf_rancher, credential_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="create_cloud_credential", depends=["import_harvester"])
                                +def test_create_cloud_credential(rancher_api_client, tf_rancher, credential_resource):
                                +    spec = credential_resource
                                +    tf_rancher.save_as(spec.ctx, "cloud_credential")
                                +    out, err, code = tf_rancher.apply_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    code, data = rancher_api_client.cloud_credentials.get(params={"name": spec.name})
                                +    assert 200 == code, (
                                +        f"Failed to get cloud credential {spec.name}: {code}, {data}"
                                +    )
                                +
                                def test_create_machine_config(tf_rancher, machine_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="create_machine_config", depends=["create_cloud_credential"])
                                +def test_create_machine_config(tf_rancher, machine_resource):
                                +    spec = machine_resource
                                +    tf_rancher.save_as(spec.ctx, "machine_config")
                                +    out, err, code = tf_rancher.apply_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                def test_create_rke2_cluster(tf_rancher, rke2_cluster, rancher_api_client, cluster_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="create_rke2_cluster", depends=["create_machine_config"])
                                +def test_create_rke2_cluster(tf_rancher, rke2_cluster, rancher_api_client, cluster_resource):
                                +    spec = cluster_resource
                                +    tf_rancher.save_as(spec.ctx, "rke2_cluster")
                                +    out, err, code = tf_rancher.apply_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    rc, data = rancher_api_client.mgmt_clusters.get(rke2_cluster['name'])
                                +    cluster_state = data.get("metadata", {}).get("state", {})
                                +    assert "active" == cluster_state['name'] and \
                                +           "Ready" in cluster_state['message']
                                +
                                +    # check deployments
                                +    rke2_cluster['id'] = data["status"]["clusterName"]
                                +    for deployment in ["harvester-cloud-provider", "harvester-csi-driver-controllers"]:
                                +        rc, data = rancher_api_client.cluster_deployments.get(
                                +            rke2_cluster['id'], "kube-system", deployment
                                +        )
                                +        cluster_state = data.get("metadata", {}).get("state", {})
                                +        assert 200 == rc and \
                                +               "active" == cluster_state["name"]
                                +
                                def test_delete_cloud_credential(tf_rancher, credential_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="delete_cloud_credential", depends=["create_cloud_credential"])
                                +def test_delete_cloud_credential(tf_rancher, credential_resource):
                                +    spec = credential_resource
                                +    out, err, code = tf_rancher.destroy_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                def test_delete_machine_config(tf_rancher, machine_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="delete_machine_config", depends=["create_machine_config"])
                                +def test_delete_machine_config(tf_rancher, machine_resource):
                                +    spec = machine_resource
                                +    out, err, code = tf_rancher.destroy_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                def test_delete_rke2_cluster(tf_rancher, rke2_cluster, rancher_api_client, cluster_resource)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="delete_rke2_cluster", depends=["create_rke2_cluster"])
                                +def test_delete_rke2_cluster(tf_rancher, rke2_cluster, rancher_api_client, cluster_resource):
                                +    spec = cluster_resource
                                +    out, err, code = tf_rancher.destroy_resource(spec.type, spec.name)
                                +    assert not err and 0 == code
                                +
                                +    rc, data = rancher_api_client.mgmt_clusters.get(rke2_cluster['name'])
                                +    assert 404 == rc
                                +
                                def test_import_harvester(api_client, rancher_api_client, harvester, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.mark.p0
                                +@pytest.mark.terraform
                                +@pytest.mark.rancher
                                +@pytest.mark.dependency(name="import_harvester")
                                +def test_import_harvester(api_client, rancher_api_client, harvester, wait_timeout):
                                +    # Get cluster registration URL in Rancher's Virtualization Management
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        rc, data = rancher_api_client.cluster_registration_tokens.get(harvester['id'])
                                +        if 200 == rc and data.get('manifestUrl'):
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            f"Fail to registration URL for the imported harvester {harvester['name']}\n"
                                +            f"rc: {rc}, data:\n{data}"
                                +        )
                                +
                                +    # Set cluster-registration-url on Harvester
                                +    updates = dict(value=data['manifestUrl'])
                                +    rc, data = api_client.settings.update("cluster-registration-url", updates)
                                +    assert 200 == rc, (
                                +        f"Failed to update Harvester's settings `cluster-registration-url`"
                                +        f"rc: {rc}, data:\n{data}"
                                +    )
                                +
                                +    # Check Cluster becomes `active` in Rancher's Virtualization Management
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        rc, data = rancher_api_client.mgmt_clusters.get(harvester['name'])
                                +        cluster_state = data['metadata']['state']
                                +        if "active" == cluster_state['name'] and "Ready" in cluster_state['message']:
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            f"Fail to import harvester\n"
                                +            f"rc: {rc}, data:\n{data}"
                                +        )
                                +
                                def ubuntu_image(api_client, unique_name, image_ubuntu, wait_timeout)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope="module")
                                +def ubuntu_image(api_client, unique_name, image_ubuntu, wait_timeout):
                                +    rc, data = api_client.images.create_by_url(unique_name, image_ubuntu.url)
                                +    assert 201 == rc, (
                                +        f"Failed to upload ubuntu image with error\n"
                                +        f"rc: {rc}, data:\n{data}"
                                +    )
                                +
                                +    endtime = datetime.now() + timedelta(seconds=wait_timeout)
                                +    while endtime > datetime.now():
                                +        rc, data = api_client.images.get(unique_name)
                                +        if 200 == rc and image_ubuntu.url == data['spec']['url']:
                                +            break
                                +    else:
                                +        raise AssertionError(
                                +            f"Fail to create image {unique_name}\n"
                                +            f"rc: {rc}, data:\n{data}"
                                +        )
                                +    namespace = data['metadata']['namespace']
                                +    name = data['metadata']['name']
                                +
                                +    yield {
                                +        "id": f"{namespace}/{name}",
                                +        "ssh_user": "ubuntu"
                                +    }
                                +
                                +    api_client.images.delete(name, namespace)
                                +
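A minimal sketch of how a test might consume the module-scoped fixture above; the test name is illustrative and only the dict shape yielded by the fixture is exercised:

def test_ubuntu_image_shape(ubuntu_image):
    # The fixture yields the namespaced image id plus the default SSH user.
    namespace, name = ubuntu_image["id"].split("/", 1)
    assert namespace and name
    assert ubuntu_image["ssh_user"] == "ubuntu"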
                                def vlan_network(request, api_client)
                                +
                                + +Expand source code + +
                                @pytest.fixture(scope='module')
                                +def vlan_network(request, api_client):
                                +    vlan_nic = request.config.getoption('--vlan-nic')
                                +    vlan_id = request.config.getoption('--vlan-id')
                                +    assert -1 != vlan_id, "Rancher integration test needs VLAN"
                                +
                                +    api_client.clusternetworks.create(vlan_nic)
                                +    api_client.clusternetworks.create_config(vlan_nic, vlan_nic, vlan_nic)
                                +
                                +    network_name = f'vlan-network-{vlan_id}'
                                +    code, data = api_client.networks.get(network_name)
                                +    if code != 200:
                                +        code, data = api_client.networks.create(network_name, vlan_id, cluster_network=vlan_nic)
                                +        assert 201 == code, (
                                +            f"Failed to create network-attachment-definition {network_name}\n"
                                +            f"rc: {code}, data:\n{data}"
                                +        )
                                +    namespace = data['metadata']['namespace']
                                +    name = data['metadata']['name']
                                +
                                +    yield {
                                +        "id": f"{namespace}/{name}"
                                +    }
                                +
                                +    api_client.networks.delete(network_name, namespace)
                                +
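Similarly, a hedged usage sketch for the VLAN network fixture (test name illustrative): the yielded id is the namespaced name of the network-attachment-definition that downstream tests attach to VM interfaces.

def test_vlan_network_id_shape(vlan_network):
    namespace, name = vlan_network["id"].split("/", 1)
    assert namespace and name.startswith("vlan-network-")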

                  @@ -161,7 +473,7 @@

                  Functions

diff --git a/backend/integrations/vmconsole.html b/backend/integrations/vmconsole.html
index 42dcc511b..0692e1997 100644
--- a/backend/integrations/vmconsole.html
+++ b/backend/integrations/vmconsole.html
@@ -3,19 +3,30 @@
-
+
 harvester_e2e_tests.integrations.vmconsole API documentation
-
+
@@ -40,7 +51,6 @@

                  Classes

                  (virctl, name, user, passwd, command_timeout=300)
                  -
                  Expand source code @@ -100,24 +110,72 @@

                  Classes

outbytes = len(proc.stdout.peek())
return proc.stdout.read(outbytes).decode()
                  +

                  Methods

                  def execute_command(self, command, *, timeout=None)
                  +
                  + +Expand source code + +
                  def execute_command(self, command, *, timeout=None):
                  +    if not self.proc:
                  +        self.login(timeout)
                  +    proc = self.proc
                  +    proc.stdin.write(command.encode() + b"\n")
                  +    proc.stdin.flush()
                  +    sleep(1)
                  +    outbytes = len(proc.stdout.peek())
                  +    return proc.stdout.read(outbytes).decode()
                  +
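execute_command relies on peek()/read() so the read never blocks: peek() reports the bytes already buffered on the console's stdout, and reading exactly that many bytes returns immediately. A standalone illustration of the same trick, independent of this class:

from subprocess import Popen, PIPE

proc = Popen("echo hello", stdout=PIPE, shell=True)
proc.wait()                                    # ensure the child's output is buffered
available = len(proc.stdout.peek())            # bytes ready without consuming them
print(proc.stdout.read(available).decode())    # prints "hello" and never blocks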
                  def login(self, timeout=None, **kwargs)
                  +
                  + +Expand source code + +
                  def login(self, timeout=None, **kwargs):
                  +    self.proc = Popen(f"{self.virctl} console {self.name} 2>&1",
                  +                      stdin=PIPE, stdout=PIPE, shell=True, **kwargs)
                  +
                  +    endtime = datetime.now() + timedelta(seconds=timeout or self.timeout)
                  +    while endtime > datetime.now():
                  +        out = self.execute_command("\n\n")
                  +        if re.search(rf"{self.name} login:\s+?", out, re.M):
                  +            break
                  +        print(out)
                  +        sleep(5)
                  +    else:
+        raise TimeoutError(-1, "Login timeout: unable to detect the login prompt.\n"
+                           f"Last output: {out}")
                  +
                  +    out = self.execute_command(self.user)
                  +
                  +    return self.execute_command(self.passwd)
                  +
                  def logout(self)
                  +
                  + +Expand source code + +
                  def logout(self):
                  +    if self.proc:
                  +        self.execute_command("exit\n")
                  +        self.proc.communicate()
                  +        self.proc = None
                  +
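A hedged end-to-end sketch of driving a VM serial console with this class. The class name VMConsole is a placeholder (the constructor shown above does not include the class name in this excerpt), the VM name, user, and password are assumptions, and virtctl must already be installed and pointed at the cluster:

console = VMConsole("virtctl", "test-vm", "ubuntu", "password")
try:
    console.login()                        # waits for the "<name> login:" prompt, then sends credentials
    out = console.execute_command("hostname")
    print(out)
finally:
    console.logout()                       # sends `exit` and reaps the subprocess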
                  @@ -151,7 +209,7 @@

                  -

                  Generated by pdoc 0.11.1.

                  +

                  Generated by pdoc 0.11.5.

diff --git a/index.html b/index.html
index 858e27a60..6e4878bc3 100644
--- a/index.html
+++ b/index.html
@@ -1,7 +1,7 @@
-
+
 Harvester manual test cases
diff --git a/integration/modules/skel_skel_spec.html b/integration/modules/skel_skel_spec.html
index b8c8e0622..40e4c60f7 100644
--- a/integration/modules/skel_skel_spec.html
+++ b/integration/modules/skel_skel_spec.html
@@ -1,11 +1,11 @@
-<!DOCTYPE html><html class="default"><head><meta charSet="utf-8"/><meta http-equiv="x-ua-compatible" content="IE=edge"/><title>skel/skel.spec | Cypress Integration Tests for Harvester

                  Index

                  Functions

                  • changePassword(): void