diff --git a/.github/workflows/clean.yml b/.github/workflows/clean.yml
new file mode 100644
index 0000000..fd28f7d
--- /dev/null
+++ b/.github/workflows/clean.yml
@@ -0,0 +1,37 @@
+name: Branch Deleted
+on: delete
+
+env:
+  TAG_NAME: ${{ github.event.ref }}
+
+jobs:
+  delete:
+    strategy:
+      fail-fast: false
+      matrix:
+        component:
+          - name: qubership-docker-integration-tests
+    if: github.event.ref_type == 'branch'
+    runs-on: ubuntu-latest
+    steps:
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Prepare Tag
+        run: echo "TAG_NAME=$(echo ${TAG_NAME} | sed 's@refs/heads/@@;s@/@_@g')" >> $GITHUB_ENV
+      - name: Get package IDs for delete
+        id: get-ids-for-delete
+        uses: Netcracker/get-package-ids@v0.0.1
+        with:
+          component-name: ${{ matrix.component.name }}
+          component-tag: ${{ env.TAG_NAME }}
+          access-token: ${{ secrets.GH_ACCESS_TOKEN }}
+      - uses: actions/delete-package-versions@v5
+        with:
+          package-name: ${{ matrix.component.name }}
+          package-type: 'container'
+          package-version-ids: ${{ steps.get-ids-for-delete.outputs.ids-for-delete }}
+        if: ${{ steps.get-ids-for-delete.outputs.ids-for-delete != '' }}
diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml
new file mode 100644
index 0000000..c2c9a47
--- /dev/null
+++ b/.github/workflows/push.yml
@@ -0,0 +1,57 @@
+name: Build Artifacts
+on:
+  release:
+    types: [created]
+  push:
+    branches:
+      - 'main'
+env:
+  TAG_NAME: ${{ github.event.release.tag_name || github.ref }}
+
+jobs:
+  multiplatform_build:
+    strategy:
+      fail-fast: false
+      matrix:
+        component:
+          - name: qubership-docker-integration-tests
+            file: docker/Dockerfile
+            dir: .  # build context; the Dockerfile's COPY paths are relative to the repo root
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+      - name: Prepare Tag
+        run: echo "TAG_NAME=$(echo ${TAG_NAME} | sed 's@refs/tags/@@;s@refs/heads/@@;s@/@_@g')" >> $GITHUB_ENV
+      - name: Get package IDs for delete
+        id: get-ids-for-delete
+        uses: Netcracker/get-package-ids@v0.0.1
+        with:
+          component-name: ${{ matrix.component.name }}
+          component-tag: ${{ env.TAG_NAME }}
+          access-token: ${{ secrets.GH_ACCESS_TOKEN }}
+      - name: Build and push
+        uses: docker/build-push-action@v5
+        with:
+          no-cache: true
+          context: ${{ matrix.component.dir }}
+          file: ${{ matrix.component.file }}
+          platforms: linux/amd64,linux/arm64
+          push: true
+          tags: ghcr.io/netcracker/${{ matrix.component.name }}:${{ env.TAG_NAME }}
+          provenance: false
+      - uses: actions/delete-package-versions@v5
+        with:
+          package-name: ${{ matrix.component.name }}
+          package-type: 'container'
+          package-version-ids: ${{ steps.get-ids-for-delete.outputs.ids-for-delete }}
+        if: ${{ steps.get-ids-for-delete.outputs.ids-for-delete != '' }}
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..0307310
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,79 @@
+.idea/
+# Temporary Build Files
+build/_output
+build/_test
+# Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
+### Emacs ###
+# -*- mode: gitignore; -*-
+*~
+\#*\#
+/.emacs.desktop
+/.emacs.desktop.lock
+*.elc
+auto-save-list
+tramp
+.\#*
+# Org-mode
+.org-id-locations
+*_archive
+# flymake-mode
+*_flymake.*
+# eshell files
+/eshell/history
+/eshell/lastdir
+# elpa packages
+/elpa/
+# reftex files
+*.rel
+# AUCTeX auto folder
+/auto/
+# cask packages
+.cask/
+dist/
+# Flycheck
+flycheck_*.el
+# server auth directory
+/server/
+# projectiles files
+.projectile
+projectile-bookmarks.eld
+# directory configuration
+.dir-locals.el
+# saveplace
+places
+# url cache
+url/cache/
+# cedet
+ede-projects.el
+# smex
+smex-items
+# company-statistics
+company-statistics-cache.el
+# anaconda-mode
+anaconda-mode/
+### Go ###
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+# Test binary, build with 'go test -c'
+*.test
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+### Vim ###
+# swap
+.sw[a-p]
+.*.sw[a-p]
+# session
+Session.vim
+# temporary
+.netrwhist
+# auto-generated tag files
+tags
+### VisualStudioCode ###
+.vscode/*
+.history
+# End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode
+*.iml
\ No newline at end of file
diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md
new file mode 100644
index 0000000..ccad4b8
--- /dev/null
+++ b/CODE-OF-CONDUCT.md
@@ -0,0 +1,73 @@
+# Code of Conduct
+
+This repository is governed by the following code of conduct guidelines.
+
+We hold collaboration, trust, respect and transparency as core values for our community.
+Our community welcomes participants from all over the world with different experiences,
+opinions and ideas to share.
+
+We have adopted this code of conduct and require all contributors to agree with it in order to build a healthy,
+safe and productive community for all.
+
+These guidelines are aimed at supporting a community where all people feel safe to participate,
+introduce new ideas and inspire others, regardless of:
+
+* Age
+* Gender
+* Gender identity or expression
+* Family status
+* Marital status
+* Ability
+* Ethnicity
+* Race
+* Sex characteristics
+* Sexual identity and orientation
+* Education
+* Native language
+* Background
+* Caste
+* Religion
+* Geographic location
+* Socioeconomic status
+* Personal appearance
+* Any other dimension of diversity
+
+## Our Standards
+
+We welcome the following behavior:
+
+* Be respectful of different ideas, opinions and points of view
+* Be constructive and professional
+* Use inclusive language
+* Be collaborative and show empathy
+* Focus on the best results for the community
+
+The following behavior is unacceptable:
+
+* Violence, threats of violence, or inciting others to commit self-harm
+* Personal attacks, trolling, intentionally spreading misinformation, insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Derogatory language
+* Encouraging unacceptable behavior
+* Other conduct which could reasonably be considered inappropriate in a professional community
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of the Code of Conduct
+and are expected to take appropriate action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments,
+commits, code, wiki edits, issues, and other contributions that are not aligned
+with this Code of Conduct, and to temporarily or permanently ban any contributor for other behaviors
+that they deem inappropriate, threatening, offensive, or harmful.
+
+## Reporting
+
+If you believe you're experiencing unacceptable behavior that will not be tolerated as outlined above,
+please report to `plutosdev@gmail.com`. All complaints will be reviewed and investigated and will result in a response
+that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality
+with regard to the reporter of an incident.
+
+Please also report if you observe a potentially dangerous situation, someone in distress, or violations of these guidelines,
+even if the situation is not happening to you.
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..4bfc0ea
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+# Contribution Guide
+
+We'd love to accept patches and contributions to this project.
+Please follow these guidelines to make the contribution process easy and effective for everyone involved.
+
+## Contributor License Agreement
+
+You must sign the [Contributor License Agreement](https://pages.netcracker.com/cla-main.html) in order to contribute.
+
+## Code of Conduct
+
+Please make sure to read and follow the [Code of Conduct](CODE-OF-CONDUCT.md).
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..7a4a3ea
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..a962760
--- /dev/null
+++ b/README.md
@@ -0,0 +1,211 @@
+# Introduction
+
+`Docker Integration Tests` (aka `BDI`) is a base image for integration tests. It is not supposed to be used to execute
+integration tests directly; instead, real integration tests images use it as a base image (via the `FROM` instruction
+in their particular Dockerfiles). BDI provides a "sandbox" which includes a `python` interpreter, Robot Framework,
+useful tools such as `bash`, `shadow`, `vim`, `rsync` and `ttyd`, common custom Robot Framework libraries
+(for example, `PlatformLibrary`) and a customized docker entrypoint script.
+
+# Pre-installed tools
+
+The `Docker Integration Tests` image contains the following pre-installed Linux tools:
+
+* `python` (version 3.13.1)
+* `bash`
+* `shadow`
+* `vim`
+* `rsync`
+* `ttyd`
+
+# Library documentation
+
+`PlatformLibrary` documentation is autogenerated by the Robot Framework libdoc tool.
+It can be found [here](/documentation/integration_tests_builtin_library_documentation/PlatformLibrary.html).
+To generate a new doc, navigate to the current project repository and execute the following command:
+
+```bash
+python -m robot.libdoc PlatformLibrary.py PlatformLibrary.html
+```
+
+and move the `PlatformLibrary.html` file to the documentation directory.
+
+# Docker Entry point Script
+
+A docker entrypoint script is a script which is executed after the docker container is created. If you inherit from
+this image, its entrypoint script is executed by default; but if you override the entrypoint as well, your own
+entrypoint is run. Docker Integration Tests contains a simple, customizable entrypoint script, `/docker-entrypoint.sh`,
+which accepts the following commands (possible `CMD` arguments):
+
+* `run-robot` is the default `CMD` command. It executes Robot Framework test cases, can resolve Robot Framework tags
+to be excluded, and can create a `result.txt` file with parsed Robot Framework test results in a pretty format. After
+the tests are executed, the `ttyd` tool is started.
+
+* `run-ttyd` starts the `ttyd` tool. `ttyd` is a web console which is rather useful for development and
+troubleshooting purposes.
+
+* `custom` executes a custom bash script if the script's path is provided. The custom script must exist within the
+container, and the environment variable `CUSTOM_ENTRYPOINT_SCRIPT` must contain the path to the script. In effect, the
+`custom` command is equivalent to overriding the entrypoint script, but we recommend implementing a custom script
+instead of overriding the entrypoint.
+
+Example of an equivalent console command:
+
+```bash
+/docker-entrypoint.sh run-robot
+```
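+
+For illustration, this is how an inheritor image could wire up the `custom` command in its Dockerfile. This is a
+hypothetical sketch: the script name and path are placeholders, not part of the image. Note that the entrypoint runs
+the script directly, so it must be executable inside the container:
+
+```
+COPY my-extras.sh /opt/robot/my-extras.sh
+RUN chmod +x /opt/robot/my-extras.sh
+ENV CUSTOM_ENTRYPOINT_SCRIPT=/opt/robot/my-extras.sh
+CMD ["custom"]
+```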
+
+Below is a detailed description of the `run-robot` command.
+
+## run-robot description
+
+The `run-robot` command consists of five customized steps:
+
+* `service checker script`. Sometimes we want to make sure that the service under test is ready and start the
+execution only when it is. For this purpose an inheritor image should implement a "service checker script" (Python)
+which receives a `timeout` argument in seconds (by default `timeout` is 300 seconds, but it can be overridden by the
+`SERVICE_CHECKER_SCRIPT_TIMEOUT` environment variable) and checks whether the service is ready. If the service is
+ready, the entrypoint script goes to the next step. To specify a "service checker script", the
+`SERVICE_CHECKER_SCRIPT` environment variable should be non-empty and contain the path to the custom Python script.
+By default, `SERVICE_CHECKER_SCRIPT` is empty and this entrypoint step is skipped. A minimal sketch of such a script
+is shown at the end of this section.
+
+* `excluded tags resolver`. Sometimes some Robot Framework tests cannot be executed in a given configuration. For
+example, a test should check the `Elasticsearch` service, but the `elasticsearch` URL is not given, so we want to skip
+this test. The Robot Framework paradigm handles this case with tags: we can indicate which tests should be skipped by
+their tags, for example `robot -e my-excluded_tag ./tests`. The Docker Integration Tests entrypoint script provides a
+way to recognize which tests should be skipped before the tests are executed. It is assumed that the `.robot` files
+are contained in some root folder (for example, `tests`). The `robot_tags_resolver.py` script recursively walks all
+inner folders to look up all `tags_exclusion.py` modules. For each found module the `get_excluded_tags(environ)`
+function is executed, where `environ` is a mapping of environment variables. The `get_excluded_tags` function should
+return a list or a dictionary of excluded tags. If a list is returned, all tags are added to the resulting set of
+excluded tags. If a dictionary is returned, all keys are added to the resulting set of excluded tags, and the
+dictionary updates a result dictionary which is printed to the console as a map whose keys are the excluded tags and
+whose values are the reasons why these tags are excluded. The default tags resolver script is
+`robot_tags_resolver.py`, but an inheritor image can override it with the `TAGS_RESOLVER_SCRIPT` environment variable,
+which contains the path to a custom tags resolver script. To skip the excluded tags resolving process, set the
+`IS_TAGS_RESOLVER_ENABLED` environment variable to `false` (it is `true` by default).
+
+* `robot tests execution`. This step cannot be skipped. It executes the Robot Framework tests without the excluded
+ones. If the `TAGS` environment variable is present, only tests with these tags are executed. This is an example of
+selecting only the `first` and `second` tests out of `first`, `second` and `third`:
+```bash
+firstORsecond
+```
+If a test's tag is both included and excluded, the test is not executed.
+
+* `analyze results`. Sometimes a text file with the Robot Framework results should be generated in a pretty,
+human-readable format. For example, we want to copy the results from a Kubernetes pod to a Jenkins job, and we want
+text results instead of the pod's full logs or an `html`-formatted file. For this purpose the `analyze_result.py`
+module is executed. This module creates a `result.txt` file in the `output` folder (together with all Robot Framework
+results) with the test results in a pretty format. To skip this step, set the `IS_ANALYZER_RESULT_ENABLED` environment
+variable to `false` (it is `true` by default). The default analyzer script is `analyze_result.py`, but an inheritor
+image can override it with the `ANALYZE_RESULT_SCRIPT` environment variable, which contains the path to a custom
+analyzer script.
+
+* `write status`. To integrate with a deployer Jenkins job, the status of the integration tests should be written to
+a Kubernetes entity watched by the Jenkins job. It can be either a custom resource (CR) or a native Kubernetes entity
+(deployment, pod, etc.). BDI provides the ability to write the status of the executed tests to a Kubernetes entity out
+of the box. The status is written as a Kubernetes status condition with the following fields:
+- `lastTransitionTime` - timestamp.
+- `message` - parsed results of the Robot Framework integration tests as a string.
+- `reason` - static field with the `IntegrationTestsExecutionStatus` value.
+- `status` - can be `True` or `False`. This field depends on the `type` field: it is `True` if `type` is `Ready` or
+  `Successful` and `False` if `type` is `Failed` or `In Progress`.
+- `type` - can be `Ready`, `Successful`, `Failed` or `In Progress`.
+
+For example,
+```
+lastTransitionTime: "2021-04-21T11:21:31.332Z"
+message:
+reason: IntegrationTestsExecutionStatus
+status: "True"
+type: Ready
+```
+
+In some cases it is necessary to have the `status` field as a `boolean` instead of a `string`.
+
+For example,
+```
+lastTransitionTime: "2021-04-21T11:21:31.332Z"
+message:
+reason: IntegrationTestsExecutionStatus
+status: true
+type: Ready
+```
+
+To support this, the `IS_STATUS_BOOLEAN` environment variable must be set to `true`.
+By default, `IS_STATUS_BOOLEAN` is considered to be `false`.
+
+**Note!** To use the `write status` feature in a restricted environment, the user or service account used by the
+Deployer should have permissions on the entity group with the verbs `get`, `patch` and the resources `/status` in the
+current namespace or project. For example, permissions for writing status to a Custom Resource:
+```
+- apiGroups:
+  - qubership.org
+  resources:
+  - platformmonitorings/status
+  verbs:
+  - get
+  - patch
+```
+
+To write status to some k8s entity, you should specify the entity. There are two ways to do this. The first one is to
+specify the full path. For example, if you have a `ZooKeeperService` custom resource whose `metadata.selfLink` field
+has the value `/apis/qubership.org/v1/namespaces/zookeeper-service/zookeeperservices/zookeeper`, then you should set
+the `STATUS_CUSTOM_RESOURCE_PATH` environment variable to the value from `selfLink` without the `apis` prefix and the
+`namespaces` part:
+```
+STATUS_CUSTOM_RESOURCE_PATH=qubership.org/v1/zookeeper-service/zookeeperservices/zookeeper
+```
+The second approach is to specify the path in parts using the following environment variables:
+```
+STATUS_CUSTOM_RESOURCE_GROUP=qubership.org
+STATUS_CUSTOM_RESOURCE_VERSION=v1
+STATUS_CUSTOM_RESOURCE_NAMESPACE=zookeeper-service
+STATUS_CUSTOM_RESOURCE_PLURAL=zookeeperservices
+STATUS_CUSTOM_RESOURCE_NAME=zookeeper
+```
+If your k8s pod with integration tests always writes status to a well-known custom resource, you can override all of
+these environment variables (excluding `STATUS_CUSTOM_RESOURCE_NAMESPACE`) in your Dockerfile and set the namespace in
+the helm chart.
+
+Both of these approaches work with native k8s entities too. For example,
+```
+STATUS_CUSTOM_RESOURCE_GROUP=apps
+STATUS_CUSTOM_RESOURCE_VERSION=v1
+STATUS_CUSTOM_RESOURCE_NAMESPACE=zookeeper-service
+STATUS_CUSTOM_RESOURCE_PLURAL=deployments
+STATUS_CUSTOM_RESOURCE_NAME=zookeeper-1
+```
+
+If the feature is enabled, the `write_status.py` script is called two times: the first time immediately after the
+docker entrypoint script starts, to set the `In Progress` condition, and the second time after the tests are finished
+and parsed by the `analyze results` step, to put the test results into the `message` field. The default "write status"
+script is `write_status.py`, but an inheritor image can override it with the `WRITE_STATUS_SCRIPT` environment
+variable, which contains the path to a custom "write status" script.
+
+By default, if all tests pass, BDI sets the `Ready` value in the `type` condition field. There is also the ability to
+deploy only the integration tests without any component (the component was installed before). In this case you should
+set the `ONLY_INTEGRATION_TESTS` environment variable to `true`, and BDI will set `Successful` as the value of the
+`type` condition field.
+
+The `message` field in the status condition by default contains the first line of the `result.txt` file (which is
+generated in the previous step). To write the full parsed result, set the `IS_SHORT_STATUS_MESSAGE` environment
+variable to `false`.
+
+**Important!** If you use a custom script to parse the results (`ANALYZE_RESULT_SCRIPT` is not empty), please note
+that the result should be placed in the `result.txt` file, and its first line will be used as the short status
+message.
+
+**Note!** This feature (writing status to k8s entities) is disabled by default! To turn it on, set the
+`STATUS_WRITING_ENABLED` environment variable to `true`, for example in your Dockerfile:
+```
+ENV STATUS_WRITING_ENABLED=true
+```
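+
+As mentioned in the `service checker script` step above, here is a minimal sketch of such a script. It is a
+hypothetical example: the service URL and the readiness criterion are assumptions, not part of the image; only the
+contract (the timeout argument and the exit code) comes from the entrypoint script.
+
+```python
+# service_checker.py - a minimal "service checker script" sketch.
+# The entrypoint invokes it as: python $SERVICE_CHECKER_SCRIPT <timeout-in-seconds>
+# and treats a non-zero exit code as "the service is not ready".
+import sys
+import time
+
+import requests  # available in the image via requirements.txt
+
+SERVICE_URL = "http://elasticsearch.elasticsearch-service:9200"  # placeholder URL
+
+
+def is_ready() -> bool:
+    try:
+        return requests.get(SERVICE_URL, timeout=5).status_code == 200
+    except requests.RequestException:
+        return False
+
+
+if __name__ == "__main__":
+    timeout = int(sys.argv[1]) if len(sys.argv) > 1 else 300
+    deadline = time.monotonic() + timeout
+    while time.monotonic() < deadline:
+        if is_ready():
+            sys.exit(0)  # ready: the entrypoint proceeds to the next step
+        time.sleep(10)
+    sys.exit(1)  # not ready in time: the entrypoint aborts the run
+```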
+
+# Environment Variables
+
+Docker Integration Tests uses the following environment variables:
+
+* DEBUG
+* TTYD_PORT
+* CUSTOM_ENTRYPOINT_SCRIPT
+* SERVICE_CHECKER_SCRIPT
+* SERVICE_CHECKER_SCRIPT_TIMEOUT
+* IS_TAGS_RESOLVER_ENABLED
+* TAGS_RESOLVER_SCRIPT
+* IS_ANALYZER_RESULT_ENABLED
+* ANALYZE_RESULT_SCRIPT
+* STATUS_CUSTOM_RESOURCE_GROUP
+* STATUS_CUSTOM_RESOURCE_VERSION
+* STATUS_CUSTOM_RESOURCE_NAMESPACE
+* STATUS_CUSTOM_RESOURCE_PLURAL
+* STATUS_CUSTOM_RESOURCE_NAME
+* ONLY_INTEGRATION_TESTS
+* STATUS_CUSTOM_RESOURCE_PATH
+* STATUS_WRITING_ENABLED
+* WRITE_STATUS_SCRIPT
+* IS_SHORT_STATUS_MESSAGE
+* TAGS
+* IS_STATUS_BOOLEAN
+
+We recommend overriding all of them except `TAGS`, `ONLY_INTEGRATION_TESTS`, `STATUS_CUSTOM_RESOURCE_NAMESPACE`,
+`STATUS_CUSTOM_RESOURCE_PATH` and possibly `DEBUG` in the Dockerfile, rather than forwarding them to the integration
+tests deployment environment.
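+
+As an illustration of this recommendation, an inheritor Dockerfile might pin the stable variables as shown below. The
+values are placeholders borrowed from the ZooKeeper example above; adjust them to your service.
+
+```
+FROM ghcr.io/netcracker/qubership-docker-integration-tests:main
+
+ENV SERVICE_CHECKER_SCRIPT=/opt/robot/service_checker.py \
+    STATUS_WRITING_ENABLED=true \
+    STATUS_CUSTOM_RESOURCE_GROUP=qubership.org \
+    STATUS_CUSTOM_RESOURCE_VERSION=v1 \
+    STATUS_CUSTOM_RESOURCE_PLURAL=zookeeperservices \
+    STATUS_CUSTOM_RESOURCE_NAME=zookeeper
+# TAGS, ONLY_INTEGRATION_TESTS, STATUS_CUSTOM_RESOURCE_NAMESPACE and DEBUG
+# are forwarded from the deployment environment instead.
+```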
\ No newline at end of file
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..8162261
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,15 @@
+# Security Reporting Process
+
+Please report any security issue to `opensourcegroup@netcracker.com`, where the issue will be triaged appropriately.
+
+If you know of a publicly disclosed security vulnerability, please IMMEDIATELY email `opensourcegroup@netcracker.com`
+to inform the team about the vulnerability, so we may start the patch, release, and communication process.
+
+# Security Release Process
+
+If the vulnerability is found in the latest stable release, it will be fixed in a patch version for that release.
+E.g., if an issue is found in the 2.5.0 release, then version 2.5.1 will be released with a fix.
+By default, older versions will not have security releases.
+
+If the issue doesn't affect any existing public releases, the fix for medium and high severity issues is performed
+in the main branch before releasing a new version. For low severity issues the fix can be planned for future releases.
diff --git a/demo/README.md b/demo/README.md
new file mode 100644
index 0000000..d76ee57
--- /dev/null
+++ b/demo/README.md
@@ -0,0 +1,37 @@
+# Introduction
+This file describes the `demo` example for the Base Docker Image. The example contains a `Dockerfile` which extends
+the Base Docker Image, a Robot Framework file (*.robot) with keywords and test cases, special Python files to exclude
+some tags (see the documentation), and a `docker-compose.yml` file.
+
+# Configuration
+The `tests_holder.robot` file contains Robot Framework test cases. You can add new test cases or keywords to test
+`PlatformLibrary`. To configure the `docker-compose.yml` file, you should configure the Kubernetes configuration file
+(set environment, context, etc.), mount your host folder which contains the `.kube/config` file (or a custom
+kubeconfig), and specify the `KUBECONFIG` environment variable with the path to the mounted kubeconfig file.
+For example, if you have the kubeconfig file C:/Users/test/.kube/config, you can mount drive "C" to the "/mnt"
+directory in the `volumes` block of docker-compose.yml (`- C:/:/mnt`) and set the `KUBECONFIG` environment variable
+to /mnt/Users/test/.kube/config:
+```yaml
+version: '2'
+services:
+  integration-tests:
+    build:
+      context: ./
+      dockerfile: docker/Dockerfile
+    ports:
+      - 8090:8080
+    volumes:
+      - ./docker/robot/tests:/opt/robot/tests
+      - ./output:/opt/robot/output
+      - C:/:/mnt
+    environment:
+      - KUBECONFIG=/mnt/Users/test/.kube/config
+      - DEBUG=true
+    command: ["run-robot"]
+```
+
+# Execution
+Navigate to the `demo` folder and execute:
+```bash
+docker-compose up -d --build
+```
+The `output` folder will be generated in the `demo` folder with the Robot Framework results.
\ No newline at end of file
diff --git a/demo/docker-compose.yml b/demo/docker-compose.yml
new file mode 100644
index 0000000..a607669
--- /dev/null
+++ b/demo/docker-compose.yml
@@ -0,0 +1,24 @@
+version: '2'
+services:
+  integration-tests:
+    build:
+      context: ./
+      dockerfile: docker/Dockerfile
+    ports:
+      - 8090:8080
+    volumes:
+      - ./docker/robot/tests:/opt/robot/tests
+      - ./output:/opt/robot/output
+#      - :/mnt
+    environment:
+#      - KUBECONFIG=/mnt/
+      - DEBUG=true
+      # variables for writing status in CR
+#      - STATUS_WRITING_ENABLED=true
+#      - STATUS_CUSTOM_RESOURCE_GROUP=qubership.org
+#      - STATUS_CUSTOM_RESOURCE_VERSION=v1
+#      - STATUS_CUSTOM_RESOURCE_NAMESPACE=zookeeper-service
+#      - STATUS_CUSTOM_RESOURCE_PLURAL=zookeeperservices
+#      - STATUS_CUSTOM_RESOURCE_NAME=zookeeper
+#      - ONLY_INTEGRATION_TESTS=false
+    command: ["run-robot"]
\ No newline at end of file
diff --git a/demo/docker/Dockerfile b/demo/docker/Dockerfile
new file mode 100644
index 0000000..2dfabc5
--- /dev/null
+++ b/demo/docker/Dockerfile
@@ -0,0 +1,3 @@
+FROM ghcr.io/netcracker/qubership-docker-integration-tests:main
+
+COPY docker/robot ${ROBOT_HOME}
\ No newline at end of file
diff --git a/demo/docker/robot/tests/exclusion_directory/tags_exclusion.py b/demo/docker/robot/tests/exclusion_directory/tags_exclusion.py
new file mode 100644
index 0000000..8f7b0b5
--- /dev/null
+++ b/demo/docker/robot/tests/exclusion_directory/tags_exclusion.py
@@ -0,0 +1,2 @@
+def get_excluded_tags(environ) -> dict:
+    return {'second_sample_test': 'This test should be excluded because of some reasons'}
diff --git a/demo/docker/robot/tests/tags_exclusion.py b/demo/docker/robot/tests/tags_exclusion.py
new file mode 100644
index 0000000..8775fe9
--- /dev/null
+++ b/demo/docker/robot/tests/tags_exclusion.py
@@ -0,0 +1,2 @@
+def get_excluded_tags(environ) -> list:
+    return ['third']
diff --git a/demo/docker/robot/tests/tests_holder.robot b/demo/docker/robot/tests/tests_holder.robot
new file mode 100644
index 0000000..fe5a746
--- /dev/null
+++ b/demo/docker/robot/tests/tests_holder.robot
@@ -0,0 +1,25 @@
+*** Variables ***
+${MY_VARIABLE}    variable
+
+*** Settings ***
+Library    PlatformLibrary
+
+*** Keywords ***
+Sample Keyword
+    Log To Console    this is sample keyword
+
+*** Test Cases ***
+Sample Test Case
+    [Tags]    sample_test
+    ${service}=    Get Service    elasticsearch    elasticsearch-service
+    Log To Console    ${service}
+
+Second Sample Test
+    [Tags]    second_sample_test
+    ${service}=    Get Service    zookeeper-1    zookeeper-service
+    Log To Console    ${service}
+
+Third Sample Test
+    [Tags]    third
+    ${service}=    Get Service    zookeeper-2    zookeeper-service
+    Log To Console    ${service}
\ No newline at end of file
diff --git a/docker/Dockerfile b/docker/Dockerfile
new file mode 100644
index 0000000..a23e10f
--- /dev/null
+++ b/docker/Dockerfile
@@ -0,0 +1,48 @@
+FROM python:3.13.1-alpine3.21
+
+ENV ROBOT_HOME=/opt/robot
+ENV PYTHONPATH=/usr/local/lib/python3.13/site-packages/integration_library_builtIn
+ENV IS_ANALYZER_RESULT_ENABLED=true
+ENV IS_TAGS_RESOLVER_ENABLED=true
+ENV STATUS_WRITING_ENABLED=false
+
+COPY docker/docker-entrypoint.sh /
+COPY docker/requirements.txt ${ROBOT_HOME}/requirements.txt
+COPY docker/robot_tags_resolver.py ${ROBOT_HOME}/robot_tags_resolver.py
+COPY docker/analyze_result.py ${ROBOT_HOME}/analyze_result.py
+COPY docker/write_status.py ${ROBOT_HOME}/write_status.py
+COPY integration-tests-built-in-library ${ROBOT_HOME}/integration-tests-built-in-library
+
+RUN set -x \
+    && apk add --update --no-cache bash shadow python3 vim rsync ttyd build-base apk-tools py3-yaml
+
+# Upgrade all tools to avoid vulnerabilities
+RUN set -x && apk upgrade --no-cache --available
+
+# Add unprivileged user
+RUN set -x \
+    && groupadd -r robot --gid=1000 \
+    && useradd -s /bin/bash -r -g robot --uid=1000 robot \
+    && usermod -a -G 0 robot
+
+RUN set -x \
+    && python3 -m ensurepip \
+    && rm -r /usr/lib/python*/ensurepip \
+    && pip3 install --upgrade pip setuptools \
+    && pip3 install -r ${ROBOT_HOME}/requirements.txt \
+    && pip3 install --no-cache-dir ${ROBOT_HOME}/integration-tests-built-in-library \
+    && rm -rf ${ROBOT_HOME}/integration-tests-built-in-library \
+    && rm -rf /var/cache/apk/*
+
+RUN set -x \
+    && for path in \
+        /docker-entrypoint.sh \
+    ; do \
+        chmod +x "$path"; \
+        chgrp 0 "$path"; \
+    done
+
+WORKDIR ${ROBOT_HOME}
+
+ENTRYPOINT ["/docker-entrypoint.sh"]
+CMD ["run-robot"]
diff --git a/docker/analyze_result.py b/docker/analyze_result.py
new file mode 100644
index 0000000..779a334
--- /dev/null
+++ b/docker/analyze_result.py
@@ -0,0 +1,122 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from datetime import datetime
+from enum import Enum
+import logging
+
+from robot.api import ExecutionResult
+from robot.model import TestSuite
+
+space = "\n**********************************************************************************************************\n"
+
+
+class Status(str, Enum):
+    PASS = "PASS"
+    FAIL = "FAIL"
+
+
+def analyze_result():
+    try:
+        result = ExecutionResult("./output/output.xml")
+    except Exception as e:
+        logging.error("Exception occurred while opening the tests result file: {}".format(str(e)))
+        return
+
+    logging.debug("Start parsing the Robot Framework test result")
+    file_write = open('./output/result.txt', 'w')
+    main_suite = result.suite
+    result_str = "Main Test Suite: {}\t|\tPassed: {}\t|\tFailed: {}\n".format(main_suite.name,
+                                                                              main_suite.statistics.passed,
+                                                                              main_suite.statistics.failed)
+    if main_suite.suites:
+        result_str += space
+        result_str += print_suite(main_suite.suites)
+        result_str += space
+    if main_suite.status == Status.FAIL:
+        result_str += "RESULT: TESTS FAILED\n"
+    else:
+        result_str += "RESULT: TESTS PASSED\n"
+    file_write.write(result_str)
+    file_write.close()
+    logging.debug("The result file has been saved")
+
+
+def get_keywords(entity):
+    # Collect the setup keyword, the body keywords (for tests) and the teardown keyword.
+    keywords = []
+    if entity.has_setup:
+        keywords.append(entity.setup)
+    if not isinstance(entity, TestSuite):
+        keywords.extend(entity.body.filter(keywords=True))
+    if entity.has_teardown:
+        keywords.append(entity.teardown)
+    return keywords
+
+
+def print_test_cases(test_cases, level=0):
+    result_str = ""
+    for test_case in test_cases:
+        start_time = datetime.strptime(test_case.starttime, "%Y%m%d %H:%M:%S.%f")
+        end_time = datetime.strptime(test_case.endtime, "%Y%m%d %H:%M:%S.%f")
+        duration = int((end_time - start_time).total_seconds() * 1000)  # Total time in milliseconds
+        result_str += "{}{}\t|\tStatus: '{}'\t|\tDuration: {}\n".format("\t" * level, test_case.name,
+                                                                        test_case.status, duration)
+        if test_case.status != Status.PASS:
+            keywords = get_keywords(test_case)
+            if keywords:
+                result_str += "{}Keywords:\n".format("\t" * level)
+                result_str += print_keywords(keywords, level + 1)
+        result_str += "\n"
+    return result_str
+
+
+def print_keywords(keywords, level=0):
+    result_str = ""
+    for keyword in keywords:
+        result_str += "{}{}\t|\tStatus: '{}'\n".format("\t" * level, keyword.kwname, keyword.status)
+        if keyword.status == Status.FAIL:
+            if keyword.messages:
+                result_str += "{}Messages:\n".format("\t" * level)
+                result_str += print_messages(keyword.messages, level + 1)
+            nested_keywords = get_keywords(keyword)
+            if nested_keywords:
+                result_str += "{}Keywords:\n".format("\t" * level)
+                result_str += print_keywords(nested_keywords, level + 1)
+    return result_str
+
+
+def print_messages(messages, level=0):
+    result_str = ""
+    for message in messages:
+        result_str += "{}{}\t|\tLevel: '{}'\n".format("\t" * level, message.message.replace("\n", ""), message.level)
+    return result_str
+
+
+def print_suite(suites):
+    result_str = ""
+    for suite in suites:
+        result_str += "Suite: {}\t|\tPassed: {}\t|\tFailed: {}\n".format(suite.name,
+                                                                         suite.statistics.passed,
+                                                                         suite.statistics.failed)
+        keywords = get_keywords(suite)
+        if keywords:
+            result_str += "Keywords:\n"
+            result_str += print_keywords(keywords, 1)
+        if suite.tests:
+            result_str += "Test cases:\n"
+            result_str += print_test_cases(suite.tests, 1)
+        if suite.suites:
+            result_str += print_suite(suite.suites)
+        result_str += space
+    return result_str
+
+
+analyze_result()
diff --git a/docker/docker-entrypoint.sh b/docker/docker-entrypoint.sh
new file mode 100644
index 0000000..1962ae1
--- /dev/null
+++ b/docker/docker-entrypoint.sh
@@ -0,0 +1,115 @@
+#!/bin/bash
+
+export ROBOT_OPTIONS="--loglevel=info --outputdir output"
+export ROBOT_SYSLOG_FILE=./output/syslog.txt
+export ROBOT_SYSLOG_LEVEL=DEBUG
+
+if [[ "$READONLY_CONTAINER_FILE_SYSTEM_ENABLED" == "true" ]]; then
+    echo "Read-only file system configuration enabled, copying test files from temp directory..."
+    TMP_FOLDER_READ_FS="/opt/robot_tmp"
+    cp -r "${TMP_FOLDER_READ_FS}/." "${ROBOT_HOME}/"
+fi
+
+if [[ "$DEBUG" == true ]]; then
+    set -x
+    printenv
+fi
+
+run_ttyd() {
+    if [[ -z "$TTYD_PORT" ]]; then
+        TTYD_PORT=8080
+    fi
+
+    exec ttyd -p ${TTYD_PORT} bash
+}
+
+run_custom_script() {
+    if [[ -n "$CUSTOM_ENTRYPOINT_SCRIPT" ]]; then
+        ${CUSTOM_ENTRYPOINT_SCRIPT}
+    fi
+}
+
+create_tags_resolver_array() {
+    tags_resolver_script="robot_tags_resolver.py"
+    if [[ -n "$TAGS_RESOLVER_SCRIPT" ]]; then
+        tags_resolver_script=${TAGS_RESOLVER_SCRIPT}
+    fi
+    # The resolver prints "<robot -e option>;<description>;", so read the two
+    # semicolon-delimited fields into the array as whole elements.
+    tags_resolver_array=()
+    while IFS= read -r -d ";" line; do
+        tags_resolver_array+=("$line")
+    done < <(python ${tags_resolver_script})
+}
+
+# Process some known arguments to run integration tests
+case $1 in
+    custom)
+        run_custom_script
+        ;;
+    run-robot)
+        status_writing_script="write_status.py"
+        if [[ ${STATUS_WRITING_ENABLED} == "true" ]]; then
+            if [[ -n "$WRITE_STATUS_SCRIPT" ]]; then
+                status_writing_script=${WRITE_STATUS_SCRIPT}
+            fi
+            if ! python "$status_writing_script" "in_progress"; then
+                echo "Cannot set 'in progress' status for integration tests"
+            fi
+        fi
+
+        if [[ -n "$SERVICE_CHECKER_SCRIPT" ]]; then
+            timeout=300
+            if [[ -n "$SERVICE_CHECKER_SCRIPT_TIMEOUT" ]]; then
+                timeout=${SERVICE_CHECKER_SCRIPT_TIMEOUT}
+            fi
+            python ${SERVICE_CHECKER_SCRIPT} ${timeout}
+            if [[ $? -ne 0 ]]; then
+                echo "Service was not ready within $timeout seconds, or an exception occurred"
+                exit 1
+            fi
+        fi
+
+        excluded_tags=""
+        if [[ ${IS_TAGS_RESOLVER_ENABLED} == "true" ]]; then
+            create_tags_resolver_array
+            echo "Excluded tags: ${tags_resolver_array[0]}"
+            echo "${tags_resolver_array[1]}" # print all excluded tags with the matched reasons
+            excluded_tags=${tags_resolver_array[0]}
+        fi
+
+        if [[ -z "$TAGS" ]]; then
+            robot ${excluded_tags} ./tests
+        else
+            robot -i ${TAGS} ${excluded_tags} ./tests
+        fi
+        robot_result=$?
+        if [[ ${robot_result} -ne 0 ]]; then
+            touch ./output/result.txt
+            echo "Robot Framework process was interrupted with code ${robot_result}"
+        fi
+
+        analyze_result_script="analyze_result.py"
+        if [[ ${IS_ANALYZER_RESULT_ENABLED} == "true" ]]; then
+            if [[ -n "$ANALYZE_RESULT_SCRIPT" ]]; then
+                analyze_result_script=${ANALYZE_RESULT_SCRIPT}
+            fi
+            python "${analyze_result_script}"
+        fi
+
+        if [[ ${STATUS_WRITING_ENABLED} == "true" ]]; then
+            if [[ ${IS_ANALYZER_RESULT_ENABLED} != "true" ]]; then
+                # The status message is built from result.txt, so generate it even
+                # when the analyzer step itself is disabled.
+                python "${analyze_result_script}"
+            fi
+
+            if ! python "$status_writing_script" "update"; then
+                echo "Cannot update status for integration tests"
+            fi
+        fi
+        run_ttyd
+        ;;
+    run-ttyd)
+        run_ttyd
+        ;;
+esac
+
+# Otherwise just run the specified command
+exec "$@"
diff --git a/docker/requirements.txt b/docker/requirements.txt
new file mode 100644
index 0000000..d6b0cd5
--- /dev/null
+++ b/docker/requirements.txt
@@ -0,0 +1,17 @@
+websocket-client==1.3.3
+urllib3==2.2.2
+requests==2.32.0
+google-auth==2.10.0
+cachetools==5.0.0
+pyyaml==6.0.1
+certifi==2024.07.04
+wrapt==1.14.1
+robotframework==6.0.1
+kubernetes==12.0.1
+openshift==0.12.1
+requests_oauthlib==1.3.0
+pyjwt==2.4.0
+deprecated==1.2.13
+boto3==1.35.79
+botocore==1.35.79
+idna==3.7
diff --git a/docker/robot_tags_resolver.py b/docker/robot_tags_resolver.py
new file mode 100644
index 0000000..d863a04
--- /dev/null
+++ b/docker/robot_tags_resolver.py
@@ -0,0 +1,57 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import importlib.util
+
+
+def create_exclude_tags_robot_command(tags) -> str:
+    return f'-e {"OR".join(tags)}' if tags else ""
+
+
+def create_exclude_tags_description(tags: dict) -> str:
+    if not tags:
+        return ""
+    title = "The following tags will be excluded with provided reason\n"
+    description_list = []
+    for tag, reason in tags.items():
+        description_list.append(f'{tag}: {reason}')
+    tags_with_description = "\n".join(description_list)
+    return f'{title}{tags_with_description}'
+
+
+def resolve_robot_tags(start_directory="./tests", tags_resolver_module="tags_exclusion.py"):
+    tags = []
+    tags_with_description = {}
+    environ = os.environ
+    for root, dirs, files in os.walk(start_directory):
+        for file in files:
+            if file == tags_resolver_module:
+                spec = importlib.util.spec_from_file_location(file[:-3], location=os.path.join(root, file))
+                foo = importlib.util.module_from_spec(spec)
+                spec.loader.exec_module(foo)
+                new_tags = foo.get_excluded_tags(environ)
+                if isinstance(new_tags, dict):
+                    tags += list(new_tags.keys())
+                    tags_with_description.update(new_tags)
+                if isinstance(new_tags, list):
+                    tags += new_tags
+    tags = set(tags)
+    excluded_tags_line = create_exclude_tags_robot_command(tags)
+    excluded_tags_description = create_exclude_tags_description(tags_with_description)
+    # The entrypoint script parses this semicolon-delimited output.
+    print(f'{excluded_tags_line};{excluded_tags_description};')
+
+
+resolve_robot_tags()
diff --git a/docker/write_status.py b/docker/write_status.py
new file mode 100644
index 0000000..637b6e9
--- /dev/null
+++ b/docker/write_status.py
@@ -0,0 +1,158 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from PlatformLibrary import PlatformLibrary
+from datetime import datetime
+from enum import Enum
+import os
+import sys
+import re
+
+
+class CustomResourceStatusResolver:
+    def __init__(self, **kwargs):
+        self.path = os.getenv("STATUS_CUSTOM_RESOURCE_PATH")
+        if self.path is None:
+            self.group = os.getenv("STATUS_CUSTOM_RESOURCE_GROUP")
+            self.version = os.getenv("STATUS_CUSTOM_RESOURCE_VERSION")
+            self.namespace = os.getenv("STATUS_CUSTOM_RESOURCE_NAMESPACE")
+            self.plural = os.getenv("STATUS_CUSTOM_RESOURCE_PLURAL")
+            self.name = os.getenv("STATUS_CUSTOM_RESOURCE_NAME")
+        else:
+            self.resolve_custom_resource_by_path()
+
+    def resolve_custom_resource_by_path(self):
+        parts = self.path.split("/")
+        if len(parts) != 5:
+            raise Exception(f'Path to custom resource must contain exactly five parts, {len(parts)} given')
+        self.group = parts[0]
+        self.version = parts[1]
+        self.namespace = parts[2]
+        self.plural = parts[3]
+        self.name = parts[4]
+
+    def check_cr_path(self):
+        errors = []
+        for attr, value in self.__dict__.items():
+            if attr != "path" and not value:
+                errors.append(attr)
+        if errors:
+            raise Exception(f'{",".join(errors)} attribute{"s" if len(errors) > 1 else ""} must not be empty to find '
+                            f'custom resource for status update')
+
+    def update_custom_resource_status_condition(self, condition):
+        self.check_cr_path()
+        client = PlatformLibrary(managed_by_operator="true")
+        status_obj = client.get_namespaced_custom_object_status(self.group,
+                                                                self.version,
+                                                                self.namespace,
+                                                                self.plural,
+                                                                self.name)
+        status = status_obj.get('status')
+        conditions = []
+        if status is not None:
+            # The entity may have a status without any conditions yet.
+            conditions = status.get('conditions') or []
+        else:
+            status = {}
+            status_obj['status'] = status
+        is_presented = False
+        for i, con in enumerate(conditions):
+            if con['reason'] == "IntegrationTestsExecutionStatus":
+                conditions[i] = condition
+                is_presented = True
+                break
+        if not is_presented:
+            conditions.append(condition)
+
+        status['conditions'] = conditions
+        client.custom_objects_api.patch_namespaced_custom_object_status(self.group,
+                                                                        self.version,
+                                                                        self.namespace,
+                                                                        self.plural,
+                                                                        self.name,
+                                                                        status_obj)
+
+
+class ConditionType(Enum):
+    SUCCESSFUL = "Successful"
+    FAILED = "Failed"
+    IN_PROGRESS = 'In Progress'
+    READY = 'Ready'
+
+
+class ConditionStatus(Enum):
+    TRUE = "True"
+    FALSE = "False"
+    UNKNOWN = "Unknown"
+
+
+def str2bool(v):
+    return v.lower() in ("yes", "true", "t", "1")
+
+
+class Condition:
+    def __init__(self,
+                 is_in_progress: bool = False,
+                 message: str = None,
+                 reason: str = None,
+                 status: ConditionStatus = None,
+                 type: ConditionType = None):
+        self.is_in_progress = is_in_progress
+        self.message = message
+        self.reason = reason if reason is not None else "IntegrationTestsExecutionStatus"
+        self.status = status if status is not None else ConditionStatus.UNKNOWN
+        self.type = type if type is not None else ConditionType.READY
+
+    def get_condition_body(self):
+        status_value = self.status.value
+        if str2bool(os.getenv("IS_STATUS_BOOLEAN", "false")):
+            status_value = str2bool(status_value)
+        return {"message": self.message,
+                "reason": self.reason,
+                "status": status_value,
+                "type": self.type.value,
+                "lastTransitionTime": datetime.utcnow().isoformat()[:-3] + 'Z'}
+
+    def generate_condition_state(self):
+        if self.is_in_progress:
+            self.generate_in_progress_condition_state()
+            return
+        with open('./output/result.txt', 'r') as file:
+            self.message = file.read()
+        if "RESULT: TESTS PASSED" in self.message:
+            self.status = ConditionStatus.TRUE
+            if os.getenv("ONLY_INTEGRATION_TESTS") and os.getenv("ONLY_INTEGRATION_TESTS").lower() == "true":
+                self.type = ConditionType.SUCCESSFUL
+            else:
+                self.type = ConditionType.READY
+        else:
+            self.status = ConditionStatus.FALSE
+            self.type = ConditionType.FAILED
+        if os.getenv("IS_SHORT_STATUS_MESSAGE", "true").lower() == "true":
+            result_str = self.message.split("\n")[0]
+            self.message = re.sub(r'\t', " ", result_str)
+
+    def generate_in_progress_condition_state(self):
+        self.message = "Service in progress"
+        self.type = ConditionType.IN_PROGRESS
+        self.status = ConditionStatus.FALSE
+
+
+if __name__ == '__main__':
+    argv = sys.argv[1:]
+    is_in_progress = len(argv) > 0 and argv[0] == "in_progress"
+
+    condition = Condition(is_in_progress=is_in_progress)
+    condition.generate_condition_state()
+    condition_body = condition.get_condition_body()
+    status_resolver = CustomResourceStatusResolver()
+    status_resolver.update_custom_resource_status_condition(condition_body)
diff --git a/documentation/integration_tests_builtin_library_documentation/PlatformLibrary.html b/documentation/integration_tests_builtin_library_documentation/PlatformLibrary.html
new file mode 100644
index 0000000..ff6b405
--- /dev/null
+++ b/documentation/integration_tests_builtin_library_documentation/PlatformLibrary.html
@@ -0,0 +1,912 @@
+<!-- Robot Framework libdoc-generated HTML page for PlatformLibrary (912 lines of static markup omitted here).
+     With JavaScript disabled it renders the standard libdoc fallback, "Opening library documentation failed",
+     with hints to enable JavaScript, use a modern browser, and check the browser's JavaScript error log. -->
diff --git a/integration-tests-built-in-library/LICENSE b/integration-tests-built-in-library/LICENSE
new file mode 100644
index 0000000..335ea9d
--- /dev/null
+++ b/integration-tests-built-in-library/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2018 The Python Packaging Authority
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/integration-tests-built-in-library/README.md b/integration-tests-built-in-library/README.md
new file mode 100644
index 0000000..ef14387
--- /dev/null
+++ b/integration-tests-built-in-library/README.md
@@ -0,0 +1,4 @@
+# Integration tests built-in Robot Framework library
+
+This is a Robot Framework library which provides basic keywords for writing RF integration tests for Kubernetes-based
+applications.
diff --git a/integration-tests-built-in-library/integration_library_builtIn/FileSystemS3.py b/integration-tests-built-in-library/integration_library_builtIn/FileSystemS3.py
new file mode 100644
index 0000000..f718aeb
--- /dev/null
+++ b/integration-tests-built-in-library/integration_library_builtIn/FileSystemS3.py
@@ -0,0 +1,39 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import shutil
+
+
+class FileSystem:
+
+    def exists(self, path):
+        return os.path.exists(path)
+
+    def makedirs(self, path):
+        if not self.exists(path):
+            return os.makedirs(path)
+
+    def listdir(self, path):
+        return os.listdir(path)
+
+    def remove(self, path):
+        if self.exists(path):
+            os.remove(path)
+
+    def rmdir(self, path):
+        if self.exists(path):
+            os.rmdir(path)
+
+    def rmtree(self, path):
+        if self.exists(path):
+            # shutil is the standard library module for recursive directory
+            # removal; the original call referenced an undefined 'fsutil' name
+            shutil.rmtree(path)
diff --git a/integration-tests-built-in-library/integration_library_builtIn/KubernetesClient.py b/integration-tests-built-in-library/integration_library_builtIn/KubernetesClient.py
new file mode 100644
index 0000000..d0f7a86
--- /dev/null
+++ b/integration-tests-built-in-library/integration_library_builtIn/KubernetesClient.py
@@ -0,0 +1,160 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from kubernetes import client
+from openshift.dynamic import DynamicClient
+
+
+class KubernetesClient(object):
+    def __init__(self, api_client):
+        self.api_client = api_client
+        self.k8s_apps_v1_client = client.AppsV1Api(api_client)
+        self.dyn_client: DynamicClient = DynamicClient(self.api_client)
+
+    def get_deployment_entity(self, name: str, namespace: str):
+        return self.k8s_apps_v1_client.read_namespaced_deployment(name, namespace)
+
+    def get_deployment_entities(self, namespace: str) -> list:
+        return list(self.k8s_apps_v1_client.list_namespaced_deployment(namespace).items)
+
+    def get_deployment_entity_names_for_service(self, namespace: str, service: str, label: str = 'clusterName') -> list:
+        deployments = self.get_deployment_entities(namespace)
+        return [deployment.metadata.name for deployment in deployments
+                if deployment.spec.template.metadata.labels.get(label, '') == service]
+
+    def get_inactive_deployment_entities_for_service(self,
+                                                     namespace: str,
+                                                     service: str,
+                                                     label: str = 'clusterName') -> list:
+        deployment_configs = self.get_deployment_entities(namespace)
+        inactive_deployments = []
+        for dc in deployment_configs:
+            if dc.spec.template.metadata.labels.get(label, '') == service:
+                if dc.status.replicas is None \
+                        or dc.status.unavailable_replicas is not None:
+                    inactive_deployments.append(dc)
+        return inactive_deployments
+
+    def get_inactive_deployment_entities_names_for_service(self,
+                                                           namespace: str,
+                                                           service: str,
+                                                           label: str = 'clusterName') -> list:
+        inactive_dc = self.get_inactive_deployment_entities_for_service(namespace, service, label)
+        inactive_deployment_names = []
+        for deployment in inactive_dc:
+            inactive_deployment_names.append(deployment.metadata.name)
+        return inactive_deployment_names
+
+    def get_first_deployment_entity_name_for_service(self,
+                                                     namespace: str,
+                                                     service: str,
+                                                     label: str = 'clusterName') -> str:
+        deployments = self.get_deployment_entities(namespace)
+        for deployment in deployments:
+            if deployment.spec.template.metadata.labels.get(label, '') == service:
+                return deployment.metadata.name
+        return None
+
+    def 
get_inactive_deployment_entities_count_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> int: + return len(self.get_inactive_deployment_entities_for_service(namespace, service, label)) + + def get_active_deployment_entities_for_service(self, + namespace: str, + service: str, + label: str ='clusterName') -> list: + deployments = self.get_deployment_entities(namespace) + active_deployments = [] + + for deployment in deployments: + if deployment.spec.template.metadata.labels.get(label, '') == service \ + and not deployment.status.unavailable_replicas \ + and deployment.status.available_replicas is not None: + active_deployments.append(deployment) + return active_deployments + + def get_active_deployment_entities_names_for_service(self, + namespace: str, + service: str, + label: str ='clusterName') -> list: + + active_deployments = self.get_active_deployment_entities_for_service(namespace, service, label) + active_deployments_names = [] + for deployment in active_deployments: + active_deployments_names.append(deployment.metadata.name) + return active_deployments_names + + def get_active_deployment_entities_count_for_service(self, + namespace: str, + service: str, + label: str ='clusterName') -> int: + return len(self.get_active_deployment_entities_for_service(namespace, service, label)) + + def get_deployment_entities_count_for_service(self, namespace: str, service: str, label: str ='clusterName'): + return len(self.get_deployment_entity_names_for_service(namespace, service, label)) + + def get_deployment_scale(self, name: str, namespace: str): + return self.k8s_apps_v1_client.read_namespaced_deployment_scale(name, namespace) + + def set_deployment_scale(self, name: str, namespace: str, scale): + self.k8s_apps_v1_client.patch_namespaced_deployment_scale(name, namespace, scale) + + def set_replicas_for_deployment_entity(self, name: str, namespace: str, replicas: int = 1): + scale = self.get_deployment_scale(name, namespace) + scale.spec.replicas = replicas + scale.status.replicas = replicas + self.set_deployment_scale(name, namespace, scale) + + def scale_up_deployment_entity(self, name: str, namespace: str): + scale = self.get_deployment_scale(name, namespace) + if scale.spec.replicas is None: + scale.spec.replicas = 1 + else: + scale.spec.replicas += 1 + scale.status.replicas += 1 + self.set_deployment_scale(name, namespace, scale) + + def scale_down_deployment_entity(self, name: str, namespace: str): + scale = self.get_deployment_scale(name, namespace) + if scale.spec.replicas is None or not scale.spec.replicas: + scale.spec.replicas = 0 + else: + scale.spec.replicas -= 1 + if scale.status.replicas: + scale.status.replicas -= 1 + self.set_deployment_scale(name, namespace, scale) + + def get_deployment_entity_pod_selector_labels(self, name: str, namespace: str) -> dict: + deployments = self.get_deployment_entities(namespace) + for deployment in deployments: + if deployment.metadata.name == name: + return deployment.spec.selector.match_labels + return None + + def patch_namespaced_deployment_entity(self, name: str, namespace: str, body): + self.k8s_apps_v1_client.patch_namespaced_deployment(name, namespace, body) + + def get_deployment_entity_ready_replicas(self, deployment): + return deployment.status.ready_replicas + + def get_deployment_entity_unavailable_replicas(self, deployment): + return deployment.status.unavailable_replicas + + def create_deployment_entity(self, body, namespace: str): + return 
self.k8s_apps_v1_client.create_namespaced_deployment(namespace=namespace, body=body)
+
+    def delete_deployment_entity(self, name: str, namespace: str):
+        return self.k8s_apps_v1_client.delete_namespaced_deployment(name=name, namespace=namespace)
\ No newline at end of file
diff --git a/integration-tests-built-in-library/integration_library_builtIn/MonitoringLibrary.py b/integration-tests-built-in-library/integration_library_builtIn/MonitoringLibrary.py
new file mode 100644
index 0000000..023d113
--- /dev/null
+++ b/integration-tests-built-in-library/integration_library_builtIn/MonitoringLibrary.py
@@ -0,0 +1,162 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+
+import requests
+import urllib3
+from requests.auth import HTTPBasicAuth
+from robot.libraries.BuiltIn import BuiltIn
+
+from PlatformLibrary import PlatformLibrary
+
+
+class MonitoringLibrary(object):
+    """This Robot Framework library provides access to the Prometheus API for working with alert rules, and the
+    ability to manage GrafanaDashboard Custom Resources as Kubernetes entities.
+
+    To access Prometheus, specify the `host` parameter with the protocol, host and port of Prometheus. By default,
+    MonitoringLibrary is imported without this parameter.
+    To perform operations only on GrafanaDashboard Custom Resources, the library can be imported without specifying
+    the `host`.
+
+    These are examples of importing the library with and without Prometheus host initialization.
+
+    | Library | MonitoringLibrary | host=http://${PROMETHEUS_HOST}:${PROMETHEUS_PORT} |
+    | Library | MonitoringLibrary |
+    """
+
+    def __init__(self, host=None, username=None, password=None):
+        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+
+        self._host = host
+        self._api_rules_url = f'{host}/api/v1/rules'
+        self._headers = {'Content-Type': 'application/json'}
+        self._auth = HTTPBasicAuth(username, password)
+        self.k8s_lib = PlatformLibrary()
+
+    def get_alert_status(self, alert_name, namespace):
+        """Returns status of the specified alert name. Possible return values: inactive, firing, pending
+
+        Examples:
+        | Get Alert Status | Elasticsearch_Is_Down_Alarm | elasticsearch-service
+        """
+        response = requests.get(self._api_rules_url, auth=self._auth)
+        json_content = json.loads(response.text)
+        groups = json_content["data"]["groups"]
+        namespace_in_query = f'namespace="{namespace}"'
+        for group in groups:
+            for rule in group["rules"]:
+                if rule['name'] == alert_name:
+                    namespace_label = rule['labels'].get('namespace')
+                    if namespace_label == namespace:
+                        return rule["state"]
+                    elif namespace_label is None and namespace_in_query in rule["query"]:
+                        BuiltIn().run_keyword('log to console',
+                                              f"Warning! There is no namespace label in {alert_name} alert")
+                        return rule["state"]
+        return None
+
+    def get_metric_values(self, metric_name: str):
+        _api_query_url = f'{self._host}/api/v1/query?query={metric_name}'
+        response = requests.get(_api_query_url, auth=self._auth)
+        json_content = json.loads(response.text)
+        return json_content["data"]
+
+    def get_full_metric_values(self, metric_name: str):
+        """Returns all values for the specified metric name.
+
+        Examples:
+        | Get Full Metric Values | kafka_kafka_Threading_ThreadCount
+        """
+        data = self.get_metric_values(metric_name)
+        all_values = []
+        for res in data["result"]:
+            all_values.append(res["value"][1])
+        return all_values
+
+    def get_last_metric_value(self, metric_name: str):
+        """Returns the last value for the specified metric name.
+
+        Examples:
+        | Get Last Metric Value | kafka_kafka_Threading_ThreadCount
+        """
+        data = self.get_metric_values(metric_name)
+        return data["result"][0]["value"][1]
+
+    def get_dashboard_in_namespace(self, namespace, name):
+        """Returns Kubernetes 'GrafanaDashboard' custom object with body configuration as JSON object in project/namespace.
+
+        :param namespace: namespace of existing GrafanaDashboard
+        :param name: the name of GrafanaDashboard to return
+
+        Example:
+        | Get Dashboard In Namespace | prometheus-operator | grafanadashboard_name |
+        """
+        return self.k8s_lib.get_namespaced_custom_object(group='integreatly.org', version='v1alpha1',
+                                                         namespace=namespace, plural='grafanadashboards', name=name)
+
+    def create_dashboard_in_namespace(self, namespace, body):
+        """Creates Kubernetes 'GrafanaDashboard' custom object with body configuration as JSON object in project/namespace.
+
+        :param namespace: namespace where GrafanaDashboard is to be created
+        :param body: JSON object for creating GrafanaDashboard
+
+        Example:
+        | Create Dashboard In Namespace | prometheus-operator | grafanadashboard_body |
+        """
+        return self.k8s_lib.create_namespaced_custom_object(group='integreatly.org', version='v1alpha1',
+                                                            namespace=namespace, plural='grafanadashboards', body=body)
+
+    def delete_dashboard_in_namespace(self, namespace, name):
+        """Deletes Kubernetes 'GrafanaDashboard' custom object in project/namespace.
+
+        :param namespace: namespace of existing GrafanaDashboard
+        :param name: the name of GrafanaDashboard to delete
+
+        Example:
+        | Delete Dashboard In Namespace | prometheus-operator | grafanadashboard_name |
+        """
+        return self.k8s_lib.delete_namespaced_custom_object(group='integreatly.org', version='v1alpha1',
+                                                            namespace=namespace, plural='grafanadashboards', name=name)
+
+    def patch_dashboard_in_namespace(self, namespace, name, body):
+        """Patches Kubernetes 'GrafanaDashboard' custom object with body configuration as JSON object in project/namespace.
+
+        :param namespace: namespace of existing GrafanaDashboard
+        :param name: the name of GrafanaDashboard to patch
+        :param body: JSON object to patch GrafanaDashboard
+
+        Example:
+        | Patch Dashboard In Namespace | prometheus-operator | test_dashboard | grafanadashboard_body |
+        """
+        return self.k8s_lib.patch_namespaced_custom_object(group='integreatly.org', version='v1alpha1',
+                                                           namespace=namespace, plural='grafanadashboards', name=name,
+                                                           body=body)
+
+    def replace_dashboard_in_namespace(self, namespace, name, body):
+        """Replaces Kubernetes 'GrafanaDashboard' custom object with body configuration as JSON object in project/namespace.
+ + :param namespace: namespace of existing GrafanaDashboard + :param name: the name of GrafanaDashboard to replace + :param body: JSON object for replace GrafanaDashboard + + Example: + | Replace Dashboard In Namespace | prometheus-operator | test_dashboard | grafanadashboard_body | + """ + return self.k8s_lib.replace_namespaced_custom_object(group='integreatly.org', version='v1alpha1', + namespace=namespace, plural='grafanadashboards', name=name, + body=body) diff --git a/integration-tests-built-in-library/integration_library_builtIn/OAuthLibrary.py b/integration-tests-built-in-library/integration_library_builtIn/OAuthLibrary.py new file mode 100644 index 0000000..331071b --- /dev/null +++ b/integration-tests-built-in-library/integration_library_builtIn/OAuthLibrary.py @@ -0,0 +1,147 @@ +# Copyright 2024-2025 NetCracker Technology Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from contextlib import suppress + +import jwt +import requests +from oauthlib.oauth2 import MobileApplicationClient +from requests_oauthlib import OAuth2Session +from robot.api import logger + +os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' + + +class OAuthLibrary(object): + """ + This Robot Framework library provides API to communicate with Identity Provider. It allows you to register own + client with desired name, receive a token for the client, and remove the client if you no longer need it. + + This is an example of import library with Identity Provider parameters. + + | Library | OAuthLibrary | url=http://identity-management.security-services-ci.svc:8080 | registration_token=1BK2ztNwKMlO0fHKocPQW2glUC0Tg4aN | username=username | password=password | + """ + + def __init__(self, url, registration_token, username, password, registration_endpoint="/register", + grant_type="implicit"): + self.session = requests.session() + self.url = url + self.registration_token = registration_token + self.username = username + self.password = password + self.registration_endpoint = registration_endpoint + self.grant_type = grant_type + self.scope = '' + + def __del__(self): + self.session.close() + + def register_client(self, client_name: str, scope='profile openid'): + """ + Registers client with specified name in Identity Provider. 
+ :param client_name: the name of new client + + Example: + | Register Client | elasticsearch-integration-tests-client | + """ + headers = { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': 'Bearer %s' % str(self.registration_token) + } + + data = { + "client_name": str(client_name), + "redirect_uris": [self.url], + "application_type": "web", + "grant_types": self.grant_type + } + + if 'authorization_code' in self.grant_type or 'implicit' in self.grant_type \ + or 'client_credentials' in self.grant_type: + data['scope'] = scope + self.scope = scope + + response = requests.post(f'{self.url}{self.registration_endpoint}', headers=headers, json=data) + with suppress(Exception): + logger.info(f'response json: {json.dumps(response.json())}', html=True) + + return { + "client_id": response.json()['client_id'], + "client_secret": response.json()['client_secret'] + } + + def delete_client(self, client_id): + """ + Deletes client from Identity Provider by specified client identifier. + :param client_id: the identifier of client + + Example: + | Delete Client | esd5-b3o3-dasdgdf-174 | + """ + token = self.get_token(client_id) + + headers = { + 'Accept': 'application/json', + 'Content-Type': 'application/json', + 'Authorization': 'Bearer %s' % str(token) + } + + response = requests.get(f'{self.url}/api/clients', headers=headers) + + clients = response.json() + + for client in clients: + if client["clientId"] == client_id: + response = requests.delete(f'{self.url}/api/clients/{client["id"]}', headers=headers) + with suppress(Exception): + logger.info(f'response json: {json.dumps(response.json())}', html=True) + break + + def get_token(self, client_id): + """ + Obtains JWT access token for specified client. + :param client_id: the identifier of client + + Example: + | Get Token | esd5-b3o3-dasdgdf-174 | + """ + client = MobileApplicationClient(client_id) + fitbit = OAuth2Session(client_id, client=client, scope=self.scope) + authorization_url, state = fitbit.authorization_url(f'{self.url}/authorize') + self.__login() + response = self.session.post(authorization_url) + response.raise_for_status() + token = fitbit.token_from_fragment(response.url).get('access_token') + return token + + def get_tenant(self, token): + """ + Receives tenant name from JWT access token. + :param token: JWT access token + + Example: + | Get Tenant | eb53o3dasdgdf174... | + """ + tenant = jwt.decode(token, verify=False)['tenant-id'] + return tenant + + def __login(self): + login_url = f'{self.url}/login' + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + data = {'login': self.username, 'password': self.password} + response = self.session.post(login_url, data=data, headers=headers) + response.raise_for_status() diff --git a/integration-tests-built-in-library/integration_library_builtIn/OpenShiftClient.py b/integration-tests-built-in-library/integration_library_builtIn/OpenShiftClient.py new file mode 100644 index 0000000..d66b855 --- /dev/null +++ b/integration-tests-built-in-library/integration_library_builtIn/OpenShiftClient.py @@ -0,0 +1,169 @@ +# Copyright 2024-2025 NetCracker Technology Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openshift.dynamic import DynamicClient + + +class OpenShiftClient(object): + def __init__(self, api_client): + self.api_client = api_client + self.dyn_client = DynamicClient(api_client) + self._patch_scale_dict = {'spec': {'replicas': 1}} + + def get_deployment_entity(self, name: str, namespace: str): + return self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig').get(namespace=namespace, name=name) + + def get_deployment_entities(self, namespace: str): + deployment_configs = self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') + return deployment_configs.get(namespace=namespace).items + + def get_deployment_entity_names_for_service(self, namespace: str, service: str, label: str = 'clusterName') -> list: + deployment_configs = self.get_deployment_entities(namespace) + deployment_config_names = [] + for deployment in deployment_configs: + if deployment.spec.template.metadata.labels.get(label, '') == service: + deployment_config_names.append(deployment.metadata.name) + return deployment_config_names + + def get_inactive_deployment_entities_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> list: + deployment_configs = self.get_deployment_entities(namespace) + inactive_dc = [] + + for dc in deployment_configs: + if dc.spec.template.metadata.labels.get(label, '') == service: + if dc.status.availableReplicas == 0 or not dc.status.replicas: + inactive_dc.append(dc) + return inactive_dc + + def get_inactive_deployment_entities_names_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> list: + inactive_dc = self.get_inactive_deployment_entities_for_service(namespace, service, label) + inactive_dc_names = [] + for deployment in inactive_dc: + inactive_dc_names.append(deployment.metadata.name) + return inactive_dc_names + + def get_inactive_deployment_entities_count_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> int: + deployment_configs = self.get_deployment_entities(namespace) + if not deployment_configs: + return 0 + return len(self.get_inactive_deployment_entities_for_service(namespace, service, label)) + + + def get_first_deployment_entity_name_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> str: + deployment_configs = self.get_deployment_entities(namespace) + for dc in deployment_configs: + if dc.spec.template.metadata.labels.get(label, '') == service: + return dc.metadata.name + return None + + def get_active_deployment_entities_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> list: + deployment_configs = self.get_deployment_entities(namespace) + active_dc = [] + + for dc in deployment_configs: + if dc.spec.template.metadata.labels.get(label, '') == service \ + and not dc.status.unavailableReplicas \ + and dc.status.availableReplicas != 0: + active_dc.append(dc) + return active_dc + + def get_active_deployment_entities_names_for_service(self, + namespace: str, + service: str, + label: str ='clusterName') -> list: + + active_dc = 
self.get_active_deployment_entities_for_service(namespace, service, label) + active_dc_names = [] + for deployment in active_dc: + active_dc_names.append(deployment.metadata.name) + return active_dc_names + + def get_active_deployment_entities_count_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> int: + deployment_configs = self.get_deployment_entities(namespace) + if not deployment_configs: + return 0 + return len(self.get_active_deployment_entities_for_service(namespace, service, label)) + + def get_deployment_entities_count_for_service(self, namespace: str, service: str, label: str = 'clusterName'): + return len(self.get_deployment_entity_names_for_service(namespace, service, label)) + + def set_replicas_for_deployment_entity(self, name: str, namespace: str, replicas: int = 1): + self._patch_scale_dict['spec']['replicas'] = replicas + deployment_configs = self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') + deployment_configs.patch(body=self._patch_scale_dict, name=name, namespace=namespace) + + def scale_up_deployment_entity(self, name: str, namespace: str): + deployment_configs = self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') + replicas = None + dc = deployment_configs.get(name, namespace) + if dc: + replicas = dc.spec.replicas + if replicas is None: + replicas = 1 + else: + replicas += 1 + self._patch_scale_dict['spec']['replicas'] = replicas + deployment_configs.patch(body=self._patch_scale_dict, name=name, namespace=namespace) + + def scale_down_deployment_entity(self, name: str, namespace: str): + deployment_configs = self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') + replicas = None + dc = deployment_configs.get(name, namespace) + if dc: + replicas = dc.spec.replicas + if replicas: + replicas -= 1 + self._patch_scale_dict['spec']['replicas'] = replicas + deployment_configs.patch(body=self._patch_scale_dict, name=name, namespace=namespace) + + def get_deployment_entity_pod_selector_labels(self, name: str, namespace: str) -> dict: + deployment_configs = self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') + return deployment_configs.get(name=name, namespace=namespace).spec.selector + + # TODO: check that body is dictionary + def patch_namespaced_deployment_entity(self, name: str, namespace: str, body): + deployment_configs = self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') + deployment_configs.patch(name=name, namespace=namespace, body=body) + + def get_deployment_entity_ready_replicas(self, deployment): + return deployment.status.readyReplicas + + def get_deployment_entity_unavailable_replicas(self, deployment): + return deployment.status.unavailableReplicas + + def create_deployment_entity(self, body, namespace: str): + return self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') \ + .create(body=body, namespace=namespace) + + def delete_deployment_entity(self, name: str, namespace: str): + return self.dyn_client.resources.get(api_version='apps.openshift.io/v1', kind='DeploymentConfig') \ + .delete(name=name, namespace=namespace) diff --git a/integration-tests-built-in-library/integration_library_builtIn/PlatformLibrary.py b/integration-tests-built-in-library/integration_library_builtIn/PlatformLibrary.py new file mode 100644 index 0000000..344c7a8 --- /dev/null +++ 
b/integration-tests-built-in-library/integration_library_builtIn/PlatformLibrary.py
@@ -0,0 +1,1635 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import yaml
+from typing import List
+
+import kubernetes
+import urllib3
+from kubernetes import client
+from kubernetes import config
+from kubernetes.stream import stream
+from openshift.dynamic import ResourceInstance
+from deprecated import deprecated
+
+from KubernetesClient import KubernetesClient
+from OpenShiftClient import OpenShiftClient
+
+
+def get_kubernetes_api_client(config_file=None, context=None, persist_config=True):
+    try:
+        config.load_incluster_config()
+        return kubernetes.client.ApiClient()
+    except config.ConfigException:
+        return kubernetes.config.new_client_from_config(config_file=config_file,
+                                                        context=context,
+                                                        persist_config=persist_config)
+
+
+class PlatformLibrary(object):
+    """This is a Robot Framework library for communicating with the Kubernetes/OpenShift platform.
+
+    = Table of contents =
+
+    - `Usage`
+    - `Client Versions`
+    - `Examples`
+    - `Importing`
+    - `Shortcuts`
+    - `Keywords`
+
+    = Usage =
+
+    This library uses Kubernetes or OpenShift clients to communicate with Kubernetes/OpenShift API servers.
+    The OpenShift API extends the Kubernetes API. Since the Custom Resource Definition feature became available in
+    Kubernetes (OpenShift 3.11), OpenShift provides its own custom entities such as `Deployment Config` or `Route`
+    via CRDs. It is assumed that there are no custom OpenShift entities if your service is installed by a Kubernetes
+    Operator or a Helm chart directly; in this case only Kubernetes entities are present, and PlatformLibrary
+    therefore uses the Kubernetes client only. If your tested service uses custom OpenShift entities (for example,
+    `Deployment Config`) and is not installed by a Helm chart (for example, it is installed by DVM), you can set the
+    `managed_by_operator` parameter to `false` (or do nothing, because this is the default value); PlatformLibrary
+    will then use the OpenShift client for custom OpenShift entities.
+
+    = Client Versions =
+
+    *Note!* `PlatformLibrary` uses particular versions of the `kubernetes` and `openshift` python libraries which do
+    not guarantee backward compatibility. This library is intended for OpenShift 3.11 and higher or Kubernetes 1.16
+    and higher.
+
+    = Examples =
+
+    These are examples of keyword usage.
+ + | `Get Deployment Entities` | elasticsearch-cluster | + | `Get Deployment Entity Names For Service` | cassandra | cassandra-backup-daemon | label=name | + | `Get First Deployment Entity Name For Service` | postgres-service | monitoring-collector | label=app | + | `Get Active Deployment Entities Count For Service` | cassandra | cassandra-backup-daemon | label=name | + | `Get Deployment Entities Count For Service` | postgres-service | monitoring-collector | label=app | + | `Set Replicas For Deployment Entity` | monitoring-collector | postgres-service | + | `Scale Up Deployment Entity` | cassandra-backup-daemon | cassandra | + | `Scale Down Deployment Entity` | monitoring-collector | postgres-service | + + + """ + ROBOT_LIBRARY_VERSION = '0.0.1' + + def __init__(self, + managed_by_operator="false", + config_file=None, + context=None, + persist_config=True): + """A platform can be chosen between Kubernetes and OpenShift at library import time. + + Examples of `managed_by_operator` variable usage: + + | =Setting= | =Value= | =Value= | =Comment= | + | Library | PlatformLibrary | managed_by_operator="true" | Kubernetes client will be used | + | Library | PlatformLibrary | managed_by_operator="false" | OpenShift client will be used | + | Library | PlatformLibrary | | OpenShift client will be used | + + To login to Kubernetes/OpenShift API server the `PlatformLibrary` tries to use `in-cluster` config by default. + It means that the library tries to read Kubernetes service account token in the current Kubernetes pod with + trusted certs. If files do not exist default `kubeconfig` (`~/.kube/config`) setting is used. There is an + ability to use custom `kubeconfig` file to get Kubernetes/OpenShift token. + + Examples of custom Kubernetes config file usage: + | =Setting= | =Value= | =Value= | =Value= | =Value= | =Comment= | + | Library | PlatformLibrary | config_file=/mnt/kubeconfig/config | context=cluster/admin | persist_config=False | Custom config file will be used with predefined context - `cluster/admin`, GCP token will not be refreshed | + | Library | PlatformLibrary | config_file=/mnt/kubeconfig/config | context=cluster/admin | | Custom config file will be used with predefined context - `cluster/admin`, GCP token will be refreshed | + | Library | PlatformLibrary | config_file=/mnt/kubeconfig/config | | | Custom config file will be used with current context for provided file, GCP token will be refreshed | + | Library | PlatformLibrary | | | | Default config file will be used with current context for provided file, GCP token will be refreshed | + """ + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + self.k8s_api_client = get_kubernetes_api_client(config_file=config_file, + context=context, + persist_config=persist_config) + + if managed_by_operator == "true": + self.platform_client = KubernetesClient(self.k8s_api_client) + self.k8s_apps_v1_client = self.platform_client.k8s_apps_v1_client + else: + self.platform_client = OpenShiftClient(self.k8s_api_client) + self.k8s_apps_v1_client = client.AppsV1Api(self.k8s_api_client) + + self.k8s_core_v1_client = client.CoreV1Api(self.k8s_api_client) + self.custom_objects_api = client.CustomObjectsApi(self.k8s_api_client) + self.networking_api = client.NetworkingV1Api(self.k8s_api_client) + + def _get_resource(self, + api_version: str, + kind: str, + label_selector: str = None, + namespace: str = None, + **kwargs) -> List[ResourceInstance]: + """ + Returns specified resources by project/namespace. 
+ If resource API or resource not found, returns empty list. + + :param api_version: OpenShift API group version. + :param kind: OpenShift API kind. + :param label_selector: Comma-separated labels selector in format key=value + :param namespace: Namespace name to find resources + """ + + return self.platform_client.dyn_client.resources.get(api_version=api_version, kind=kind) \ + .get(namespace=namespace, label_selector=label_selector).items + + def get_custom_resources(self, api_version: str, kind: str, namespace: str) -> List[ResourceInstance]: + """ + Returns custom resources by project/namespace. + :param api_version: ApiVersion to find resource + :param kind: Kind to find resource + :param namespace: namespace to find resource + + Example: + | Get Custom Resources | v1alpha1 | integreatly.org | prometheus-operator | + """ + return self._get_resource(api_version=api_version, kind=kind, namespace=namespace) + + def get_custom_resource(self, api_version: str, kind: str, namespace: str, name: str) -> ResourceInstance: + """ + Returns custom resource by name and project/namespace. + :param api_version: ApiVersion to find resource + :param kind: Kind to find resource + :param namespace: namespace to find resource + :param name: name to find resource + + Example: + | Get Custom Resource | v1alpha1 | integreatly.org | prometheus-operator | test_dashboard | + """ + ret = self.get_custom_resources(api_version, kind, namespace) + items = [item for item in ret if name == item.metadata.name] + return items[0] + + def get_ingress_api_version(self): + """ + Returns current api version of Ingress objects. + + Example: + | Get Ingress Api Version | + """ + return self.networking_api.get_api_resources().group_version + + def get_ingresses(self, namespace: str) -> List[ResourceInstance]: + """ + Returns list of Ingress objects in specified project/namespace. + + :param namespace: namespace to find ingresses + + Example: + | Get Ingresses | cassandra | + """ + api_version = self.get_ingress_api_version() + return self._get_resource(api_version=api_version, kind='Ingress', namespace=namespace) + + def get_ingress(self, name: str, namespace: str) -> ResourceInstance: + """ + Returns ingress by name in specified project/namespace. + + :param namespace: namespace to find ingress + :param name: name to find ingress + + Example: + | Get Ingress | cassandra-ingress | cassandra | + """ + ret = self.get_ingresses(namespace) + items = [item for item in ret if name == item.metadata.name] + return items[0] + + def get_ingress_url(self, name, namespace): + """Returns url of given Ingress in project/namespace. + + :param namespace: namespace to find ingress + :param name: name to find ingress + + Example: + | Get Ingress Url | cassandra-ingress | cassandra | + """ + ret = self.get_ingress(name, namespace) + return "http://" + ret.spec.rules[0].host + + def get_routes(self, namespace: str) -> List[ResourceInstance]: + """ + Returns list of routes in specified project/namespace. + + :param namespace: namespace to find routes + :return: a list of found routes in the namespace + + Example: + | Get Routes | cassandra | + """ + return self._get_resource(api_version='route.openshift.io/v1', kind='Route', namespace=namespace) + + def get_route(self, name: str, namespace: str) -> ResourceInstance: + """ + Returns route by name in specified project/namespace. 
+ + :param namespace: namespace to find route + :param name: name to find route + :return: a found route in the namespace + + Example: + | Get Route | cassandra-route | cassandra | + """ + ret = self.get_routes(namespace) + items = [item for item in ret if name == item.metadata.name] + return items[0] + + def get_route_url(self, name, namespace): + """Gets url of given Route in project/namespace. + + :param namespace: namespace to find route + :param name: name to find route + + Example: + | Get Route Url | cassandra-route | cassandra | + """ + ret = self.get_route(name, namespace) + return "http://" + ret.spec.host + + def create_namespaced_custom_object(self, group, version, namespace, plural, body): + """Create Kubernetes custom object with body configuration as JSON object in project/namespace. + + :param group: the custom resource's group name + :param version: the custom resource's version + :param namespace: the custom resource's namespace + :param plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. + :param body: the JSON schema of the Resource to create. + + Example: + | Create Namespaced Custom Object | integreatly.org | v1alpha1 | prometheus-operator | grafanadashboards | dashboard_body | + """ + return self.custom_objects_api.create_namespaced_custom_object(group, version, namespace, plural, body, + pretty='true') + + def get_namespaced_custom_object(self, group, version, namespace, plural, name): + """Returns existing Kubernetes custom object by provided group, version, plural, name in project/namespace. + + :param group: the custom resource's group name + :param version: the custom resource's version + :param namespace: the custom resource's namespace + :param plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. + :param name: the name of custom object to return. + + Example: + | Get Namespaced Custom Object | integreatly.org | v1alpha1 | prometheus-operator | grafanadashboards | dashboard_vault | + """ + return self.custom_objects_api.get_namespaced_custom_object(group, version, namespace, plural, name) + + def get_namespaced_custom_object_status(self, group, version, namespace, plural, name): + """Returns status of existing Kubernetes custom object by provided group, version, plural, name in project/namespace. + + :param group: the custom resource's group name + :param version: the custom resource's version + :param namespace: the custom resource's namespace + :param plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. + :param name: the name of custom object to return. + + Example: + | Get Namespaced Custom Object Status| integreatly.org | v1alpha1 | prometheus-operator | grafanadashboards | dashboard_vault | + """ + return self.custom_objects_api.get_namespaced_custom_object_status(group, version, namespace, plural, name) + + def replace_namespaced_custom_object(self, group, version, namespace, plural, name, body): + """Replaces Kubernetes custom object with body configuration as JSON object in project/namespace. + + :param group: the custom resource's group name + :param version: the custom resource's version + :param namespace: the custom resource's namespace + :param plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. + :param name: the name of custom object to return. + :param body: the JSON schema of the Resource to create. 
+ + Example: + | Replace Namespaced Custom Object | integreatly.org | v1alpha1 | prometheus-operator | grafanadashboards | test_dashboard | dashboard_body | + """ + return self.custom_objects_api.replace_namespaced_custom_object(group, version, namespace, plural, name, body) + + def patch_namespaced_custom_object(self, group, version, namespace, plural, name, body): + """Patches existing Kubernetes custom object by provided body in project/namespace. + + :param group: the custom resource's group name + :param version: the custom resource's version + :param namespace: the custom resource's namespace + :param plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. + :param name: the name of custom object to update + :param body: the JSON schema of the Resource to update. + + Example: + | Patch Namespaced Custom Object | integreatly.org | v1alpha1 | prometheus-operator | grafanadashboards | dashboard_vault | body + """ + return self.custom_objects_api.patch_namespaced_custom_object(group, version, namespace, plural, name, body) + + def delete_namespaced_custom_object(self, group, version, namespace, plural, name): + """Delete existing custom object by provided group, api version, plural, name in project/namespace. + + :param group: the custom resource's group name + :param version: the custom resource's version + :param namespace: the custom resource's namespace + :param plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. + :param name: the name of custom object to delete. + + Example: + | Delete Namespaced Custom Object | integreatly.org | v1alpha1 | prometheus-operator | grafanadashboards | dashboard_vault | + """ + return self.custom_objects_api.delete_namespaced_custom_object(group, version, namespace, plural, name) + + def get_daemon_sets(self, namespace: str) -> List[ResourceInstance]: + """ + Returns list of daemon sets in specified project/namespace. + + :param namespace: namespace to find daemon sets + :return: a list of found daemon sets in the namespace + + Example: + | Get Daemon Sets | prometheus-operator | + """ + return self._get_resource(api_version='apps/v1', kind='DaemonSet', namespace=namespace) + + def get_daemon_set(self, name: str, namespace: str) -> ResourceInstance: + """ + Returns daemon set by name in specified project/namespace. + + :param namespace: namespace to find daemon set + :param name: name to find daemon set + :return: a found daemon set in the namespace + + Example: + | Get Daemon Set | node-exporter | prometheus-operator | + """ + ret = self.get_daemon_sets(namespace) + items = [item for item in ret if name == item.metadata.name] + return items[0] + + def get_service(self, name: str, namespace: str): + """Returns Kubernetes `Service` configuration as JSON object. + + Method raises an Exception if `Service` or `namespace` is not found. + + Example: + | Get Service | cassandra-dc-dc1 | cassandra | + """ + return self.k8s_core_v1_client.read_namespaced_service(name, namespace) + + def create_service(self, body, namespace: str): + """Creates Kubernetes `Service` with body configuration as JSON object in specified project/namespace. + + Example: + | Create Service | | cassandra | + """ + return self.k8s_core_v1_client.create_namespaced_service(namespace=namespace, body=body) + + def create_service_from_file(self, file_path, namespace: str): + """Creates Kubernetes `Service` by specified file path in project/namespace. + The file must be in the yaml format. 
+ + Example: + | Create Service From File | /service.yaml | cassandra | + """ + + body = self._parse_yaml_from_file(file_path) + return self.create_service(body=body, namespace=namespace) + + def delete_service(self, name: str, namespace: str): + """Deletes Kubernetes `Service` by specified name in project/namespace. + + Example: + | Delete Service | test-application | cassandra | + """ + + return self.k8s_core_v1_client.delete_namespaced_service(name=name, namespace=namespace) + + def get_service_selector(self, name: str, namespace: str) -> dict: + """Returns Kubernetes `Service` labels selector as dictionary. + This selector is used by service to look up relative Kubernetes `pods`. + + Method raises an Exception if `Service` or `namespace` is not found. + + Example: + | Get Service Selector | cassandra-dc-dc1 | cassandra | + """ + service = self.k8s_core_v1_client.read_namespaced_service(name, namespace) + return service.spec.selector + + def get_deployment_entity(self, name: str, namespace: str): + """Returns `deployment/deployment config` configuration. + Example: + | Get Deployment Entity | elasticsearch-1 | elasticsearch-service | + """ + return self.platform_client.get_deployment_entity(name, namespace) + + def get_deployment_entities(self, namespace: str) -> list: + """Returns list of `deployments` (Kubernetes client) or `deployment configs` (OpenShift client) + which belong to taken `namespace` / `project`. Each element of the list is `object` which describes + current `deployment` / `deployment config` configuration. + + Example: + + | Get Deployment Entities | kafka-cluster | + """ + return self.platform_client.get_deployment_entities(namespace) + + def get_deployment_entity_names_for_service(self, namespace: str, service: str, label: str = 'clusterName') -> list: + """Returns list of `deployments` (Kubernetes client) or `deployment configs` (OpenShift client) names. + Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is + implemented by special `label` for deployment entity. The name of this `label` is specified by developer, + value is Kubernetes/OpenShift `service` name. The keyword finds all `deployment entity` names by + namespace/project and label (`label` argument is name of label, `service` argument is value). + + Examples: + | Get Deployment Entity Names For Service | kafka-cluster | kafka | + | Get Deployment Entity Names For Service | postgres-service | monitoring-collector | label=app | + """ + return self.platform_client.get_deployment_entity_names_for_service(namespace, service, label) + + def get_first_deployment_entity_name_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> str: + """Returns first found `deployment` (Kubernetes client) or `deployment config` (OpenShift client) name. + Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is + implemented by special `label` for deployment entity. The name of this `label` is specified by developer, + value is Kubernetes/OpenShift `service` name. The keyword finds first `deployment entity` name by + namespace/project and label (`label` argument is name of label, `service` argument is the value of this label). 
+
+        Examples:
+        | Get First Deployment Entity Name For Service | kafka-cluster | kafka |
+        | Get First Deployment Entity Name For Service | postgres-service | monitoring-collector | label=app |
+        """
+        return self.platform_client.get_first_deployment_entity_name_for_service(namespace, service, label)
+
+    def get_inactive_deployment_entities_for_service(self,
+                                                     namespace: str,
+                                                     service: str,
+                                                     label: str = 'clusterName') -> list:
+        """Returns list of inactive `deployments` (Kubernetes client) or `deployment configs` (OpenShift client).
+        Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is
+        implemented by special `label` for deployment entity. The name of this `label` is specified by developer,
+        value is Kubernetes/OpenShift `service` name. The keyword finds all inactive (there are no available replicas)
+        `deployment entities` by namespace/project, label (`label` argument is name of label, `service` argument is the
+        value of this label) and returns them.
+
+        Examples:
+        | Get Inactive Deployment Entities For Service | kafka-cluster | kafka |
+        | Get Inactive Deployment Entities For Service | postgres-service | monitoring-collector | label=app |
+        """
+        return self.platform_client.get_inactive_deployment_entities_for_service(namespace, service, label)
+
+    def get_inactive_deployment_entities_names_for_service(self,
+                                                           namespace: str,
+                                                           service: str,
+                                                           label: str = 'clusterName') -> list:
+        """Returns list with names of inactive `deployments` (Kubernetes client) or `deployment configs` (OpenShift client).
+        Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is
+        implemented by special `label` for deployment entity. The name of this `label` is specified by developer,
+        value is Kubernetes/OpenShift `service` name. The keyword finds all inactive (there are no available replicas)
+        `deployment entities` by namespace/project, label (`label` argument is name of label, `service` argument is the
+        value of this label) and returns their names.
+
+        Examples:
+        | Get Inactive Deployment Entities Names For Service | kafka-cluster | kafka |
+        | Get Inactive Deployment Entities Names For Service | postgres-service | monitoring-collector | label=app |
+        """
+        return self.platform_client.get_inactive_deployment_entities_names_for_service(namespace, service, label)
+
+    def get_inactive_deployment_entities_count_for_service(self,
+                                                           namespace: str,
+                                                           service: str,
+                                                           label: str = 'clusterName') -> int:
+        """Returns number of inactive `deployments` (Kubernetes client) or `deployment configs` (OpenShift client).
+        Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is
+        implemented by special `label` for deployment entity. The name of this `label` is specified by developer,
+        value is Kubernetes/OpenShift `service` name. The keyword finds all inactive (there are no available replicas)
+        `deployment entities` by namespace/project, label (`label` argument is name of label, `service` argument is the
+        value of this label) and returns their count.
+ + Examples: + | Get Inactive Deployment Entities Count For Service | kafka-cluster | kafka | + | Get Inactive Deployment Entities Count For Service | postgres-service | monitoring-collector | label=app | + """ + return self.platform_client.get_inactive_deployment_entities_count_for_service(namespace, service, label) + + def get_inactive_deployment_entities_count(self, deployment_entity_names: list, namespace: str) -> int: + """Returns number of inactive `deployments/deployment configs`. `Deployment entity` is inactive if it has no + replicas. Deployment entities are found by name and project/namespace. + + Example: + | Get Inactive Deployment Entities Count | | cassandra | + """ + counter = 0 + for deployment_entity_name in deployment_entity_names: + deployment_entity = self.get_deployment_entity(deployment_entity_name, namespace) + if not deployment_entity.status.replicas: + counter += 1 + return counter + + def get_active_deployment_entities_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> list: + """Returns list of active `deployments` (Kubernetes client) or `deployment configs` (OpenShift client). + Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is + implemented by special `label` for deployment entity. The name of this `label` is specified by developer, + value is Kubernetes/OpenShift `service` name. The keyword finds all active (there are no unavailable replicas) + `deployment entities` by namespace/project, label (`label` argument is name of label, `service` argument is the + value of this label) and returns its. + + Examples: + | Get Active Deployment Entities For Service | kafka-cluster | kafka | + | Get Active Deployment Entities For Service | postgres-service | monitoring-collector | label=app | + """ + return self.platform_client.get_active_deployment_entities_for_service(namespace, service, label) + + def get_active_deployment_entities_names_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> list: + """Returns list with names of active `deployments` (Kubernetes client) or `deployment configs` (OpenShift client). + Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is + implemented by special `label` for deployment entity. The name of this `label` is specified by developer, + value is Kubernetes/OpenShift `service` name. The keyword finds all active (there are no unavailable replicas) + `deployment entities` by namespace/project, label (`label` argument is name of label, `service` argument is the + value of this label) and returns its names. + + Examples: + | Get Active Deployment Entities Names For Service | kafka-cluster | kafka | + | Get Active Deployment Entities Names For Service | postgres-service | monitoring-collector | label=app | + """ + return self.platform_client.get_active_deployment_entities_names_for_service(namespace, service, label) + + def get_active_deployment_entities_count_for_service(self, + namespace: str, + service: str, + label: str = 'clusterName') -> int: + """Returns number of active `deployments` (Kubernetes client) or `deployment configs` (OpenShift client). + Supposed that all deployment entities are matched on the particular Kubernetes service. This matching is + implemented by special `label` for deployment entity. The name of this `label` is specified by developer, + value is Kubernetes/OpenShift `service` name. 
The keyword finds all active (there are no unavailable replicas) + `deployment entities` by namespace/project, label (`label` argument is name of label, `service` argument is the + value of this label) and returns its count. + + Examples: + | Get Active Deployment Entities Count For Service | kafka-cluster | kafka | + | Get Active Deployment Entities Count For Service | postgres-service | monitoring-collector | label=app | + """ + return self.platform_client.get_active_deployment_entities_count_for_service(namespace, service, label) + + def get_active_deployment_entities_count(self, deployment_entity_names: list, namespace: str) -> int: + """Returns number of active `deployments/deployment configs`. `Deployment entity` is active if it has no + replicas in unavailable status. Deployment entities are found by name and project/namespace. + + Example: + | Get Active Deployment Entities Count | | cassandra | + """ + counter = 0 + for deployment_entity_name in deployment_entity_names: + deployment_entity = self.get_deployment_entity(deployment_entity_name, namespace) + if not self.platform_client.get_deployment_entity_unavailable_replicas(deployment_entity) \ + and deployment_entity.status.replicas: + counter += 1 + return counter + + def create_deployment_entity(self, body, namespace: str): + + """Creates `deployment` (Kubernetes client) or `deployment config` (OpenShift client) with body configuration + as JSON object in specified project/namespace. + + Examples: + | Create Deployment Entity | | elasticsearch-service | + """ + return self.platform_client.create_deployment_entity(body=body, namespace=namespace) + + def create_deployment_entity_from_file(self, file_path, namespace: str): + + """Creates `deployment` (Kubernetes client) or `deployment config` (OpenShift client) by specified + file path in project/namespace. The file must be in the yaml format. + + Examples: + | Create Deployment Entity From File | /deployment.yaml | elasticsearch-service | + """ + body = self._parse_yaml_from_file(file_path) + + return self.create_deployment_entity(body=body, namespace=namespace) + + def delete_deployment_entity(self, name: str, namespace: str): + + """Deletes `deployment` (Kubernetes client) or `deployment config` (OpenShift client) by specified + name in project/namespace. + + Examples: + | Delete Deployment Entity | test-application | prometheus-operator | + """ + return self.platform_client.delete_deployment_entity(name=name, namespace=namespace) + + def check_service_is_scaled(self, deployment_entity_names, namespace: str, direction="up", + timeout=300) -> bool: + """Returns `True` if all given `deployments/deployment configs` are scaled. They can be scaled "up" or "down". + `direction` variable defines direction of scale and should be set without quote symbols. "down" direction means + that all given deployment entities have no replicas. "up" direction means that all given deployment entities have + only "ready" replicas. Deployment entities are found by name and project/namespace. Deployment entities names can + be passed as a list or string value. In case of string value type use a space separator. + `timeout` variable (in seconds) is used to cancel waiting circle. + Method raises an Exception if `direction` variable is not "up" or "down" or it passed with quote symbols. 
+
+    def check_service_is_scaled(self, deployment_entity_names, namespace: str, direction="up",
+                                timeout=300) -> bool:
+        """Returns `True` if all given `deployments/deployment configs` are scaled. They can be scaled "up" or "down".
+        The `direction` variable defines the direction of the scale and should be set without quote symbols. The
+        "down" direction means that all given deployment entities have no replicas. The "up" direction means that all
+        given deployment entities have only "ready" replicas. Deployment entities are found by name and
+        project/namespace. Deployment entity names can be passed as a list or as a string value; in the string case,
+        use a space as separator. The `timeout` variable (in seconds) is used to cancel the waiting loop.
+        Method raises an Exception if the `direction` variable is not "up" or "down" or if it is passed with quote
+        symbols.
+
+        Examples:
+        | Check Service Is Scaled | | elasticsearch-service | direction=down | timeout=450 |
+        | Check Service Is Scaled | | elasticsearch-service | direction=down |
+        | Check Service Is Scaled | deployment_entity_name | elasticsearch-service |
+        """
+        direction = direction.lower()
+
+        if direction in ('"up"', '"down"', "'up'", "'down'"):
+            raise Exception('direction parameter (up or down) must be set without quote symbols')
+        elif direction not in ("up", "down"):
+            raise Exception(f'direction argument should be "up" or "down" but {direction} is given')
+
+        if isinstance(deployment_entity_names, str):
+            deployment_entity_names = deployment_entity_names.split(' ')
+
+        timeout_start = time.time()
+        check_func = self.get_active_deployment_entities_count \
+            if direction == "up" else self.get_inactive_deployment_entities_count
+
+        while time.time() <= timeout_start + timeout:
+            if len(deployment_entity_names) == check_func(deployment_entity_names, namespace):
+                return True
+            time.sleep(5)
+
+        return False
+
+    def scale_down_deployment_entities_by_service_name(self,
+                                                       service_name: str,
+                                                       namespace: str,
+                                                       with_check: bool = False,
+                                                       timeout: int = 300):
+        """Scales down (sets 0 replicas) all `deployments/deployment configs` which manage the same `Pods` as the
+        given `Service`. Deployment entities are found by Service name and project/namespace.
+        `with_check` is used to wait until all deployment entities have no replicas.
+        If `with_check=True`, the `timeout` variable (in seconds) specifies the timeout for the checker.
+
+        Examples:
+        | Scale Down Deployment Entities By Service Name | elasticsearch | elasticsearch-service | with_check=True | timeout=450 |
+        | Scale Down Deployment Entities By Service Name | elasticsearch | elasticsearch-service | with_check=True |
+        | Scale Down Deployment Entities By Service Name | elasticsearch | elasticsearch-service |
+        """
+        deployment_entity_names = self.get_deployment_entity_names_by_service_name(service_name, namespace)
+        for deployment_entity_name in deployment_entity_names:
+            self.set_replicas_for_deployment_entity(deployment_entity_name, namespace, replicas=0)
+        if with_check:
+            self.check_service_is_scaled(deployment_entity_names, namespace, direction="down", timeout=timeout)
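+
+    # A hedged usage sketch (plain Python, hypothetical names): scale a service down
+    # and wait until none of its deployment entities have replicas left.
+    #
+    #   library.scale_down_deployment_entities_by_service_name(
+    #       'elasticsearch', 'elasticsearch-service', with_check=True, timeout=450)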
+
+    def scale_up_deployment_entities_by_service_name(self,
+                                                     service_name: str,
+                                                     namespace: str,
+                                                     with_check: bool = False,
+                                                     timeout: int = 300,
+                                                     **kwargs):
+        """Scales up `deployments/deployment configs` which manage the same `Pods` as the given `Service`.
+        If the `replicas` parameter is present, the method sets its value as the replicas value for all found
+        deployment entities. If this parameter is not present, the number of replicas for each found deployment
+        entity is increased by one. Technically, `replicas=0` can be used to scale down found deployment entities,
+        but the `Scale Down Deployment Entities By Service Name` method is recommended for that purpose.
+        Deployment entities are found by Service name and project/namespace.
+        `with_check` is used to wait until all deployment entities have no replicas in a non-"ready" status.
+        If `with_check=True`, the `timeout` variable (in seconds) specifies the timeout for the checker.
+
+        Examples:
+        | Scale Up Deployment Entities By Service Name | elasticsearch | elasticsearch-service | replicas=2 | with_check=True | timeout=450 |
+        | Scale Up Deployment Entities By Service Name | elasticsearch | elasticsearch-service | with_check=True | timeout=450 |
+        | Scale Up Deployment Entities By Service Name | elasticsearch | elasticsearch-service | with_check=True |
+        | Scale Up Deployment Entities By Service Name | elasticsearch | elasticsearch-service |
+        """
+        replicas = kwargs.get('replicas', None)
+        if replicas is not None:
+            replicas = int(replicas)
+        deployment_entity_names = self.get_deployment_entity_names_by_service_name(service_name, namespace)
+        for deployment_entity_name in deployment_entity_names:
+            if replicas is not None:
+                self.set_replicas_for_deployment_entity(deployment_entity_name, namespace, replicas=replicas)
+            else:
+                self.scale_up_deployment_entity(deployment_entity_name, namespace)
+        if with_check:
+            direction = "down" if replicas == 0 else "up"
+            self.check_service_is_scaled(deployment_entity_names, namespace, direction=direction, timeout=timeout)
+
+    def get_deployment_entities_count_for_service(self, namespace: str, service: str, label: str = 'clusterName'):
+        """Returns the number of `deployments` (Kubernetes client) or `deployment configs` (OpenShift client).
+        It is assumed that all deployment entities are matched to a particular Kubernetes service. This matching is
+        implemented by a special `label` on the deployment entity. The name of this `label` is specified by the
+        developer; the value is the Kubernetes/OpenShift `service` name. The keyword finds all `deployment entities`
+        by namespace/project and label (the `label` argument is the name of the label, the `service` argument is its
+        value) and returns their count.
+
+        Examples:
+        | Get Deployment Entities Count For Service | kafka-cluster | kafka |
+        | Get Deployment Entities Count For Service | postgres-service | monitoring-collector | label=app |
+        """
+        return self.platform_client.get_deployment_entities_count_for_service(namespace, service, label)
+
+    def set_replicas_for_deployment_entity(self, name: str, namespace: str, replicas: int = 1):
+        """Sets the number of replicas for the found `deployment` (Kubernetes client) or `deployment config`
+        (OpenShift client). The pair of `name` and `namespace/project` specifies a unique deployment entity. Method
+        finds the deployment entity by name and namespace/project and scales it horizontally (patches the deployment
+        entity and sets the given number of replicas). For each additional replica a new Kubernetes pod is created.
+
+        Example:
+        | Set Replicas For Deployment Entity | cassandra-backup-daemon | cassandra | replicas=3 |
+        | Set Replicas For Deployment Entity | monitoring-collector | postgres-service |
+        """
+        self.platform_client.set_replicas_for_deployment_entity(name, namespace, replicas)
+
+    def scale_up_deployment_entity(self, name: str, namespace: str):
+        """Increases by one the number of replicas for the found `deployment` (Kubernetes client) or
+        `deployment config` (OpenShift client). The pair of `name` and `namespace/project` specifies a unique
+        deployment entity. Method finds the deployment entity by name and namespace/project, recognizes the current
+        number of replicas and scales it horizontally (patches the deployment entity and increases the number of
+        replicas by one - a new Kubernetes pod will be created).
+
+        Example:
+        | Scale Up Deployment Entity | elasticsearch-1 | elasticsearch-cluster |
+        """
+        self.platform_client.scale_up_deployment_entity(name, namespace)
+
+    def scale_down_deployment_entity(self, name: str, namespace: str):
+        """Decreases by one the number of replicas for the found `deployment` (Kubernetes client) or
+        `deployment config` (OpenShift client). The pair of `name` and `namespace/project` specifies a unique
+        deployment entity. Method finds the deployment entity by name and namespace/project, recognizes the current
+        number of replicas and scales it horizontally (patches the deployment entity and decreases the number of
+        replicas by one - some Kubernetes pod will be removed).
+
+        Example:
+        | Scale Down Deployment Entity | elasticsearch-1 | elasticsearch-cluster |
+        """
+        self.platform_client.scale_down_deployment_entity(name, namespace)
+
+    def get_deployment_entity_pod_selector_labels(self, name: str, namespace: str) -> dict:
+        """Returns a dictionary of matched labels for the `deployment`/`deployment config`.
+        These matched labels are used by the `deployment`/`deployment config` to watch its pods.
+        The deployment entity name plus namespace/project defines a unique deployment entity.
+        Method finds the deployment entity and returns its selector labels.
+
+        Example:
+        | Get Deployment Entity Pod Selector Labels | elasticsearch-1 | elasticsearch-cluster |
+        """
+        return self.platform_client.get_deployment_entity_pod_selector_labels(name, namespace)
+
+    def get_deployment_entity_names_by_selector(self, namespace: str, selector: dict) -> list:
+        """Returns a list of `deployment`/`deployment config` names by the given selector.
+        Method finds deployment entities by their own `pod` selector. A deployment entity looks up its pods by its
+        own label selector; if this label selector contains all labels from the `selector` variable, the deployment
+        entity is added to the result list.
+
+        Example:
+        | Get Deployment Entity Names By Selector | elasticsearch-service | |
+        """
+        deployment_entities = self.get_deployment_entities(namespace)
+        return [deployment_entity.metadata.name for deployment_entity in deployment_entities
+                if self._do_labels_satisfy_selector(deployment_entity.spec.template.metadata.labels, selector)]
+
+    def get_deployment_entity_names_by_service_name(self, service_name: str, namespace: str) -> list:
+        """Returns a list of `deployment`/`deployment config` names by the given Kubernetes service name and
+        `project`/`namespace`. There is no direct mapping between a `deployment entity` and a `service`. It is
+        assumed that the deployment entity watches the same Kubernetes `pods` as the Kubernetes service, so the
+        `deployment entity` matches the `service` by the transitivity property.
+
+        Method raises an Exception if `Service` or `namespace` is not found.
+
+        Example:
+        | Get Deployment Entity Names By Service Name | monitoring-collector | postgres-service |
+        """
+        selector = self.get_service_selector(service_name, namespace)
+        return self.get_deployment_entity_names_by_selector(namespace, selector)
+
+    def get_pod_names_for_deployment_entity(self, deployment_entity_name: str, namespace: str) -> list:
+        """Returns a list of Kubernetes pod names found by `deployment entity` name and namespace/project.
+
+        Example:
+        | Get Pod Names For Deployment Entity | elasticsearch-1 | elasticsearch-cluster |
+        """
+        matched_labels = self.get_deployment_entity_pod_selector_labels(deployment_entity_name, namespace)
+        pods = self.get_pods(namespace)
+        if not pods or not matched_labels:
+            return []
+        return [pod.metadata.name for pod in pods
+                if self._do_labels_satisfy_selector(pod.metadata.labels, matched_labels)]
+
+    @staticmethod
+    def _do_labels_satisfy_selector(labels: dict, selector: dict):
+        selector_pairs = list(selector.items())
+        label_pairs = list(labels.items())
+        if len(selector_pairs) > len(label_pairs):
+            return False
+        for pair in selector_pairs:
+            if pair not in label_pairs:
+                return False
+        return True
+
+    @staticmethod
+    def _parse_yaml_from_file(file_path):
+        # Use a context manager so the file handle is closed deterministically.
+        with open(file_path) as yaml_file:
+            return yaml.safe_load(yaml_file)
+
+    def patch_namespaced_deployment_entity(self, name: str, namespace: str, body):
+        """Patches `deployment/deployment config` with a new body.
+        The deployment entity is found by name and namespace/project.
+        `body` is the part of the deployment entity spec which should be patched.
+
+        Method raises an Exception if the deployment entity is not found.
+
+        Example:
+        | Patch Namespaced Deployment Entity | elasticsearch-1 | elasticsearch-cluster | |
+        """
+        self.platform_client.patch_namespaced_deployment_entity(name, namespace, body)
+
+    def get_environment_variables_for_deployment_entity_container(self,
+                                                                  name: str,
+                                                                  namespace: str,
+                                                                  container_name: str,
+                                                                  variable_names: list) -> dict:
+        """Returns a dictionary of `deployment/deployment config` environment variables (key-values) for a container.
+        The deployment entity is found by name and namespace/project.
+        `container_name` specifies the name of the docker container associated with the environment variables
+        (the parameter is required).
+        The `variable_names` parameter specifies the names of the environment variables which should be returned as
+        a dictionary.
+
+        Method raises an Exception if the deployment entity is not found.
+
+        Example:
+        | Get Environment Variables For Deployment Entity Container | elasticsearch-0 | elasticsearch-service | elasticsearch | |
+        """
+        entity = self.get_deployment_entity(name, namespace)
+        return self._get_environment_variables_for_container(entity, container_name, variable_names)
+
+    def set_environment_variables_for_deployment_entity_container(self,
+                                                                  name: str,
+                                                                  namespace: str,
+                                                                  container_name: str,
+                                                                  variables_to_change: dict):
+        """Changes the values of the given environment variables per `deployment/deployment config` container.
+        The deployment entity is found by name and namespace/project.
+        `container_name` specifies the name of the docker container associated with the environment variables
+        (the parameter is required).
+        The `variables_to_change` parameter specifies a dictionary of variables to update. If the container
+        environment variable names do not contain a `variables_to_change` dictionary key, that pair is redundant and
+        will be ignored.
+
+        Method raises an Exception if the deployment entity is not found.
+
+        Example:
+        | Set Environment Variables For Deployment Entity Container | elasticsearch-1 | elasticsearch-cluster | elasticsearch | |
+        """
+        entity = self.get_deployment_entity(name, namespace)
+        self._prepare_entity_with_environment_variables_for_container(entity, container_name, variables_to_change)
+        self.patch_namespaced_deployment_entity(name, namespace, entity)
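+
+    # A hedged sketch (hypothetical values) of the `variables_to_change` shape consumed
+    # above: keys are existing container variable names, values are the new values.
+    #
+    #   variables_to_change = {'HEAP_SIZE': '512m', 'LOG_LEVEL': 'debug'}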
+
+    def _get_environment_variables_for_container(self,
+                                                 entity,
+                                                 container_name: str,
+                                                 variable_names: list) -> dict:
+        environments = self._get_environments_for_container(entity.spec.template.spec.containers, container_name)
+        return self._get_env_variables(environments, variable_names)
+
+    def _prepare_entity_with_environment_variables_for_container(self,
+                                                                 entity,
+                                                                 container_name: str,
+                                                                 variables_to_update: dict):
+        environments = self._get_environments_for_container(entity.spec.template.spec.containers, container_name)
+
+        def set_new_variables(dicts: list, params: dict):
+            for dictionary in dicts:
+                if dictionary.name in params.keys():
+                    dictionary.value = params[dictionary.name]
+
+        set_new_variables(environments, variables_to_update)
+
+    @staticmethod
+    def _get_environments_for_container(containers, container_name):
+        environments = None
+        for container in containers:
+            if container.name == container_name:
+                environments = container.env
+        return environments
+
+    @staticmethod
+    def _get_env_variables(dicts: list, params: list, ignore_reference=True):
+        if not dicts:
+            return None
+        result = {}
+        for dictionary in dicts:
+            if dictionary.name in params:
+                if not ignore_reference:
+                    result[dictionary.name] = dictionary.value if dictionary.value is not None else ""
+                elif dictionary.value is not None:
+                    result[dictionary.name] = dictionary.value
+        return result
+
+    def get_stateful_set(self, name: str, namespace: str):
+        """Returns the `Stateful Set` configuration.
+        The `Stateful Set` is found by name and namespace/project.
+
+        Method raises an Exception if the Stateful Set is not found.
+
+        Example:
+        | Get Stateful Set | cassandra0 | cassandra |
+        """
+        return self.k8s_apps_v1_client.read_namespaced_stateful_set(name, namespace)
+
+    def get_stateful_sets(self, namespace: str) -> list:
+        """Returns a list of `Stateful Sets` by namespace/project.
+
+        Example:
+        | Get Stateful Sets | cassandra |
+        """
+        return self.k8s_apps_v1_client.list_namespaced_stateful_set(namespace).items
+
+    def get_stateful_set_names_by_label(self, namespace: str, label_value: str, label_name: str = 'service') -> list:
+        """Returns a list of `Stateful Set` names by project/namespace and a particular label.
+        A `label` is a "key-value" pair where the key is the `label_name` variable and the value is `label_value`.
+        If a Stateful Set contains this `label`, its name is added to the result list.
+
+        Example:
+        | Get Stateful Set Names By Label | cassandra | cassandra-dc-dc2 | label_name=app |
+        | Get Stateful Set Names By Label | cassandra | cassandra-cluster |
+        """
+        stateful_sets = self.get_stateful_sets(namespace)
+        return [stateful_set.metadata.name for stateful_set in stateful_sets
+                if stateful_set.metadata.labels.get(label_name, "") == label_value]
+
+    @deprecated(reason="Use get_stateful_set_replicas_count")
+    def get_stateful_set_replica_counts(self, name: str, namespace: str) -> int:
+        """Returns the replicas number for a particular `Stateful Set`.
+        The `Stateful Set` is found by its `name` and project/namespace. The number of replicas is the number of
+        Kubernetes `pods` which the Stateful Set should create and manage. Note that some pods may not be in
+        "running" status.
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Get Stateful Set Replica Counts | cassandra1 | cassandra |
+        """
+        return self.get_stateful_set_replicas_count(name, namespace)
+
+    def get_stateful_set_replicas_count(self, name: str, namespace: str) -> int:
+        """Returns the replicas number for a particular `Stateful Set`.
+        The `Stateful Set` is found by its `name` and project/namespace. The number of replicas is the number of
+        Kubernetes `pods` which the Stateful Set should create and manage. Note that some pods may not be in
+        "running" status.
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Get Stateful Set Replicas Count | cassandra1 | cassandra |
+        """
+        stateful_set = self.k8s_apps_v1_client.read_namespaced_stateful_set(name, namespace)
+        return stateful_set.spec.replicas
+
+    def get_stateful_set_ready_replicas_count(self, name: str, namespace: str) -> int:
+        """Returns the ready replicas number for a particular `Stateful Set`.
+        The `Stateful Set` is found by its `name` and project/namespace. The number of replicas is the number of
+        Kubernetes `pods` which the Stateful Set should create and manage. Note that some pods may not be in
+        "running" status.
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Get Stateful Set Ready Replicas Count | cassandra1 | cassandra |
+        """
+        stateful_set = self.k8s_apps_v1_client.read_namespaced_stateful_set(name, namespace)
+        return stateful_set.status.ready_replicas
+
+    @deprecated(reason="Use get_active_stateful_sets_count")
+    def get_active_stateful_sets_counts(self, namespace: str, selector: dict) -> int:
+        """Returns the number of active `Stateful Sets`.
+        `Stateful Sets` are found by project/namespace and the given label selector. `selector` is a dictionary
+        which should be contained in the particular Stateful Set labels. Method recognizes a Stateful Set as active
+        if all its replicas are active, i.e. all related `pods` are in "running" status.
+
+        Example:
+        | Get Active Stateful Sets Counts | postgres-service | |
+        """
+        stateful_sets = self.get_stateful_sets(namespace)
+        count = 0
+        for stateful_set in stateful_sets:
+            if self._do_labels_satisfy_selector(stateful_set.metadata.labels, selector):
+                # Counts only fully ready stateful sets (the comparison yields 0 or 1).
+                count += int(stateful_set.status.ready_replicas == stateful_set.status.replicas)
+        return count
+
+    def get_active_stateful_sets_count(self, stateful_set_names: list, namespace: str) -> int:
+        """Returns the number of active Stateful Sets. A `Stateful Set` is active if all of its replicas are in
+        "ready" status. Stateful Sets are found by name and project/namespace.
+
+        Example:
+        | Get Active Stateful Sets Count | | cassandra |
+        """
+        counter = 0
+        for stateful_set_name in stateful_set_names:
+            stateful_set = self.get_stateful_set(stateful_set_name, namespace)
+            if stateful_set.status.replicas == stateful_set.status.ready_replicas:
+                counter += 1
+        return counter
+
+    @deprecated(reason="Use get_inactive_stateful_sets_count")
+    def get_inactive_stateful_set_count(self, stateful_set_names: list, namespace: str) -> int:
+        """Returns the number of inactive Stateful Sets. A `Stateful set` is inactive if it has no replicas or has a
+        replica in a non-"ready" status. Stateful Sets are found by name and project/namespace.
+
+        Example:
+        | Get Inactive Stateful Set Count | | cassandra |
+        """
+        return self.get_inactive_stateful_sets_count(stateful_set_names, namespace)
+
+    def get_inactive_stateful_sets_count(self, stateful_set_names: list, namespace: str) -> int:
+        """Returns the number of inactive Stateful Sets. A `Stateful set` is inactive if it has no replicas or has a
+        replica in a non-"ready" status. Stateful Sets are found by name and project/namespace.
+
+        Example:
+        | Get Inactive Stateful Sets Count | | cassandra |
+        """
+        counter = 0
+        for stateful_set_name in stateful_set_names:
+            stateful_set = self.get_stateful_set(stateful_set_name, namespace)
+            if not stateful_set.status.replicas or stateful_set.status.replicas != stateful_set.status.ready_replicas:
+                counter += 1
+        return counter
+
+    # TODO: refactor this method with the same one for deployment entities
+    def check_service_of_stateful_sets_is_scaled(self, stateful_set_names, namespace: str, direction="up",
+                                                 timeout=300) -> bool:
+        """Returns `True` if all given Stateful Sets are scaled. They can be scaled "up" or "down".
+        The `direction` variable defines the direction of the scale and should be set without quote symbols. The
+        "down" direction means that all given Stateful Sets have no replicas. The "up" direction means that all
+        given Stateful Sets have only "ready" replicas. Stateful Sets are found by name and project/namespace.
+        Stateful Set names can be passed as a list or as a string value; in the string case, use a space as
+        separator. The `timeout` variable (in seconds) is used to cancel the waiting loop.
+        Method raises an Exception if the `direction` variable is not "up" or "down" or if it is passed with quote
+        symbols.
+
+        Examples:
+        | Check Service Of Stateful Sets Is Scaled | | cassandra | direction=down | timeout=450 |
+        | Check Service Of Stateful Sets Is Scaled | | cassandra | direction=up |
+        | Check Service Of Stateful Sets Is Scaled | stateful_set_name | cassandra |
+        """
+
+        direction = direction.lower()
+        if direction in ('"up"', '"down"', "'up'", "'down'"):
+            raise Exception('direction parameter (up or down) must be set without quote symbols')
+        elif direction not in ("up", "down"):
+            raise Exception(f'direction argument should be "up" or "down" but {direction} is given')
+
+        if isinstance(stateful_set_names, str):
+            stateful_set_names = stateful_set_names.split(' ')
+
+        timeout_start = time.time()
+        check_func = self.get_active_stateful_sets_count \
+            if direction == "up" else self.get_inactive_stateful_sets_count
+        while True:
+            if len(stateful_set_names) == check_func(stateful_set_names, namespace):
+                return True
+            if time.time() > timeout_start + timeout:
+                return False
+            time.sleep(5)
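+
+    # A hedged usage sketch (plain Python, hypothetical names): wait until the listed
+    # stateful sets are fully scaled down, polling every 5 seconds up to `timeout`.
+    #
+    #   scaled = library.check_service_of_stateful_sets_is_scaled(
+    #       ['cassandra-dc-dc1', 'cassandra-dc-dc2'], 'cassandra', direction='down', timeout=450)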
+
+    def scale_down_stateful_sets_by_service_name(self,
+                                                 service_name: str,
+                                                 namespace: str,
+                                                 with_check: bool = False,
+                                                 timeout: int = 300):
+        """Scales down (sets 0 replicas) all `Stateful Sets` which manage the same `Pods` as the given `Service`.
+        Stateful Sets are found by Service name and project/namespace.
+        `with_check` is used to wait until all Stateful Sets have no replicas.
+        The `timeout` variable (in seconds) is used only if `with_check=True` and specifies the timeout for the checker.
+
+        Example:
+        | Scale Down Stateful Sets By Service Name | cassandra | cassandra | with_check=True | timeout=450 |
+        | Scale Down Stateful Sets By Service Name | cassandra | cassandra | with_check=True |
+        | Scale Down Stateful Sets By Service Name | cassandra | cassandra |
+        """
+        stateful_set_names = self.get_stateful_set_names_by_service_name(service_name, namespace)
+        for stateful_set_name in stateful_set_names:
+            self.set_replicas_for_stateful_set(stateful_set_name, namespace, replicas=0)
+        if with_check:
+            self.check_service_of_stateful_sets_is_scaled(stateful_set_names, namespace, direction="down",
+                                                          timeout=timeout)
+
+    # TODO: Add an ability to do it with ordering
+    def scale_up_stateful_sets_by_service_name(self,
+                                               service_name: str,
+                                               namespace: str,
+                                               with_check: bool = False,
+                                               timeout: int = 300,
+                                               **kwargs):
+        """Scales up `Stateful Sets` which manage the same `Pods` as the given `Service`.
+        If the `replicas` parameter is present, the method sets its value as the replicas value for all found
+        Stateful Sets. If this parameter is not present, the number of replicas for each found Stateful Set is
+        increased by one. Technically, `replicas=0` can be used to scale down found `Stateful Sets`, but the
+        `Scale Down Stateful Sets By Service Name` method is recommended for that purpose.
+        Stateful Sets are found by Service name and project/namespace.
+        `with_check` is used to wait until all Stateful Sets have no replicas in a non-"ready" status.
+        The `timeout` variable (in seconds) is used only if `with_check=True` and specifies the timeout for the checker.
+
+        Examples:
+        | Scale Up Stateful Sets By Service Name | cassandra | cassandra | replicas=2 | with_check=True | timeout=250 |
+        | Scale Up Stateful Sets By Service Name | cassandra | cassandra | with_check=True | timeout=250 |
+        | Scale Up Stateful Sets By Service Name | cassandra | cassandra | with_check=True |
+        | Scale Up Stateful Sets By Service Name | cassandra | cassandra |
+        """
+        replicas = kwargs.get('replicas', None)
+        if replicas is not None:
+            replicas = int(replicas)
+        stateful_set_names = self.get_stateful_set_names_by_service_name(service_name, namespace)
+        for stateful_set_name in stateful_set_names:
+            if replicas is not None:
+                self.set_replicas_for_stateful_set(stateful_set_name, namespace, replicas=replicas)
+            else:
+                self.scale_up_stateful_set(stateful_set_name, namespace)
+        if with_check:
+            direction = "down" if replicas is not None and replicas == 0 else "up"
+            self.check_service_of_stateful_sets_is_scaled(stateful_set_names, namespace, direction=direction,
+                                                          timeout=timeout)
+
+    def get_stateful_set_pod_selector(self, name: str, namespace: str) -> dict:
+        """Returns a Stateful Set labels selector as a dictionary.
+        The Stateful Set labels selector is the dictionary of labels which are used to look up the Kubernetes `pods`
+        related to the Stateful Set. The Stateful Set is found by name and project/namespace.
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Get Stateful Set Pod Selector | cassandra0 | cassandra |
+        """
+        stateful_set = self.k8s_apps_v1_client.read_namespaced_stateful_set(name, namespace)
+        return stateful_set.spec.selector.match_labels
+
+    def get_stateful_set_names_by_selector(self, namespace: str, selector: dict) -> list:
+        """Returns a list of Kubernetes Stateful Set names by the given labels selector.
+        Method finds `Stateful Sets` by their own `pod` selector: a Stateful Set looks up its pods by its own label
+        selector, and if this label selector contains all labels from the `selector` variable, the Stateful Set is
+        added to the result list.
+
+        Example:
+        | Get Stateful Set Names By Selector | cassandra | |
+        """
+        stateful_sets = self.get_stateful_sets(namespace)
+        return [stateful_set.metadata.name for stateful_set in stateful_sets
+                if self._do_labels_satisfy_selector(stateful_set.spec.template.metadata.labels, selector)]
+
+    def get_stateful_set_names_by_service_name(self, service_name: str, namespace: str) -> list:
+        """Returns a list of `Stateful Set` names by the given Kubernetes service name and `project`/`namespace`.
+        There is no direct mapping between a `Stateful Set` and a `service`. It is assumed that the Stateful Set
+        watches the same Kubernetes `pods` as the Kubernetes service, so the `Stateful Set` matches the `service` by
+        the transitivity property.
+
+        Method raises an Exception if `Service` or `namespace` is not found.
+
+        Example:
+        | Get Stateful Set Names By Service Name | cassandra-dc-dc1 | cassandra |
+        """
+        selector = self.get_service_selector(service_name, namespace)
+        return self.get_stateful_set_names_by_selector(namespace, selector)
+
+    def set_replicas_for_stateful_set(self, name: str, namespace: str, replicas: int = 1):
+        """Sets a predefined number of replicas for the found Stateful Set.
+        Method looks up a Stateful Set by name and project/namespace and sets the number of replicas for it.
+        It means that the Stateful Set should create and manage the given number of Kubernetes `pods`.
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Examples:
+        | Set Replicas For Stateful Set | cassandra-dc-dc1 | cassandra | replicas=2 |
+        | Set Replicas For Stateful Set | cassandra-dc-dc1 | cassandra |
+        """
+        scale = self.k8s_apps_v1_client.read_namespaced_stateful_set_scale(name, namespace)
+        scale.spec.replicas = replicas
+        self.k8s_apps_v1_client.patch_namespaced_stateful_set(name, namespace, scale)
+
+    def scale_up_stateful_set(self, name: str, namespace: str):
+        """Increases by one the number of replicas for the found `Stateful Set`.
+        The pair of `name` and `namespace/project` specifies a unique `Stateful Set`. Method finds the `Stateful Set`
+        by name and namespace/project, recognizes the current number of replicas and scales it (patches the
+        `Stateful Set` and increases the number of replicas by one - a new Kubernetes `pod` will be created).
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Scale Up Stateful Set | cassandra0 | cassandra |
+        """
+        scale = self.k8s_apps_v1_client.read_namespaced_stateful_set_scale(name, namespace)
+        if scale.spec.replicas is None:
+            scale.spec.replicas = 1
+        else:
+            scale.spec.replicas += 1
+        self.k8s_apps_v1_client.patch_namespaced_stateful_set(name, namespace, scale)
+
+    def scale_down_stateful_set(self, name: str, namespace: str):
+        """Decreases by one the number of replicas for the found `Stateful Set`.
+        The pair of `name` and `namespace/project` specifies a unique `Stateful Set`. Method finds the `Stateful Set`
+        by name and namespace/project, recognizes the current number of replicas and scales it (patches the
+        `Stateful Set` and decreases the number of replicas by one - some Kubernetes `pod` will be removed).
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Scale Down Stateful Set | cassandra0 | cassandra |
+        """
+        scale = self.k8s_apps_v1_client.read_namespaced_stateful_set_scale(name, namespace)
+        if not scale.spec.replicas:
+            scale.spec.replicas = 0
+        else:
+            scale.spec.replicas -= 1
+        self.k8s_apps_v1_client.patch_namespaced_stateful_set(name, namespace, scale)
+
+    def get_pod_names_for_stateful_set(self, name: str, namespace: str) -> list:
+        """Returns the expected pod names for a `Stateful Set`.
+        It is assumed that a `Stateful Set` manages N replicas (Kubernetes pods) with pattern naming:
+        `<name>-0`, `<name>-1`, ..., `<name>-(N-1)`.
+        This behavior is the default for a Kubernetes `Stateful Set` but can be changed.
+
+        Method raises an Exception if `Stateful Set` or `namespace` is not found.
+
+        Example:
+        | Get Pod Names For Stateful Set | cassandra0 | cassandra |
+        """
+        stateful_set = self.k8s_apps_v1_client.read_namespaced_stateful_set(name, namespace)
+        return [f'{name}-{number}' for number in range(stateful_set.status.replicas)]
+
+    def get_environment_variables_for_stateful_set_container(self,
+                                                             name: str,
+                                                             namespace: str,
+                                                             container_name: str,
+                                                             variable_names: list) -> dict:
+        """Returns a dictionary of `Stateful Set` environment variables (key-values) for a container.
+        The Stateful Set is found by name and namespace/project.
+        `container_name` specifies the name of the docker container associated with the environment variables
+        (the parameter is required).
+        The `variable_names` parameter specifies the names of the environment variables which should be returned as
+        a dictionary.
+
+        Method raises an Exception if the Stateful Set is not found.
+
+        Example:
+        | Get Environment Variables For Stateful Set Container | cassandra0 | cassandra | cassandra | |
+        """
+        entity = self.get_stateful_set(name, namespace)
+        return self._get_environment_variables_for_container(entity, container_name, variable_names)
+
+    def set_environment_variables_for_stateful_set_container(self,
+                                                             name: str,
+                                                             namespace: str,
+                                                             container_name: str,
+                                                             variables_to_change: dict):
+        """Changes the values of the given environment variables per `Stateful Set` container.
+        The Stateful Set is found by name and namespace/project.
+        `container_name` specifies the name of the docker container associated with the environment variables
+        (the parameter is required).
+        The `variables_to_change` parameter specifies a dictionary of variables to update. If the container
+        environment variable names do not contain a `variables_to_change` dictionary key, that pair is redundant and
+        will be ignored.
+
+        Method raises an Exception if the Stateful Set is not found.
+
+        Example:
+        | Set Environment Variables For Stateful Set Container | cassandra0 | cassandra | cassandra | |
+        """
+        entity = self.get_stateful_set(name, namespace)
+        self._prepare_entity_with_environment_variables_for_container(entity, container_name, variables_to_change)
+        self.k8s_apps_v1_client.patch_namespaced_stateful_set(name, namespace, entity)
+
+    def get_pod(self, name: str, namespace: str):
+        """Returns the particular pod configuration as a JSON object.
+        Method looks up the `pod` by name and project/namespace.
+
+        Method raises an Exception if `Pod` or `namespace` is not found.
+
+        Examples:
+        | Get Pod | elasticsearch-0-859f48b988-2ljmx | elasticsearch-service |
+        | Get Pod | cassandra1-0 | cassandra |
+        """
+        return self.k8s_core_v1_client.read_namespaced_pod(name, namespace)
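+
+    # A hedged illustration (hypothetical values) of `get_pod_names_for_stateful_set`:
+    # a stateful set named `cassandra0` with 3 replicas yields
+    #   ['cassandra0-0', 'cassandra0-1', 'cassandra0-2']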
+
+    def get_pods(self, namespace: str) -> list:
+        """Returns a list of `Pods` by namespace/project.
+
+        Example:
+        | Get Pods | elasticsearch-service |
+        """
+        return self.k8s_core_v1_client.list_namespaced_pod(namespace).items
+
+    def get_pods_by_selector(self, namespace: str, selector: dict) -> list:
+        """Returns a list of `Pods` from the given project/namespace and selector.
+        `selector` is a dictionary of labels. If a particular pod's labels contain all labels from `selector`, the
+        `Pod` is added to the result list.
+
+        Example:
+        | Get Pods By Selector | elasticsearch-service | |
+        """
+        return [pod for pod in self.get_pods(namespace)
+                if self._do_labels_satisfy_selector(pod.metadata.labels, selector)]
+
+    def get_pods_by_service_name(self, service_name: str, namespace: str) -> list:
+        """Returns a list of `Pods` related to the given Service in the project/namespace.
+        Method looks up the `Service` by name and project/namespace, takes its label selector and finds all matched
+        Kubernetes `Pods`.
+
+        Method raises an Exception if `Service` or `namespace` is not found.
+
+        Example:
+        | Get Pods By Service Name | elasticsearch | elasticsearch-service |
+        """
+        service_labels = self.get_service_selector(service_name, namespace)
+        return self.get_pods_by_selector(namespace, service_labels)
+
+    def get_pod_names_by_selector(self, namespace: str, selector: dict) -> list:
+        """Returns a list of `Pod` names from the given project/namespace and selector.
+        `selector` is a dictionary of labels. If a particular pod's labels contain all labels from `selector`, the
+        `Pod` name is added to the result list.
+
+        Example:
+        | Get Pod Names By Selector | elasticsearch-service | |
+        """
+        return [pod.metadata.name for pod in self.get_pods(namespace)
+                if self._do_labels_satisfy_selector(pod.metadata.labels, selector)]
+
+    def get_pod_names_by_service_name(self, service_name: str, namespace: str) -> list:
+        """Returns a list of `Pod` names related to the given Service in the project/namespace.
+        Method looks up the `Service` by name and project/namespace, takes its label selector and finds all matched
+        Kubernetes `Pods`.
+
+        Method raises an Exception if `Service` or `namespace` is not found.
+
+        Example:
+        | Get Pod Names By Service Name | elasticsearch | elasticsearch-service |
+        """
+        service_labels = self.get_service_selector(service_name, namespace)
+        return self.get_pod_names_by_selector(namespace, service_labels)
+
+    def number_of_pods_in_ready_status(self, service_name: str, namespace: str) -> int:
+        """
+        Returns the number of pods in ready status.
+        *Args:*\n
+        _service_name_ (str) - service name;\n
+        _namespace_ (str) - OpenShift project name;\n
+        *Example:*\n
+        | Number Of Pods In Ready Status | streaming-platform | streaming-service |
+        """
+
+        service_labels = self.get_service_selector(service_name, namespace)
+        pods = self.get_pods_by_selector(namespace, service_labels)
+        counter = 0
+        for pod in pods:
+            # A pod counts as ready only when all of its containers are ready,
+            # not just the first one.
+            if all(status.ready for status in pod.status.container_statuses):
+                counter += 1
+        return counter
+
+    def get_deployment_replicas_count(self, service: str, namespace: str, label: str = 'clusterName') -> int:
+        """Returns the total number of replicas of the Service's related active `Deployments` in the given
+        project/namespace. Method looks up active `Deployments` by service name, project/namespace and label,
+        and sums their replicas.
+
+        Method returns 0 if no `Deployment` is found.
+
+        Example:
+        | Get Deployment Replicas Count | streaming-platform | streaming-service |
+        """
+        deployment_list = self.platform_client.get_active_deployment_entities_for_service(namespace, service, label)
+        replicas_count = 0
+        for deployment in deployment_list:
+            replicas_count += deployment.spec.replicas
+        return replicas_count
+
+    def get_pod_container_environment_variables_for_service(self,
+                                                            namespace: str,
+                                                            service: str,
+                                                            container_name: str,
+                                                            variable_names: list) -> dict:
+        """Returns a dictionary of `Pod` names and container environment variables (key-values) from the given
+        project/namespace for the Service's related `pods`. Method looks up the `Service` by name and
+        project/namespace, takes its label selector and finds all matched Kubernetes `Pods`.
+        `container_name` specifies the name of the docker container associated with the environment variables
+        (the parameter is required).
+        The `variable_names` parameter specifies the names of the environment variables which should be returned as
+        a dictionary.
+
+        Example:
+        | Get Pod Container Environment Variables For Service | elasticsearch | elasticsearch-service | elasticsearch | |
+        """
+        pods = self.get_pods_by_service_name(service, namespace)
+        result = {}
+        for pod in pods:
+            environments = self._get_environments_for_container(pod.spec.containers, container_name)
+            env_variables = self._get_env_variables(environments, variable_names)
+            result[pod.metadata.labels.get('name', '')] = env_variables
+        return result
+
+    def look_up_pod_name_by_pod_ip(self, pod_ip: str, namespace: str):
+        """Returns the name of the `Pod` from the given project/namespace by the `pod` IP.
+
+        Example:
+        | Look Up Pod Name By Pod Ip | 10.129.2.61 | elasticsearch-service |
+        """
+        pods = self.get_pods(namespace)
+        for pod in pods:
+            if pod.status.pod_ip == pod_ip:
+                return pod.metadata.name
+        return None
+
+    def look_up_pod_ip_by_pod_name(self, pod_name: str, namespace: str):
+        """Returns the IP of the `Pod` from the given project/namespace by the `pod` name.
+
+        Example:
+        | Look Up Pod Ip By Pod Name | kafka-1-5d569cc485-zwnpv | kafka-service |
+        """
+        pods = self.get_pods(namespace)
+        for pod in pods:
+            # Compare names exactly; a substring check could match the wrong pod.
+            if pod.metadata.name == pod_name:
+                return pod.status.pod_ip
+        return None
+
+    def delete_pod_by_pod_name(self, name: str, namespace: str, grace_period=0):
+        """Deletes the `Pod` from the given project/namespace by the `pod` name.
+
+        Example:
+        | Delete Pod By Pod Name | streaming-platform-1-kj8sf | streaming-platform-service |
+        """
+        self.k8s_core_v1_client.delete_namespaced_pod(namespace=namespace, name=name, grace_period_seconds=grace_period)
+
+    def delete_pod_by_pod_ip(self, pod_ip: str, namespace: str):
+        """Deletes the `Pod` from the given project/namespace by the `pod` IP.
+
+        Example:
+        | Delete Pod By Pod Ip | 10.129.2.61 | streaming-platform-service |
+        """
+        pod_name = self.look_up_pod_name_by_pod_ip(pod_ip, namespace)
+        if pod_name:
+            self.delete_pod_by_pod_name(pod_name, namespace)
+
+    def execute_command_in_pod(self, name: str, namespace: str, command: str, container: str = "", shell: str = "/bin/bash"):
+        """Executes the given console command within a docker container.
+        The `container` variable specifies the name of the container; it can be empty if the pod contains only one
+        container. The Pod is found by name and project/namespace. Method executes the given console command within
+        the stream and returns a tuple of the command result and the error message (both of them can be empty).
+
+        Example:
+        | Execute Command In Pod | elasticsearch-0-859f48b988-2ljmx | elasticsearch | ls -la |
+        | Execute Command In Pod | consul-server-1 | consul | ls -la | container=consul |
+        | Execute Command In Pod | consul-server-1 | consul | ls -la | container=consul | shell=/bin/sh |
+        """
+        exec_cmd = [shell, '-c', command]
+        response = stream(self.k8s_core_v1_client.connect_get_namespaced_pod_exec,
+                          name,
+                          namespace,
+                          container=container,
+                          command=exec_cmd,
+                          stderr=True,
+                          stdin=False,
+                          stdout=True,
+                          tty=False,
+                          _preload_content=False)
+
+        result = ""
+        errors = ""
+        while response.is_open():
+            response.update(timeout=2)
+            if response.peek_stdout():
+                value = str(response.read_stdout())
+                result += value
+            if response.peek_stderr():
+                error = response.read_stderr()
+                errors += error
+        return result.strip(), errors.strip()
+
+    def get_config_map(self, name: str, namespace: str):
+        """
+        Returns a config map by name in the specified project/namespace.
+
+        Example:
+        | Get Config Map | elasticsearch-config-map | elasticsearch |
+        """
+        return self.k8s_core_v1_client.read_namespaced_config_map(name, namespace)
+
+    def get_config_maps(self, namespace: str):
+        """
+        Returns config maps in the specified project/namespace.
+
+        Example:
+        | Get Config Maps | elasticsearch |
+        """
+        return self.k8s_core_v1_client.list_namespaced_config_map(namespace)
+
+    def create_config_map_from_file(self, namespace, file_path):
+        """
+        Creates a config map from the specified file path in the project/namespace.
+        The file must be in YAML format.
+
+        Example:
+        | Create Config Map From File | elasticsearch | /config.yaml |
+        """
+        body = self._parse_yaml_from_file(file_path)
+        return self.k8s_core_v1_client.create_namespaced_config_map(namespace, body)
+
+    def delete_config_map_by_name(self, name: str, namespace: str):
+        """
+        Deletes a config map by name in the specified project/namespace.
+
+        Example:
+        | Delete Config Map By Name | elasticsearch-config-map | elasticsearch |
+        """
+        return self.k8s_core_v1_client.delete_namespaced_config_map(name, namespace)
+
+    def get_secret(self, name: str, namespace: str):
+        """
+        Returns a secret in the specified project/namespace.
+
+        Example:
+        | Get Secret | elasticsearch-secret | elasticsearch |
+        """
+        return self.k8s_core_v1_client.read_namespaced_secret(name, namespace)
+
+    def get_secrets(self, namespace: str):
+        """
+        Returns secrets in the specified project/namespace.
+
+        Example:
+        | Get Secrets | elasticsearch |
+        """
+        return self.k8s_core_v1_client.list_namespaced_secret(namespace)
+
+    def create_secret(self, namespace, body):
+        """Creates a secret in the specified project/namespace.
+
+        :param namespace: the secret's namespace
+        :param body: the JSON schema of the Secret to create.
+
+        Example:
+        | Create Secret | elasticsearch | secret_body |
+        """
+        return self.k8s_core_v1_client.create_namespaced_secret(namespace, body)
+
+    def delete_secret_by_name(self, name: str, namespace: str):
+        """
+        Deletes a secret by name in the specified project/namespace.
+
+        Example:
+        | Delete Secret By Name | elasticsearch-secret | elasticsearch |
+        """
+        return self.k8s_core_v1_client.delete_namespaced_secret(name, namespace)
+
+    def get_replica_sets(self, namespace: str):
+        """
+        Returns replica sets in the specified project/namespace.
+
+        Example:
+        | Get Replica Sets | elasticsearch |
+        """
+        return self.k8s_apps_v1_client.list_namespaced_replica_set(namespace)
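+
+    # A hedged sketch (hypothetical values) of a `body` accepted by `create_secret`:
+    #
+    #   secret_body = {
+    #       'apiVersion': 'v1',
+    #       'kind': 'Secret',
+    #       'metadata': {'name': 'elasticsearch-secret'},
+    #       'stringData': {'username': 'admin', 'password': 'changeit'},
+    #   }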
+
+    def get_replica_set(self, name: str, namespace: str):
+        """
+        Returns a replica set by replica set name in the specified project/namespace.
+
+        Example:
+        | Get Replica Set | elasticsearch-replica-set | elasticsearch |
+        """
+        return self.k8s_apps_v1_client.read_namespaced_replica_set(name, namespace)
+
+    def get_image(self, resource, container_name):
+        """
+        Returns the image from the resource configuration by container name.
+        """
+        if len(resource.spec.template.spec.containers) > 1 and container_name is not None:
+            for container in resource.spec.template.spec.containers:
+                if container.name == container_name:
+                    return container.image
+        elif len(resource.spec.template.spec.containers) == 1:
+            return resource.spec.template.spec.containers[0].image
+        return None
+
+    def get_resource_image(self, resource_type: str, resource_name: str, namespace: str, resource_container_name=None):
+        """
+        Identifies the resource type and returns the image for the specified resource by the name of the resource
+        and container in the specified project/namespace.
+        """
+        if resource_type == 'daemonset':
+            daemon_set = self.get_daemon_set(resource_name, namespace)
+            return self.get_image(daemon_set, resource_container_name)
+        elif resource_type == 'deployment':
+            deployment = self.get_deployment_entity(resource_name, namespace)
+            return self.get_image(deployment, resource_container_name)
+        elif resource_type == 'statefulset':
+            stateful_set = self.get_stateful_set(resource_name, namespace)
+            return self.get_image(stateful_set, resource_container_name)
+        else:
+            raise Exception(f'The type [{resource_type}] is not supported yet.')
+
+    def get_dd_images_from_config_map(self, config_map_name, namespace):
+        """
+        Returns the `dd_images` data entry of the given config map, or None if it is absent or empty.
+        """
+        config_map = self.get_config_map(config_map_name, namespace)
+        config_map_dict = config_map.to_dict()
+        # .get() avoids a KeyError when the config map has no `dd_images` entry.
+        return config_map_dict["data"].get("dd_images") or None
\ No newline at end of file
diff --git a/integration-tests-built-in-library/integration_library_builtIn/S3BackupLibrary.py b/integration-tests-built-in-library/integration_library_builtIn/S3BackupLibrary.py
new file mode 100644
index 0000000..b962e1d
--- /dev/null
+++ b/integration-tests-built-in-library/integration_library_builtIn/S3BackupLibrary.py
@@ -0,0 +1,39 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from s3_storage import S3Client, S3FileSystem
+
+
+class S3BackupLibrary(object):
+    """Robot Framework helper around `S3Client`/`S3FileSystem` for backup checks."""
+
+    def __init__(self, url: str, bucket: str, key_id: str, key_secret: str, ssl_verify=False):
+        self.s3_client = S3Client(url, bucket, key_id, key_secret, ssl_verify)
+        self.s3_file_system = S3FileSystem(client=self.s3_client)
+
+    def check_bucket_exists(self, bucket_name):
+        """Returns True if a bucket with the given name exists."""
+        buckets = self.s3_client.get_list_buckets()
+        for bucket in buckets:
+            if bucket_name == bucket['Name']:
+                return True
+        return False
+
+    def get_bucket(self, bucket_name):
+        """Returns the bucket resource by name."""
+        return self.s3_client.resource.Bucket(bucket_name)
+
+    def check_backup_exists(self, path, backup_id):
+        """Returns True if any object exists under `<path>/<backup_id>`."""
+        backup_file = self.s3_client.list_files(path=f"{path}/{backup_id}")
+        return bool(backup_file)
+
+    def remove_backup(self, path):
+        """Recursively removes the backup folder at `path`."""
+        self.s3_file_system.rmtree(path)
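+
+# A hedged usage sketch (hypothetical endpoint and credentials):
+#
+#   library = S3BackupLibrary('http://minio:9000', 'backups', 'key-id', 'key-secret')
+#   if library.check_backup_exists('daily', '20240101T0000'):
+#       library.remove_backup('daily/20240101T0000')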
diff --git a/integration-tests-built-in-library/integration_library_builtIn/__init__.py b/integration-tests-built-in-library/integration_library_builtIn/__init__.py
new file mode 100644
index 0000000..c021333
--- /dev/null
+++ b/integration-tests-built-in-library/integration_library_builtIn/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+
diff --git a/integration-tests-built-in-library/integration_library_builtIn/s3_storage.py b/integration-tests-built-in-library/integration_library_builtIn/s3_storage.py
new file mode 100644
index 0000000..fe6eae6
--- /dev/null
+++ b/integration-tests-built-in-library/integration_library_builtIn/s3_storage.py
@@ -0,0 +1,237 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import logging
+import time
+import json
+
+import boto3
+from botocore.exceptions import ClientError
+from botocore import config
+# Assumption: FileSystemS3 exports VAULT_DIRNAME_MATCHER alongside FileSystem; the
+# matcher is used below in S3FileSystem.listdir() and was previously unimported.
+from FileSystemS3 import FileSystem, VAULT_DIRNAME_MATCHER
+
+
+class S3Client:
+    __log = logging.getLogger("S3Client")
+
+    def __init__(self, url, bucket_name, access_key_id: str = None,
+                 access_key_secret: str = None, ssl_verify=False):
+        """
+        S3Client with access to the client itself and the resource object
+        """
+        self.url = url
+        self.access_key_id = access_key_id
+        self.access_key_secret = access_key_secret
+        self.bucket_name = bucket_name
+
+        client_config = config.Config(
+            region_name="auto",
+        )
+
+        self.client = boto3.client(
+            "s3",
+            endpoint_url=url,
+            aws_access_key_id=access_key_id,
+            aws_secret_access_key=access_key_secret,
+            config=client_config,
+            verify=ssl_verify,
+        )
+
+        self.resource = boto3.resource(
+            "s3",
+            region_name="auto",
+            endpoint_url=self.url,
+            aws_access_key_id=self.access_key_id,
+            aws_secret_access_key=self.access_key_secret,
+            # Keep TLS verification consistent with the client above.
+            verify=ssl_verify,
+        )
+
+    def create_presigned_url(self, object_name, expiration=3600):
+        """Generate a presigned URL to share an S3 object
+
+        :param object_name: string
+        :param expiration: time in seconds for the presigned URL to remain valid
+        :return: presigned URL as string. If error, returns None.
+        """
+
+        if expiration is None:
+            expiration = 3600
+        try:
+            response = self.client.generate_presigned_url('get_object',
+                                                          Params={'Bucket': self.bucket_name,
+                                                                  'Key': object_name},
+                                                          ExpiresIn=expiration)
+        except ClientError as e:
+            self.__log.error(e)
+            return None
+
+        return response
+
+    def get_list_buckets(self):
+        """ Get the list of all buckets
+        :return array of bucket objects
+        """
+        response = self.client.list_buckets()
+        return response['Buckets']
+
+    def list_files(self, path):
+        """ Get the list of files inside a folder
+        :param path: string
+        :return array of object keys
+        """
+        path = path.strip("/")
+        objects = self.client.list_objects_v2(Bucket=self.bucket_name, Prefix=path).get(
+            'Contents', [])
+        files = []
+        for obj in objects:
+            files.append(obj['Key'])
+        return files
+
+    def upload_folder(self, path):
+        for root, dirs, files in os.walk(path, topdown=False):
+            for name in files:
+                self.upload_file(os.path.join(root, name), os.path.join(root, name))
+
+    def upload_file(self, source, destination: str = None):
+        if destination is None:
+            destination = source
+        destination = destination.strip("/")
+        self.client.upload_file(source, self.bucket_name, destination)
+        self.__log.info(f"Uploaded file {source} to S3 {destination}")
+
+    def download_file(self, src, dest):
+        src = src.strip("/")
+        self.client.download_file(self.bucket_name, src, dest)
+        self.__log.info(f"Downloaded file {src} from S3 to {dest}")
+
+    def download_folder(self, s3_folder, local_dir=None):
+        """ Download the contents of a folder directory
+        Args:
+            s3_folder: the folder path in the s3 bucket
+            local_dir: a relative or absolute directory path in the local file system
+        """
+        self.__log.info(f"Start saving {s3_folder}")
+        s3_folder = s3_folder.strip("/")
+        bucket = self.resource.Bucket(self.bucket_name)
+        for obj in bucket.objects.filter(Prefix=s3_folder):
+            target = os.path.join("/", obj.key) if local_dir is None \
+                else os.path.join(local_dir, os.path.relpath(obj.key, s3_folder))
+            FileSystem.makedirs(os.path.dirname(target))
+            if obj.key[-1] == '/':
+                continue
+            bucket.download_file(obj.key, target)
+
+        self.__log.info(f"Finished saving {s3_folder}")
+
+
+class S3FileSystem(FileSystem):
+    __log = logging.getLogger("S3FileSystem")
+
+    def __init__(self, client: S3Client):
+        self.s3client = client
+
+    def listdir(self, path):
+        """ Get the list of directories
+        :param path: string
+        :return list of directories inside the folder
+        """
+        dirs = []
+        path = path.strip("/") + "/"
+        res = self.s3client.client.list_objects_v2(Bucket=self.s3client.bucket_name, Prefix=path, Delimiter="/")
+        for prefix in res.get('CommonPrefixes', []):
+            split_prefix = prefix["Prefix"].strip("/").split("/")
+            if VAULT_DIRNAME_MATCHER.match(split_prefix[-1]):
+                dirs.append(split_prefix[-1])
+        return dirs
+
+    def exists(self, path, type="dir"):
+        """ Check that an object exists inside the directory
+        :param path: string
+        :param type: string. Possible values: "dir" or "file"
+        :return True if the object exists, False otherwise
+        """
+        path = path.strip("/")
+        if type == "dir":
+            resp = self.s3client.client. \
+                list_objects(Bucket=self.s3client.bucket_name, Prefix=path, MaxKeys=1)
+            return 'Contents' in resp
+        elif type == "file":
+            try:
+                self.s3client.resource.Object(self.s3client.bucket_name, path).load()
+            except ClientError as e:
+                if e.response['Error']['Code'] == "404":
+                    return False
+                # Propagate unexpected errors instead of silently treating them as "exists".
+                raise
+            return True
+
+    def makedirs(self, path):
+        super().makedirs(path)
+
+    def read_file(self, path, log):
+        path = path.strip("/")
+        try:
+            response = self.s3client.client.get_object(Bucket=self.s3client.bucket_name, Key=path)
+            return json.loads(response['Body'].read())
+        except ClientError as e:
+            self.__log.warning(f'Could not read file from path {path}, error message: {e}')
+            return {}
+
+    def remove(self, path):
+        """ Delete all objects inside the folder
+        :param path: string
+        """
+        super().remove(path)
+        path = path.strip("/")
+        failed = False
+        bucket = self.s3client.resource.Bucket(self.s3client.bucket_name)
+        # Bulk delete (bucket.objects.filter(Prefix=path).delete()) does not work in
+        # Google Cloud Storage, so the objects are deleted one by one.
+        objs = bucket.objects.filter(Prefix=path).all()
+
+        try:
+            for obj in objs:
+                obj.delete()
+        except ClientError as e:
+            failed = True
+            self.__log.warning(f"Could not delete files from path {path}, error message: {e}")
+
+        # Delete all versions in case S3 cluster replication is enabled.
+        try:
+            bucket.object_versions.filter(Prefix=path).delete()
+            self.__log.debug(f"Permanently deleted all versions of object {path}.")
+        except ClientError as e:
+            failed = True
+            self.__log.info(f"Couldn't delete all versions of {path}. {e}")
+
+        if not failed:
+            # Ensure the deletion has propagated before returning.
+            for _ in range(5):
+                if not self.exists(path):
+                    self.__log.debug(f"path {path} has been deleted successfully")
+                    return
+                self.__log.debug(f"waiting for {path} deletion")
+                time.sleep(1)
+
+    def rmdir(self, path):
+        """ Delete directories inside the folder
+        :param path: string
+        """
+        super().rmdir(path)
+        self.remove(path)
+
+    def rmtree(self, path):
+        """ Completely remove the files and subdirectories in a directory
+        :param path: string
+        """
+        super().rmtree(path)
+        self.remove(path)
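+
+# A hedged usage sketch (hypothetical endpoint): remove a folder and verify it is gone.
+#
+#   fs = S3FileSystem(client=S3Client('http://minio:9000', 'backups'))
+#   fs.rmtree('daily/20240101T0000')
+#   assert not fs.exists('daily/20240101T0000')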
diff --git a/integration-tests-built-in-library/setup.py b/integration-tests-built-in-library/setup.py
new file mode 100644
index 0000000..08a0a25
--- /dev/null
+++ b/integration-tests-built-in-library/setup.py
@@ -0,0 +1,35 @@
+# Copyright 2024-2025 NetCracker Technology Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import setuptools
+
+with open("README.md", "r") as fh:
+    long_description = fh.read()
+
+setuptools.setup(
+    name="integration-tests-builtIn",
+    version="0.1.17",
+    author="framework_team",
+    description="BuiltIn Robot Framework keywords source package",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    url="https://github.com/Netcracker/qubership-docker-integration-tests/integration-tests-built-in-library",
+    packages=setuptools.find_packages(),
+    classifiers=[
+        "Programming Language :: Python :: 3",
+        "License :: OSI Approved :: Apache Software License",
+        "Operating System :: OS Independent",
+    ],
+    python_requires='>=3.7',
+)
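+
+# A hedged local-build sketch (hypothetical commands, not part of this change):
+#   pip install -e integration-tests-built-in-library
+#   python -c "import integration_library_builtIn"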