diff --git a/.github/workflows/build-dependency-cache.yml b/.github/workflows/build-dependency-cache.yml new file mode 100644 index 0000000000..b1498d894f --- /dev/null +++ b/.github/workflows/build-dependency-cache.yml @@ -0,0 +1,65 @@ +--- +# Creates a cache of the env directory +# Documentation for the syntax of this file is located +# https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions + +# The workflow name will show up in the action tab on github during execution +# https://github.com/VOLTTRON/volttron/actions (or if you are pushing to your own fork change the user) + +name: cache virtual env + +# Check the cache when a push happens to the repository. +on: [push] + +jobs: + build: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-16.04, ubuntu-18.04, ubuntu-20.04] + python-version: [3.6, 3.7, 3.8] + steps: + # checkout the volttron repository and set current direectory to it + - uses: actions/checkout@v2 + + - name: Set up Python ${{matrix.os}} ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + - id: envcache + uses: actions/cache@v2 + env: + cache-name: cache-env + with: + # This path is specific to Ubuntu + path: ./env + # Look to see if there is a cache hit for the corresponding requirements file + key: env-${{ matrix.os }}-${{matrix.python-version}}-${{hashFiles('requirements.py')}} + #-${{hashFiles('requirements.py')}} + # env-${{ matrix.os }}-${{matrix.python-version}} + + restore-keys: | + env-${{ matrix.os }}-${{matrix.python-version}}-${{hashFiles('requirements.py')}} + env-${{ matrix.os }}-${{matrix.python-version}} + + - name: Check env existance + id: check_files + uses: andstor/file-existence-action@v1 + with: + files: "env/bin/activate" + + - name: Install dependencies + if: steps.check_files.outputs.files_exists != 'true' + run: | + pip install wheel + python bootstrap.py --all --force + +# Only works on default branch of the target repo +# - name: Repository Dispatch +# uses: peter-evans/repository-dispatch@v1 +# with: +# token: ${{ secrets.WORKFLOW_ACCESS_TOKEN }} +# repository: ${{ github.repository }} +# event-type: my-event +# client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}' diff --git a/.github/workflows/pytest-auth.yml b/.github/workflows/pytest-auth.yml new file mode 100644 index 0000000000..04f68b40ff --- /dev/null +++ b/.github/workflows/pytest-auth.yml @@ -0,0 +1,100 @@ +--- +# This workflow is meant as a foundational workflow for running integration/unit tests on multiple targeted +# ubuntu versions with multiple python versions. +# +# This workflow utilizes the build-dependency-cache workflow which sets up the environment dependencies using +# bootstrap.py --all +# + +# Documentation for the syntax of this file is located +# https://docs.github.com/en/free-pro-team@latest/actions/reference/workflow-syntax-for-github-actions + +# The workflow name will show up in the action tab on github during execution +# https://github.com/VOLTTRON/volttron/actions (or if you are pushing to your own fork change the user) +name: Testing platform auth + +# Determine what events are going to trigger a running of the workflow +on: [push, pull_request] + +jobs: + # The job named build + build: + # The strategy allows customization of the build and allows matrixing the version of os and software + # https://docs.github.com/en/free-pro-team@l.atest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstrategy + strategy: + fail-fast: false + matrix: + # Each entry in the os and python-version matrix will be run so for the 3 x 4 there will be 12 jobs run + os: [ ubuntu-16.04, ubuntu-18.04, ubuntu-20.04 ] + python-version: [ 3.6, 3.7] # , 3.8, 3.9 ] + + # Run-on determines the operating system available to run on + # - At the current time there is only ubuntu machines between 16.04 and 20.04 available + # - This uses the matrix os from the strategy above + runs-on: ${{ matrix.os }} + + # Each step will be run in order of listing. + steps: + # checkout the volttron repository and set current direectory to it + - uses: actions/checkout@v2 + + # setup the python environment for the operating system + - name: Set up Python ${{matrix.os}} ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + # Attempt to restore the cache from the build-dependency-cache workflow if present then + # the output value steps.check_files.outputs.files_exists will be set (see the next step for usage) + - name: Has restored cache + id: check_files + uses: andstor/file-existence-action@v1 + with: + files: "env/bin/activate" + + # This step is only run if the cache wasn't able to be restored. + - name: Install dependencies + if: steps.check_files.outputs.files_exists != 'true' + run: | + pip install wheel + python bootstrap.py --all --force + + - name: Install volttron + run: | + source env/bin/activate + pip install -e . + + # Run the specified tests and save the results to a unique file that can be archived for later analysis. + - name: Run pytest + run: | + source env/bin/activate + pip install -e . + pytest volttrontesting/platform/auth_tests -rf -o junit_family=xunit2 --junitxml=output/test-auth-${{matrix.os}}-${{ matrix.python-version }}-results.xml + + # Archive the results from the pytest to storage. + - name: Archive test results + uses: actions/upload-artifact@v2 + if: always() + with: + name: pytest-report + path: output/test-auth-${{matrix.os}}-${{ matrix.python-version }}-results.xml + +# - name: Publish Unit Test Results +# uses: EnricoMi/publish-unit-test-result-action@v1.5 +# if: always() +# with: +# github_token: ${{ secrets.WORKFLOW_ACCESS_TOKEN }} +# files: output/test-testutils*.xml + + +#-cov=com --cov-report=xml --cov-report=html +# pytest tests.py --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html +# - name: Lint with flake8 +# run: | +# # stop the build if there are Python syntax errors or undefined names +# flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics +# # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide +# flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics +# - name: Test with pytest +# run: | +# pytest --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html diff --git a/.github/workflows/pytest-testutils.yml b/.github/workflows/pytest-testutils.yml new file mode 100644 index 0000000000..cd915fdeb6 --- /dev/null +++ b/.github/workflows/pytest-testutils.yml @@ -0,0 +1,89 @@ +--- +# This workflow is meant as a foundational workflow for running integration/unit tests on the +# plaform. For this workflow we are testing the +# +# volttrontesting/testutils directory using pytest. +# +# This workflow also shows the caching mechanisms available for storage +# and retrieval of cache for quicker setup of test environments. + +name: Testing testutils directory +on: [push, pull_request] + +jobs: + build: + # The strategy allows customization of the build and allows matrixing the version of os and software + # https://docs.github.com/en/free-pro-team@l.atest/actions/reference/workflow-syntax-for-github-actions#jobsjob_idstrategy + strategy: + fail-fast: false + matrix: + # Each entry in the os and python-version matrix will be run so for the 3 x 4 there will be 12 jobs run + os: [ ubuntu-16.04, ubuntu-18.04, ubuntu-20.04 ] + python-version: [ 3.6, 3.7] # , 3.8, 3.9 ] + + runs-on: ${{ matrix.os }} + + steps: + # checkout the volttron repository and set current direectory to it + - uses: actions/checkout@v2 + + # Attempt to restore the cache from the build-dependency-cache workflow if present then + # the output value steps.check_files.outputs.files_exists will be set (see the next step for usage) + - name: Set up Python ${{matrix.os}} ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + + # Determine if the cache was restored or not. + - name: Has restored cache + id: check_files + uses: andstor/file-existence-action@v1 + with: + files: "env/bin/activate" + + # if cache wasn't restored then do an installation of the dependencies + - name: Install dependencies + if: steps.check_files.outputs.files_exists != 'true' + run: | + pip install wheel + python bootstrap.py --all --force + + - name: Install volttron + run: | + source env/bin/activate + pip install -e . + + # Run the specified tests and save the results to a unique file that can be archived for later analysis. + - name: Run pytest + run: | + source env/bin/activate + pip install -e . + pytest volttrontesting/testutils -rf -o junit_family=xunit2 --junitxml=output/test-testutils-${{matrix.os}}-${{ matrix.python-version }}-results.xml + + # Archive the results from the pytest to storage. + - name: Archive test results + uses: actions/upload-artifact@v2 + if: always() + with: + name: pytest-report + path: output/test-testutils-${{matrix.os}}-${{ matrix.python-version }}-results.xml + +# - name: Publish Unit Test Results +# uses: EnricoMi/publish-unit-test-result-action@v1.5 +# if: always() +# with: +# github_token: ${{ secrets.WORKFLOW_ACCESS_TOKEN }} +# files: output/test-testutils*.xml + + +#-cov=com --cov-report=xml --cov-report=html +# pytest tests.py --doctest-modules --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html +# - name: Lint with flake8 +# run: | +# # stop the build if there are Python syntax errors or undefined names +# flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics +# # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide +# flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics +# - name: Test with pytest +# run: | +# pytest --junitxml=junit/test-results.xml --cov=com --cov-report=xml --cov-report=html diff --git a/.gitignore b/.gitignore index 95d07548cf..b824836270 100644 --- a/.gitignore +++ b/.gitignore @@ -20,7 +20,7 @@ applications /configs/ /config/ /docs/build/* -/docs/source/volttron_api +/docs/source/volttron-api volttron.log __pycache__ .python-version @@ -33,3 +33,5 @@ rabbitmq.log htmlcov/ MnesiaCore.* rabbitmq-server.download.tar.xz +/MagicMock/mock/ +/docs/source/volttron_api/ diff --git a/.travis.yml b/.travis.yml index 918f64cd30..c4456b33e0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,6 +9,3 @@ services: script: ci-integration/run-test-docker.sh -notifications: - slack: volttron-team:EgnBsUcQ98b75kU59tV8TnEa - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5c82923837..fe5ba34cc8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -6,7 +6,7 @@ We have a lengthy [Guide to Contributions](https://volttron.readthedocs.io/en/de ### Branch Naming -We use `develop` as the active development branch, and not `master`. Please submit PRs to `develop` by default. +We use `develop` as the active development branch, and not `main`. Please submit PRs to `develop` by default. ### Testing diff --git a/COPYRIGHT b/COPYRIGHT index 40f317d401..f7fcb5303e 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,4 +1,4 @@ -# Copyright (c) 2019, Battelle Memorial Institute +# Copyright 2020, Battelle Memorial Institute. # All rights reserved. # # Redistribution and use in source and binary forms, with or without diff --git a/README.md b/README.md index 4bb55c13c9..c8910770c3 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![image](docs/source/images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png) +![image](docs/source/files/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png) [![Codacy Badge](https://api.codacy.com/project/badge/Grade/fcf58045b4804edf8f4d3ecde3016f76)](https://app.codacy.com/gh/VOLTTRON/volttron?utm_source=github.com&utm_medium=referral&utm_content=VOLTTRON/volttron&utm_campaign=Badge_Grade_Settings) VOLTTRON™ is an open source platform for distributed sensing and control. The @@ -10,7 +10,7 @@ with that data. ## Features -- [Message Bus](https://volttron.readthedocs.io/en/latest/core_services/messagebus/index.html#messagebus-index) allows agents to subcribe to data sources and publish results and messages. +- [Message Bus](https://volttron.readthedocs.io/en/latest/core_services/messagebus/index.html#messagebus-index) allows agents to subscribe to data sources and publish results and messages. - [Driver framework](https://volttron.readthedocs.io/en/latest/core_services/drivers/index.html#volttron-driver-framework) for collecting data from and sending control actions to buildings and devices. - [Historian framework](https://volttron.readthedocs.io/en/latest/core_services/historians/index.html#historian-index) for storing data. - [Agent lifecycle managment](https://volttron.readthedocs.io/en/latest/core_services/control/AgentManagement.html#agentmanagement) in the platform @@ -26,7 +26,7 @@ users unfamiliar with those technologies, the following resources are recommende ### 1. Install prerequisites -(). +[Requirements Reference](https://volttron.readthedocs.io/en/develop/introduction/platform-install.html#step-1-install-prerequisites) From version 7.0, VOLTTRON requires python 3 with a minimum version of 3.6; it is tested only systems supporting that as a native package. On Debian-based systems (Ubuntu bionic, debian buster, raspbian buster), these can all be installed with the following commands: @@ -71,7 +71,7 @@ You can deactivate the environment at any time by running `deactivate`. ##### 1. Install Erlang version 21 packages -For RabbitMQ based VOLTTRON, some of the RabbitMQ specific software packages have to be installed. +For RabbitMQ based VOLTTRON, some RabbitMQ specific software packages must be installed. ###### On Debian based systems and CentOS 6/7 @@ -104,8 +104,8 @@ Also lock your version of Erlang using the [yum-plugin-versionlock](https://acce ##### 2. Configure hostname -Make sure that your hostname is correctly configured in /etc/hosts. -See (). If you are testing with VMs make please make sure to provide unique host names for each of the VM you are using. +Make sure that your hostname is correctly configured in /etc/hosts (See [this StackOverflow Post](https://stackoverflow.com/questions/24797947/os-x-and-rabbitmq-error-epmd-error-for-host-xxx-address-cannot-connect-to-ho)). +If you are testing with VMs make please make sure to provide unique host names for each of the VM you are using. The hostname should be resolvable to a valid IP when running on bridged mode. RabbitMQ checks for this during initial boot. Without this (for example, when running on a VM in NAT mode) RabbitMQ start would fail with the error "unable to @@ -155,13 +155,13 @@ vcfg --rabbitmq single [optional path to rabbitmq_config.yml] Refer to [examples/configurations/rabbitmq/rabbitmq_config.yml](examples/configurations/rabbitmq/rabbitmq_config.yml) for a sample configuration file. -At a minimum you will need to provide the host name and a unique common-name +At a minimum you will need to provide the hostname and a unique common-name (under certificate-data) in the configuration file. Note: common-name must be -unique and the general convention is to use `-root-ca`. +unique. The general convention is to use `-root-ca`. Running the above command without the optional configuration file parameter will -cause the user user to be prompted for all the required data in the command prompt -vcfg will use that data to generate a rabbitmq_config.yml file in the `VOLTTRON_HOME` +cause the user to be prompted for all the required data in the command prompt. +`vcfg` will use that data to generate a rabbitmq_config.yml file in the `VOLTTRON_HOME` directory. If the above configuration file is being used as a basis, be sure to update it with @@ -178,7 +178,7 @@ NOTE: We configure the RabbitMQ instance for a single volttron_home and volttron_instance. This script will confirm with the user the volttron_home to be configured. The VOLTTRON instance name will be read from volttron_home/config if available, if not the user will be prompted for VOLTTRON instance name. To -run the scripts without any prompts, save the the VOLTTRON instance name in +run the scripts without any prompts, save the VOLTTRON instance name in volttron_home/config file and pass the VOLTTRON home directory as a command line argument. For example: `vcfg --vhome /home/vdev/.new_vhome --rabbitmq single` @@ -246,9 +246,9 @@ Notes: ### 4. Test We are now ready to start the VOLTTRON instance. If configured with a RabbitMQ message bus a config file would have been - generated in `$VOLTTRON\_HOME/config` with the entry `message-bus=rmq`. If you need to revert back to ZeroMQ based + generated in `$VOLTTRON\_HOME/config` with the entry `message-bus=rmq`. If you need to revert to ZeroMQ based VOLTTRON, you will have to either remove "message-bus" parameter or set it to default "zmq" in `$VOLTTRON\_HOME/config` - and restart the volttron process. The following command starts the VOLTTORN process in the background: + and restart the volttron process. The following command starts the VOLTTRON process in the background: ```sh volttron -vv -l volttron.log & @@ -263,7 +263,7 @@ Next, start an example listener to see it publish and subscribe to the message b scripts/core/upgrade-listener ``` -This script handles several different commands for installing and starting an agent after removing an old copy. This +This script handles several commands for installing and starting an agent after removing an old copy. This simple agent publishes a heartbeat message and listens to everything on the message bus. Look at the VOLTTRON log to see the activity: @@ -288,12 +288,12 @@ To top the platform run the following command: ## Next Steps -There are several [walkthroughs](https://volttron.readthedocs.io/en/latest/devguides/index.html#devguides-index) to explore additional aspects of the platform: +There are several walkthroughs to explore additional aspects of the platform: -- [Agent Development Walkthrough](https://volttron.readthedocs.io/en/latest/devguides/agent_development/Agent-Development.html#agent-development) -- Demonstration of the [management UI](https://volttron.readthedocs.io/en/latest/devguides/walkthroughs/VOLTTRON-Central-Demo.html#volttron-central-demo) -- RabbitMQ setup with Federation and Shovel plugins -- Backward compatibility with the RabbitMQ message bus +- [Agent Development Walkthrough](https://volttron.readthedocs.io/en/latest/developing-volttron/developing-agents/agent-development.html) +- Demonstration of the [management UI](https://volttron.readthedocs.io/en/latest/deploying-volttron/multi-platform/volttron-central-deployment.html) +- [RabbitMQ setup with Federation and Shovel plugins](https://volttron.readthedocs.io/en/latest/deploying-volttron/multi-platform/multi-platform-rabbitmq-deployment.html) +- [Backward compatibility with the RabbitMQ message bus](https://volttron.readthedocs.io/en/latest/deploying-volttron/multi-platform/multi-platform-multi-bus.html) ## Acquiring Third Party Agent Code @@ -308,21 +308,21 @@ git clone https://github.com/VOLTTRON/volttron-applications.git develop ## Contribute -How to [contribute](http://volttron.readthedocs.io/en/latest/community_resources/index.html#contributing-back) back: +How to [contribute](https://volttron.readthedocs.io/en/latest/developing-volttron/contributing-code.html) back: -- Issue Tracker: -- Source Code: +- [Issue Tracker](https://github.com/VOLTTRON/volttron/issues) +- [Source Code](https://github.com/VOLTTRON/volttron) ## Support -There are several options for VOLTTRONTM [support](https://volttron.readthedocs.io/en/latest/community_resources/index.html#volttron-community). +There are several options for VOLTTRONTM [support](https://volttron.readthedocs.io/en/latest/developing-volttron/community.html). - A VOLTTRONTM office hours telecon takes place every other Friday at 11am Pacific over Zoom. - A mailing list for announcements and reminders - The VOLTTRONTM contact email for being added to office hours, the mailing list, and for inquiries is: volttron@pnnl.gov -- The preferred method for questions is through stackoverflow since this is easily discoverable by others who may have the same issue. -- GitHub issue tracker for feature requests, bug reports, and following development activities -- VOLTTRON now has a Slack channel - Sign up here: +- The preferred method for questions is through [StackOverflow](https://stackoverflow.com/questions/tagged/volttron) since this is easily discoverable by others who may have the same issue. +- [GitHub issue tracker](https://github.com/VOLTTRON/volttron/issues) for feature requests, bug reports, and following development activities +- VOLTTRON now has a [Slack channel](volttron-community.slack.com/signup) ## License diff --git a/bootstrap.py b/bootstrap.py index d9d4a397a4..e7e94ead13 100644 --- a/bootstrap.py +++ b/bootstrap.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -137,6 +137,7 @@ def update(operation, verbose=None, upgrade=False, offline=False, optional_requi # Build option_requirements separately to pass install options build_option = '--build-option' if wheeling else '--install-option' + for requirement, options in option_requirements: args = [] for opt in options: @@ -147,9 +148,17 @@ def update(operation, verbose=None, upgrade=False, offline=False, optional_requi # Install local packages and remaining dependencies args = [] target = path + if 'all' in optional_requirements or 'documentation' in optional_requirements: + option_set = set() + + for requirement, options in extras_require.items(): + option_set.add(requirement) + + optional_requirements = list(option_set) if optional_requirements: target += '[' + ','.join(optional_requirements) + ']' args.extend(['--editable', target]) + print(f"Target: {target}") pip(operation, args, verbose, upgrade, offline) try: @@ -281,8 +290,11 @@ def main(argv=sys.argv): # variable at the end of the block. If the option is set then it needs # to be passed on. po = parser.add_argument_group('Extra packaging options') + # If all is specified then install all of the different packages listed in requirements.py + po.add_argument('--all', action='append_const', const='all', dest='optional_args') for arg in extras_require: - po.add_argument('--'+arg, action='append_const', const=arg, dest="optional_args") + if 'dnp' not in arg: + po.add_argument('--'+arg, action='append_const', const=arg, dest="optional_args") # Add rmq download actions. rabbitmq = parser.add_argument_group('rabbitmq options') diff --git a/ci-integration/run-test-docker.sh b/ci-integration/run-test-docker.sh index 5b9435a4c5..8e282e5f07 100755 --- a/ci-integration/run-test-docker.sh +++ b/ci-integration/run-test-docker.sh @@ -19,7 +19,7 @@ fi export FAST_FAIL=${FAST_FAIL:-true} -pip list +python3 -m pip list echo "RUNNING $NUM_PROCESSES PARALLEL PROCESSESS AT A TIME" echo "FAST_FAIL IS $FAST_FAIL" @@ -70,9 +70,8 @@ run_test(){ base_filename="$(basename "$filename")" # Start the docker run module. docker run -e "IGNORE_ENV_CHECK=1" -e "CI=$CI" --name "$base_filename" \ - -t --network="host" -v /var/run/docker.sock:/var/run/docker.sock volttron_test_image \ - pytest "$filename" > "$base_filename.result.txt" 2>&1 & - + -t --network="host" -v /var/run/docker.sock:/var/run/docker.sock volttron_test_image \ + pytest "$filename" > "$base_filename.result.txt" 2>&1 & runningprocs+=($!) outputfiles+=("$base_filename.result.txt") containernames+=("$base_filename") diff --git a/ci-integration/virtualization/Dockerfile.testing b/ci-integration/virtualization/Dockerfile.testing index ded9c5401a..772a78fc50 100644 --- a/ci-integration/virtualization/Dockerfile.testing +++ b/ci-integration/virtualization/Dockerfile.testing @@ -13,6 +13,10 @@ USER root WORKDIR ${VOLTTRON_ROOT} RUN apt-get update + +RUN apt-get install -y \ + sqlite3 + RUN apt-get install -y \ apt-transport-https \ ca-certificates \ diff --git a/ci-integration/virtualization/requirements_test.txt b/ci-integration/virtualization/requirements_test.txt index f658997a36..7b8e453b09 100644 --- a/ci-integration/virtualization/requirements_test.txt +++ b/ci-integration/virtualization/requirements_test.txt @@ -11,3 +11,5 @@ cryptography==2.3 docker psycopg2 mysql-connector-python-rf +pymongo +influxdb diff --git a/docs/requirements.txt b/docs/requirements.txt index 1f2faeb7cd..84232c5b90 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,7 +1,7 @@ sphinx_rtd_theme sphinx-autobuild -sphinx -recommonmark +sphinx==3.3.0 +m2r2 bacpypes enum34 funcsigs @@ -13,6 +13,7 @@ python-dateutil pytz pyzmq grequests +requests simplejson six Twisted @@ -25,7 +26,4 @@ greenlet zmq ply psutil -pymongo -mysql-connector-python-rf ws4py -paho-mqtt diff --git a/docs/source/agent-framework/agents-overview.rst b/docs/source/agent-framework/agents-overview.rst new file mode 100644 index 0000000000..6208b90f53 --- /dev/null +++ b/docs/source/agent-framework/agents-overview.rst @@ -0,0 +1,43 @@ +.. _Agent-Framework: + +=============== +Agents Overview +=============== + +Agents in VOLTTRON can be loosely defined as software modules communicating on the platform which perform some function +on behalf of the user. Agents may perform a huge variety of tasks, but common use cases involve data collection, +control of ICS and IOT devices, and various platform management tasks. Agents implemented using the VOLTTRON agent +framework inherit a number of capabilities, including message bus connectivity and agent lifecycle. + +Agents deployed on VOLTTRON can perform one or more roles which can be broadly classified into the following groups: + +- Platform Agents: Agents which are part of the platform and provide a service to other agents. Examples are the + Actuator and Platform Driver agents which serve as interfaces between control agents and drivers. +- Control Agents: These agents implement algorithms to control the devices of interest and interact with other + resources to achieve some goal. +- Service Agents: These agents perform various data collection or platform management services. Agents in this + category include weather service agents which collect weather data from remote sources or operations agents which + help users maintain situational awareness of their deployment. +- Cloud Agents: These agents represent a remote application which needs access to the messages and data on the + platform. This agent would subscribe to topics of interest to the remote application and would also allow it publish + data to the platform. + +The platform includes some valuable services which can be leveraged by agents: + +- Message Bus: All agents and services publish and subscribe to topics on the message bus. This provides a single + interface that abstracts the details of devices and agents from each other. Components in the platform basically + produce and consume events. +- Configuration Store: Using the configuration store, agent operations can be altered ad-hoc without significant + disruption or downtime. +- Historian Framework: Historian agents automatically collect data from a subset of topics on the message bus and store + them in a data store of choice. Currently SQL, MongoDB, CrateDB and other historians exist, and more can be + developed to fit the needs of a deployment by inheriting from the base historian. The base historian has been + developed to be fast and reliable, and to handle many common pitfalls of data collection over a network. +- Weather Information: These agents periodically retrieve data from the a remote weather API then format the + response and publish it to the platform message bus on a weather topic. +- Device interfaces: Drivers publish device data onto the message bus and send control signals issued from control + agents to the corresponding device. Drivers are capable of handling the locking of devices to prevent multiple + conflicting directives. +- Application Scheduling: This service allows the scheduling of agents’ access to devices in order to prevent conflicts. +- Logging service: Agents can publish arbitrary strings to a logging topic and this service will push them to a + historian for later analysis. diff --git a/docs/source/core_services/control/Agent-Execution-Environment.rst b/docs/source/agent-framework/aip.rst similarity index 64% rename from docs/source/core_services/control/Agent-Execution-Environment.rst rename to docs/source/agent-framework/aip.rst index 5ea200dc04..28b6fe0ca2 100644 --- a/docs/source/core_services/control/Agent-Execution-Environment.rst +++ b/docs/source/agent-framework/aip.rst @@ -1,3 +1,10 @@ +.. _Agent-Instantiation-and-Packaging: + +======================================= +AIP - Agent Instantiation and Packaging +======================================= + + .. _Agent-Execution-Environment: Used Environmental Variables @@ -6,3 +13,6 @@ Used Environmental Variables - **AGENT_VIP_IDENTITY** - The router address an agent will attempt to connect to. - **AGENT_CONFIG** - The path to a configuration file to use during agent launch. - **VOLTTRON_HOME** - The home directory where the volttron instances is located. + +Documentation coming soon! + diff --git a/docs/source/agent-framework/core-service-agents/dnp3-and-mesa/dnp3-agent.rst b/docs/source/agent-framework/core-service-agents/dnp3-and-mesa/dnp3-agent.rst new file mode 100644 index 0000000000..8df6cf83db --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/dnp3-and-mesa/dnp3-agent.rst @@ -0,0 +1,197 @@ +.. _DNP3-Agent: + +========== +DNP3 Agent +========== + +`DNP3 `_ (Distributed Network Protocol) is a set of communications protocols that +are widely used by utilities such as electric power companies, primarily for +`SCADA `_ purposes. It was adopted in 2010 as +`IEEE Std 1815-2010 `_, +later updated to `1815-2012 `_. + +VOLTTRON's DNP3 Agent is an implementation of a DNP3 Outstation as specified in IEEE Std 1815-2012. It engages in +bidirectional network communications with a DNP3 Master, which might be located at a power utility. + +Like some other VOLTTRON protocol agents (e.g. IEEE2030_5Agent), the DNP3 Agent can optionally be front-ended by a DNP3 +device driver running under VOLTTRON's PlatformDriverAgent. This allows a DNP3 Master to be treated like any other device +in VOLTTRON's ecosystem. + +The VOLTTRON DNP3 Agent implementation of an Outstation is built on PyDNP3, an open-source library from Kisensum +containing Python language bindings for Automatak's C++ `opendnp3 `_ library, the +de facto reference implementation of DNP3. + +The DNP3 Agent exposes DNP3 application-layer functionality, creating an extensible base from which specific custom +behavior can be designed and supported. By default, the DNP3 Agent acts as a simple transfer agent, publishing data +received from the Master on the VOLTTRON Message Bus, and responding to RPCs from other VOLTTRON agents by sending data +to the Master. + + +Requirements +============ + +PyDNP3 can be installed in an activated environment with: + +.. code-block:: bash + + pip install pydnp3 + + +RPC Calls +--------- + +The DNP3 Agent exposes the following VOLTTRON RPC calls: + +.. code-block:: python + + def get_point(self, point_name): + """ + Look up the most-recently-received value for a given output point. + + @param point_name: The point name of a DNP3 PointDefinition. + @return: The (unwrapped) value of a received point. + """ + + def get_point_by_index(self, group, index): + """ + Look up the most-recently-received value for a given point. + + @param group: The group number of a DNP3 point. + @param index: The index of a DNP3 point. + @return: The (unwrapped) value of a received point. + """ + + def get_points(self): + """ + Look up the most-recently-received value of each configured output point. + + @return: A dictionary of point values, indexed by their VOLTTRON point names. + """ + + def set_point(self, point_name, value): + """ + Set the value of a given input point. + + @param point_name: The point name of a DNP3 PointDefinition. + @param value: The value to set. The value's data type must match the one in the DNP3 PointDefinition. + """ + + def set_points(self, point_list): + """ + Set point values for a dictionary of points. + + @param point_list: A dictionary of {point_name: value} for a list of DNP3 points to set. + """ + + def config_points(self, point_map): + """ + For each of the agent's points, map its VOLTTRON point name to its DNP3 group and index. + + @param point_map: A dictionary that maps a point's VOLTTRON point name to its DNP3 group and index. + """ + + def get_point_definitions(self, point_name_list): + """ + For each DNP3 point name in point_name_list, return a dictionary with each of the point definitions. + + The returned dictionary looks like this: + + { + "point_name1": { + "property1": "property1_value", + "property2": "property2_value", + ... + }, + "point_name2": { + "property1": "property1_value", + "property2": "property2_value", + ... + } + } + + If a definition cannot be found for a point name, it is omitted from the returned dictionary. + + :param point_name_list: A list of point names. + :return: A dictionary of point definitions. + """ + + +Pub/Sub Calls +------------- + +The DNP3 Agent uses two topics when publishing data to the VOLTTRON message bus: + + * **Point Values (default topic: `dnp3/point`)**: As the DNP3 Agent communicates with the Master, + it publishes received point values on the VOLTTRON message bus. + + * **Outstation status (default topic: dnp3/status)**: If the status of the DNP3 Agent outstation + changes, for example if it is restarted, it publishes its new status on the VOLTTRON message bus. + + +Data Dictionary of Point Definitions +------------------------------------ + +The DNP3 Agent loads and uses a data dictionary of point definitions, which are maintained by agreement between the +(DNP3 Agent) Outstation and the DNP3 Master. The data dictionary is stored in the agent's registry. + + +Current Point Values +-------------------- + +The DNP3 Agent tracks the most-recently-received value for each point definition in its data dictionary, regardless of +whether the point value's source is a VOLTTRON RPC call or a message from the DNP3 Master. + + +Agent Configuration +------------------- + +The DNP3Agent configuration file specifies the following fields: + + - **local_ip**: (string) Outstation's host address (DNS resolved). Default: ``0.0.0.0``. + - **port**: (integer) Outstation's port number - the port that the remote endpoint (Master) is listening on. Default: + 20000. + - **point_topic**: (string) VOLTTRON message bus topic to use when publishing DNP3 point values. Default: + ``dnp3/point``. + - **outstation_status_topic**: (string) Message bus topic to use when publishing outstation status. Default: + ``dnp3/outstation_status``. + - **outstation_config**: (dictionary) Outstation configuration parameters. All are optional. Parameters include: + + - **database_sizes**: (integer) Size of each outstation database buffer. Default: 10. + - **event_buffers**: (integer) Size of the database event buffers. Default: 10. + - **allow_unsolicited**: (boolean) Whether to allow unsolicited requests. Default: ``True``. + - **link_local_addr**: (integer) Link layer local address. Default: 10. + - **link_remote_addr**: (integer) Link layer remote address. Default: 1. + - **log_levels**: (list) List of bit field names (`OR'd` together) that filter what gets logged by DNP3. Default: + ``NORMAL``. Possible values: ``ALL``, ``ALL_APP_COMMS``, ``ALL_COMMS``, ``NORMAL``, ``NOTHING``. + - **threads_to_allocate**: (integer) Threads to allocate in the manager's thread pool. Default: 1. + +A sample DNP3 Agent configuration file is available in `services/core/DNP3Agent/config`. + + +VOLTTRON DNP3 Device Driver +=========================== + +VOLTTRON's DNP3 device driver exposes `get_point`/`set_point` RPC calls and scrapes for DNP3 points. + +The driver periodically issues DNP3Agent RPC calls to refresh its cached representation of DNP3 data. It issues RPC +calls to the DNP3 Agent as needed when responding to `get_point`, `set_point` and `scrape_all` calls. + +For information about the DNP3 driver, see :ref:`DNP3 Driver `. + + +Installing the DNP3 Agent +========================= + +To install DNP3Agent, please consult the installation advice in `services/core/DNP3Agent/README.md`. `README.md` +specifies a default agent configuration, which can be overridden as needed. + +An agent installation script is available: + +.. code-block:: shell + + $ export VOLTTRON_ROOT= + $ cd $VOLTTRON_ROOT + $ source services/core/DNP3Agent/install_dnp3_agent.sh + +When installing the Mesa Agent, please note that the agent's point definitions must be loaded into the agent's config +store. See `install_dnp3_agent.sh` for an example of how to load them. diff --git a/docs/source/agent-framework/core-service-agents/dnp3-and-mesa/mesa-agent.rst b/docs/source/agent-framework/core-service-agents/dnp3-and-mesa/mesa-agent.rst new file mode 100644 index 0000000000..d5b1ce7ac5 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/dnp3-and-mesa/mesa-agent.rst @@ -0,0 +1,227 @@ +.. _MESA: + +========== +Mesa Agent +========== + +The Mesa Agent is a VOLTTRON agent that handles MESA-ESS DNP3 outstation communications. It subclasses and extends the +functionality of VOLTTRON's DNP3 Agent. Like the DNP3 Agent, the Mesa Agent models a DNP3 outstation, communicating +with a DNP3 master. + +For a description of DNP3 and the VOLTTRON DNP3 agent, please refer to the :ref:`DNP3 Agent documentation `. + +VOLTTRON's Mesa Agent and DNP3 Agent are implementations of a DNP3 Outstation as specified in IEEE Std 1815-2012. They +engage in bidirectional network communications with a DNP3 Master, which might be located at a power utility. + +MESA-ESS is an extension and enhancement to DNP3. It builds on the basic DNP3 communications protocol, adding support +for more complex structures, including functions, arrays, curves and schedules. The draft specification for MESA-ESS, +as well as a spreadsheet of point definitions, can be found at http://mesastandards.org/mesa-standards/. + +VOLTTRON's DNP3 Agent and Mesa Agent implementations of an Outstation are built on `pydnp3`, an open-source library from +Kisensum containing Python language bindings for Automatak's C++ `opendnp3 `_ +library, the de-facto reference implementation of DNP3. + +MesaAgent exposes DNP3 application-layer functionality, creating an extensible base from which specific custom behavior +can be designed and supported, including support for MESA functions, arrays and selector blocks. By default, the Mesa +Agent acts as a simple transfer agent, publishing data received from the Master on the VOLTTRON Message Bus, and +responding to RPCs from other VOLTTRON agents by sending data to the Master. Properties of the point and function +definitions also enable the use of more complex controls for point data capture and publication. + +The Mesa Agent was developed by Kisensum for use by 8minutenergy, which provided generous financial support for the +open-source contribution to the VOLTTRON platform, along with valuable feedback based on experience with the agent in a +production context. + + +RPC Calls +========= + +The Mesa Agent exposes the following VOLTTRON RPC calls: + +.. code-block:: python + + def get_point(self, point_name): + """ + Look up the most-recently-received value for a given output point. + + @param point_name: The point name of a DNP3 PointDefinition. + @return: The (unwrapped) value of a received point. + """ + + def get_point_by_index(self, data_type, index): + """ + Look up the most-recently-received value for a given point. + + @param data_type: The data_type of a DNP3 point. + @param index: The index of a DNP3 point. + @return: The (unwrapped) value of a received point. + """ + + def get_points(self): + """ + Look up the most-recently-received value of each configured output point. + + @return: A dictionary of point values, indexed by their point names. + """ + + def get_configured_points(self): + """ + Look up the most-recently-received value of each configured point. + + @return: A dictionary of point values, indexed by their point names. + """ + + def set_point(self, point_name, value): + """ + Set the value of a given input point. + + @param point_name: The point name of a DNP3 PointDefinition. + @param value: The value to set. The value's data type must match the one in the DNP3 PointDefinition. + """ + + def set_points(self, point_dict): + """ + Set point values for a dictionary of points. + + @param point_dict: A dictionary of {point_name: value} for a list of DNP3 points to set. + """ + + def config_points(self, point_map): + """ + For each of the agent's points, map its VOLTTRON point name to its DNP3 group and index. + + @param point_map: A dictionary that maps a point's VOLTTRON point name to its DNP3 group and index. + """ + + def get_point_definitions(self, point_name_list): + """ + For each DNP3 point name in point_name_list, return a dictionary with each of the point definitions. + + The returned dictionary looks like this: + + { + "point_name1": { + "property1": "property1_value", + "property2": "property2_value", + ... + }, + "point_name2": { + "property1": "property1_value", + "property2": "property2_value", + ... + } + } + + If a definition cannot be found for a point name, it is omitted from the returned dictionary. + + :param point_name_list: A list of point names. + :return: A dictionary of point definitions. + """ + + def get_selector_block(self, point_name, edit_selector): + """ + Return a dictionary of point values for a given selector block. + + :param point_name: Name of the first point in the selector block. + :param edit_selector: The index (edit selector) of the block. + :return: A dictionary of point values. + """ + + def reset(self): + """ + Reset the agent's internal state, emptying point value caches. Used during iterative testing. + """ + + +Pub/Sub Calls +============= + +MesaAgent uses three topics when publishing data to the VOLTTRON message bus: + + * **Point Values (default topic: dnp3/point)**: As the Mesa Agent communicates with the Master, it publishes received + point values on the VOLTTRON message bus. + + * **Functions (default topic: mesa/function)**: When the Mesa Agent receives a function step with a "publish" action + value, it publishes the current state of the function (all steps received to date) on the VOLTTRON message bus. + + * **Outstation status (default topic: mesa/status)**: If the status of the Mesa Agent outstation + changes, for example if it is restarted, it publishes its new status on the VOLTTRON message bus. + + +Data Dictionaries of Point and Function Definitions +--------------------------------------------------- + +The Mesa Agent loads and uses data dictionaries of point and function definitions, which are maintained by agreement +between the (Mesa Agent) Outstation and the DNP3 Master. The data dictionaries are stored in the agent's registry. + + +Current Point Values +-------------------- + +The Mesa Agent tracks the most-recently-received value for each point definition in its data dictionary, regardless of +whether the point value's source is a VOLTTRON RPC call or a message from the DNP3 Master. + + +Agent Configuration +=================== + +The Mesa Agent configuration specifies the following fields: + + - **local_ip**: (string) Outstation's host address (DNS resolved). Default: ``0.0.0.0``. + - **port**: (integer) Outstation's port number - the port that the remote endpoint (Master) is listening on. + Default: 20000. + - **point_topic**: (string) VOLTTRON message bus topic to use when publishing DNP3 point values. Default: + ``dnp3/point``. + - **function_topic**: (string) Message bus topic to use when publishing MESA-ESS functions. Default: + ``mesa/function``. + - **outstation_status_topic**: (string) Message bus topic to use when publishing outstation status. Default: + ``mesa/outstation_status``. + - **all_functions_supported_by_default**: (boolean) When deciding whether to reject points for unsupported functions, + ignore the values of their 'supported' points: simply treat all functions as supported. Used primarily during + testing. Default: ``False``. + - **function_validation**: (boolean) When deciding whether to support sending single points to the Mesa Agent. If + `function_validation` is ``True``, the Mesa Agent will raise an exception when receiving any invalid point in + current function. If `function_validation` is ``False``, Mesa Agent will reset current function to None instead of + raising the exception. Default: ``False``. + - **outstation_config**: (dictionary) Outstation configuration parameters. All are optional. Parameters include: + + - **database_sizes**: (integer) Size of each outstation database buffer. Default: 10. + - **event_buffers**: (integer) Size of the database event buffers. Default: 10. + - **allow_unsolicited**: (boolean) Whether to allow unsolicited requests. Default: ``True``. + - **link_local_addr**: (integer) Link layer local address. Default: 10. + - **link_remote_addr**: (integer) Link layer remote address. Default: 1. + - **log_levels**: (list) List of bit field names (OR'd together) that filter what gets logged by DNP3. + Default: [NORMAL]. Possible values: ``ALL``, ``ALL_APP_COMMS``, ``ALL_COMMS``, ``NORMAL``, ``NOTHING``. + - **threads_to_allocate**: (integer) Threads to allocate in the manager's thread pool. Default: 1. + +A sample Mesa Agent configuration file is available in ``services/core/DNP3Agent/mesaagent.config``. + + +Installing the Mesa Agent +========================= + +To install the Mesa Agent, please consult the installation advice in ``services/core/DNP3Agent/README.md``, +which includes advice on installing ``pydnp3``, a library upon which the DNP3 Agent depends. + +After installing libraries as described in the Mesa Agent `README.md` file, the agent can be installed from a +command-line shell as follows: + +.. code-block:: shell + + $ export VOLTTRON_ROOT= + $ cd $VOLTTRON_ROOT + $ source services/core/DNP3Agent/install_mesa_agent.sh + +`README.md` specifies a default agent configuration, which can be overridden as needed. + +Here are some things to note when installing the Mesa Agent: + + - The Mesa Agent source code resides in, and is installed from, a DNP3 subdirectory, thus allowing it to be + implemented as a subclass of the base DNP3 agent class. When installing the Mesa Agent, inform the install script + that it should build from the `mesa` subdirectory by exporting the following environment variable: + + .. code-block:: shell + + $ export AGENT_MODULE=dnp3.mesa.agent + + - The agent's point and function definitions must be loaded into the agent's config store. See the + ``install_mesa_agent.sh`` script for an example of how to load them. diff --git a/docs/source/core_services/service_agents/externaldata/externaldata.rst b/docs/source/agent-framework/core-service-agents/external-data/external-data.rst similarity index 100% rename from docs/source/core_services/service_agents/externaldata/externaldata.rst rename to docs/source/agent-framework/core-service-agents/external-data/external-data.rst diff --git a/docs/source/specifications/files/volttron_ieee2030_5.jpg b/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/files/volttron_ieee2030_5.jpg similarity index 100% rename from docs/source/specifications/files/volttron_ieee2030_5.jpg rename to docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/files/volttron_ieee2030_5.jpg diff --git a/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/ieee-2030_5-agent.rst b/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/ieee-2030_5-agent.rst new file mode 100644 index 0000000000..636ee06db8 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/ieee-2030_5-agent.rst @@ -0,0 +1,118 @@ +.. _IEEE-2030_5-Agent: + +===================== +IEEE 2030.5 DER Agent +===================== + +The IEEE 2030.5 Agent (IEEE2030_5 in the VOLTTRON repository) implements a IEEE 2030.5 server that receives HTTP +`POST`/`PUT` requests from IEEE 2030.5 devices. The requests are routed to the IEEE 2030.5 Agent over the VOLTTRON +message bus by VOLTTRON's Master Web Service. The IEEE 2030.5 Agent returns an appropriate HTTP response. In some +cases (e.g., DERControl requests), this response includes a data payload. + +The IEEE 2030.5 Agent maps IEEE 2030.5 resource data to a VOLTTRON IEEE 2030.5 data model based on SunSpec, using block +numbers and point names as defined in the SunSpec Information Model, which in turn is harmonized with 61850. The data +model is given in detail below. + +Each device's data is stored by the IEEE 2030.5 Agent in an `EndDevice` memory structure. This structure is not +persisted to a database. Each `EndDevice` retains only the most recently received value for each field. + +The IEEE2030_5 Agent exposes RPC calls for getting and setting EndDevice data. + + +VOLTTRON IEEE 2030.5 Device Driver +---------------------------------- + +The :ref:`IEEE 2030.5 device driver ` is a new addition to VOLTTRON Platform Driver Agent's +family of standard device drivers. It exposes ``get_point``/``set_point calls`` for IEEE 2030.5 EndDevice fields. + +The IEEE 2030.5 device driver periodically issues IEEE2030_5 Agent RPC calls to refresh its cached representation of +EndDevice data. It issues RPC calls to IEEE2030_5Agent as needed when responding to ``get_point``, ``set_point`` and +``scrape_all`` calls. + + +Field Definitions +^^^^^^^^^^^^^^^^^ + +These field IDs correspond to the ones in the IEEE 2030.5 device driver's configuration file, ``ieee2030_5.csv``. +They have been used in that file's "Volttron Point Name" column and also in its "Point Name" column. + +================= ============================= ==================================================== ======= ====== +Field ID IEEE 2030.5 Resource/Property Description Units Type +================= ============================= ==================================================== ======= ====== +b1_Md device_information Model (32 char lim). string + mfModel +b1_Opt device_information Long-form device identifier (32 char lim). string + lfdi +b1_SN abstract_device Short-form device identifier (32 char lim). string + sfdi +b1_Vr device_information Version (16 char lim). string + mfHwVer +b113_A mirror_meter_reading AC current. A float + PhaseCurrentAvg +b113_DCA mirror_meter_reading DC current. A float + InstantPackCurrent +b113_DCV mirror_meter_reading DC voltage. V float + LineVoltageAvg +b113_DCW mirror_meter_reading DC power. W float + PhasePowerAvg +b113_PF mirror_meter_reading AC power factor. % float + PhasePFA +b113_WH mirror_meter_reading AC energy. Wh float + EnergyIMP +b120_AhrRtg der_capability Usable capacity of the battery. Ah float + rtgAh Maximum charge minus minimum charge. +b120_ARtg der_capability Maximum RMS AC current level capability of the A float + rtgA inverter. +b120_MaxChaRte der_capability Maximum rate of energy transfer into the device. W float + rtgMaxChargeRate +b120_MaxDisChaRte der_capability Maximum rate of energy transfer out of the device. W float + rtgMaxDischargeRate +b120_WHRtg der_capability Nominal energy rating of the storage device. Wh float + rtgWh +b120_WRtg der_capability Continuous power output capability of the inverter. W float + rtgW +b121_WMax der_settings Maximum power output. Default to WRtg. W float + setMaxChargeRate +b122_ActWh mirror_meter_reading AC lifetime active (real) energy output. Wh float + EnergyEXP +b122_StorConn der_status CONNECTED=0, AVAILABLE=1, OPERATING=2, TEST=3. enum + storConnectStatus +b124_WChaMax der_control Setpoint for maximum charge. This is the only W float + opModFixedFlow field that is writable with a set_point call. +b403_Tmp mirror_meter_reading Pack temperature. C float + InstantPackTemp +b404_DCW PEVInfo Power flow in or out of the inverter. W float + chargingPowerNow +b404_DCWh der_availability Output energy (absolute SOC). Wh float + availabilityDuration Calculated as (availabilityDuration / 3600) * WMax. +b802_LocRemCtl der_status Control Mode: REMOTE=0, LOCAL=1. enum + localControlModeStatus +b802_SoC der_status State of Charge %. % WHRtg float + stateOfChargeStatus +b802_State der_status DISCONNECTED=1, INITIALIZING=2, CONNECTED=3, enum + inverterStatus STANDBY=4, SOC PROTECTION=5, FAULT=99. +================= ============================= ==================================================== ======= ====== + + +Revising and Expanding the Field Definitions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The IEEE 2030.5-to-SunSpec field mappings in this implementation are a relatively thin subset of all possible +field definitions. Developers are encouraged to expand the definitions. + +The procedure for expanding the field mappings requires you to make changes in two places: + +1. Update the driver's point definitions in ``services/core/PlatformDriverAgent/platform_driver/ieee2030_5.csv`` +2. Update the IEEE 2030.5-to-SunSpec field mappings in ``services/core/IEEE2030_5Agent/ieee2030_5/end_device.py`` and + ``__init__.py`` + +When updating VOLTTRON's IEEE 2030.5 data model, please use field IDs that conform to the SunSpec +block-number-and-field-name model outlined in the SunSpec Information Model Reference (see the link below). + +View the :ref:`IEEE 2030.5 agent specification document ` to learn more about IEEE 2030.5 and +the IEEE 2030.5 agent and driver. + + +.. toctree:: + + ieee-2030_5-specification diff --git a/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/ieee-2030_5-specification.rst b/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/ieee-2030_5-specification.rst new file mode 100644 index 0000000000..361f9e1150 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/ieee-2030_5-agent/ieee-2030_5-specification.rst @@ -0,0 +1,207 @@ +.. _IEEE-2030_5-Specification: + +======================= +IEEE 2030.5 DER Support +======================= + +Version 1.0 + +Smart Energy Profile 2.0 (SEP 2.0, IEEE 2030.5) specifies a REST architecture built around the core HTTP verbs: GET, +HEAD, PUT, POST and DELETE. A specification for the IEEE 2030.5 protocol can be found +`here `_. + +IEEE 2030.5 EndDevices (clients) POST XML resources representing their state, and GET XML resources containing command +and control information from the server. The server never reaches out to the client unless a "subscription" is +registered and supported for a particular resource type. This implementation does not use IEEE 2030.5 registered +subscriptions. + +The IEEE 2030.5 specification requires HTTP headers, and it explicitly requires RESTful response codes, for example: + + - 201 - "Created" + - 204 - "No Content" + - 301 - "Moved Permanently" + - etc. + +IEEE 2030.5 message encoding may be either XML or EXI. Only XML is supported in this implementation. + +IEEE 2030.5 requires HTTPS/TLS version 1.2 along with support for the cipher suite TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8. +Production installation requires a certificate issued by a IEEE 2030.5 CA. The encryption requirement can be met by +using a web server such as Apache to proxy the HTTPs traffic. + +IEEE 2030.5 discovery, if supported, must be implemented by an xmDNS server. Avahi can be modified to perform this +function. + + +Function Sets +============= + +IEEE 2030.5 groups XML resources into "Function Sets." Some of these function sets provide a core set of functionality +used across higher-level function sets. This implementation implements resources from the following function sets: + + - Time + - Device Information + - Device Capabilities + - End Device + - Function Set Assignments + - Power Status + - Distributed Energy Resources + + +Distributed Energy Resources (DERs) +----------------------------------- + +Distributed energy resources (DERs) are devices that generate energy, e.g., solar inverters, or store energy, e.g., +battery storage systems, electric vehicle supply equipment (EVSEs). These devices are managed by a IEEE 2030.5 DER +server using DERPrograms which are described by the IEEE 2030.5 specification as follows: + + Servers host one or more DERPrograms, which in turn expose DERControl events to DER clients. + DERControl instances contain attributes that allow DER clients to respond to events + that are targeted to their device type. A DERControl instance also includes scheduling + attributes that allow DER clients to store and process future events. These attributes + include start time and duration, as well an indication of the need for randomization of + the start and / or duration of the event. The IEEE 2030.5 DER client model is based on the + SunSpec Alliance Inverter Control Model [SunSpec] which is derived from + IEC 61850-90-7 [61850] and [EPRI]. + +EndDevices post multiple IEEE 2030.5 resources describing their status. The following is an +example of a Power Status resource that might be posted by an EVSE (vehicle charging station): + +.. code-block:: xml + + + 4 + 1487812095 + 1 + 9300 + + + 3 + -5 + + + 3 + 22 + + + 3 + 7 + + 11280 + 10000 + 9223372036854775807 + 1487812095 + + + + +Design Details +-------------- + +.. image:: files/volttron_ieee2030_5.jpg + +VOLTTRON's IEEE 2030.5 implementation includes a IEEE 2030.5 Agent and a IEEE 2030.5 device driver, as described below. + + +VOLTTRON IEEE 2030.5 Device Driver +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The IEEE 2030.5 device driver is a new addition to VOLTTRON Platform Driver Agent's family of standard device drivers. It +exposes `get_point`/`set_point` calls for IEEE 2030.5 EndDevice fields. + +The IEEE 2030.5 device driver periodically issues the IEEE 2030.5 Agent RPC calls to refresh its cached representation +of EndDevice data. It issues RPC calls to the IEEE 2030.5 Agent as needed when responding to `get_point`, `set_point` +and `scrape_all` calls. + +Field Definitions +^^^^^^^^^^^^^^^^^ + +These field IDs correspond to the ones in the IEEE 2030.5 device driver's configuration file, `ieee2030_5.csv`. +They have been used in that file's `Volttron Point Name` column and also in its `Point Name` column. + +================= ============================= ==================================================== ======= ====== +Field ID IEEE 2030.5 Resource/Property Description Units Type +================= ============================= ==================================================== ======= ====== +b1_Md device_information Model (32 char lim). string + mfModel +b1_Opt device_information Long-form device identifier (32 char lim). string + lfdi +b1_SN abstract_device Short-form device identifier (32 char lim). string + sfdi +b1_Vr device_information Version (16 char lim). string + mfHwVer +b113_A mirror_meter_reading AC current. A float + PhaseCurrentAvg +b113_DCA mirror_meter_reading DC current. A float + InstantPackCurrent +b113_DCV mirror_meter_reading DC voltage. V float + LineVoltageAvg +b113_DCW mirror_meter_reading DC power. W float + PhasePowerAvg +b113_PF mirror_meter_reading AC power factor. % float + PhasePFA +b113_WH mirror_meter_reading AC energy. Wh float + EnergyIMP +b120_AhrRtg der_capability Usable capacity of the battery. Ah float + rtgAh Maximum charge minus minimum charge. +b120_ARtg der_capability Maximum RMS AC current level capability of the A float + rtgA inverter. +b120_MaxChaRte der_capability Maximum rate of energy transfer into the device. W float + rtgMaxChargeRate +b120_MaxDisChaRte der_capability Maximum rate of energy transfer out of the device. W float + rtgMaxDischargeRate +b120_WHRtg der_capability Nominal energy rating of the storage device. Wh float + rtgWh +b120_WRtg der_capability Continuous power output capability of the inverter. W float + rtgW +b121_WMax der_settings Maximum power output. Default to WRtg. W float + setMaxChargeRate +b122_ActWh mirror_meter_reading AC lifetime active (real) energy output. Wh float + EnergyEXP +b122_StorConn der_status CONNECTED=0, AVAILABLE=1, OPERATING=2, TEST=3. enum + storConnectStatus +b124_WChaMax der_control Setpoint for maximum charge. This is the only W float + opModFixedFlow field that is writable with a set_point call. +b403_Tmp mirror_meter_reading Pack temperature. C float + InstantPackTemp +b404_DCW PEVInfo Power flow in or out of the inverter. W float + chargingPowerNow +b404_DCWh der_availability Output energy (absolute SOC). Wh float + availabilityDuration Calculated as (availabilityDuration / 3600) * WMax. +b802_LocRemCtl der_status Control Mode: REMOTE=0, LOCAL=1. enum + localControlModeStatus +b802_SoC der_status State of Charge %. % WHRtg float + stateOfChargeStatus +b802_State der_status DISCONNECTED=1, INITIALIZING=2, CONNECTED=3, enum + inverterStatus STANDBY=4, SOC PROTECTION=5, FAULT=99. +================= ============================= ==================================================== ======= ====== + + +Revising and Expanding the Field Definitions +-------------------------------------------- + +The IEEE 2030.5-to-SunSpec field mappings in this implementation are a relatively thin subset of all possible +field definitions. Developers are encouraged to expand the definitions. + +The procedure for expanding the field mappings requires you to make changes in two places: + +1. Update the driver's point definitions in `services/core/PlatformDriverAgent/platform_driver/ieee2030_5.csv` +2. Update the IEEE 2030.5-to-SunSpec field mappings in `services/core/IEEE2030_5Agent/ieee2030_5/end_device.py` and + `__init__.py` + +When updating VOLTTRON's IEEE 2030.5 data model, please use field IDs that conform to the SunSpec +block-number-and-field-name model outlined in the SunSpec Information Model Reference (see the link below). + + +For Further Information +======================= + +SunSpec References: + + - Information model specification: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Information-Models-12041.pdf + - Information model reference spreadsheet: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Information-Model-Reference.xlsx + - Inverter models: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Inverter-Models-12020.pdf + - Energy storage models: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Energy-Storage-Models-12032.pdf + +Questions? Please contact: + + - Rob Calvert (rob@kisensum.com) or James Sheridan (james@kisensum.com) diff --git a/docs/source/agent-framework/core-service-agents/index.rst b/docs/source/agent-framework/core-service-agents/index.rst new file mode 100644 index 0000000000..575422b3c0 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/index.rst @@ -0,0 +1,21 @@ +.. _Core-Service-Agents: + +============= +Core Services +============= + +Agents in the `services/core` directory support the most common use cases of the platform. For details on each, please +refer to the corresponding documents. + +.. toctree:: + :maxdepth: 1 + + platform-driver/platform-driver-agent + market-service/market-service-agent + dnp3-and-mesa/dnp3-agent + dnp3-and-mesa/mesa-agent + external-data/external-data + ieee-2030_5-agent/ieee-2030_5-agent + obix/obix-history-agent + openadr-ven/ven-agent + volttron-central/volttron-central-overview diff --git a/docs/source/core_services/service_agents/market_service/MarketServiceAgent.rst b/docs/source/agent-framework/core-service-agents/market-service/market-service-agent.rst similarity index 64% rename from docs/source/core_services/service_agents/market_service/MarketServiceAgent.rst rename to docs/source/agent-framework/core-service-agents/market-service/market-service-agent.rst index 6e93a44d9e..e13edeaef4 100644 --- a/docs/source/core_services/service_agents/market_service/MarketServiceAgent.rst +++ b/docs/source/agent-framework/core-service-agents/market-service/market-service-agent.rst @@ -7,34 +7,37 @@ Market Service Agent Introduction ============ -The MarketServiceAgent implements a variation of a double-blind auction, in which each market participant bids +The Market Service Agent implements a variation of a double-blind auction, in which each market participant bids to buy or sell a commodity for a given price. -In contrast to other common implementations, participants do not bid single price-quantity pairs. -Instead, they bid a price-quantity curve, or “flexibility curve” into their respective markets. -Market participants may be both buyers in one market and sellers in another. +In contrast to other common implementations, participants do not bid single price-quantity pairs. Instead, they bid a +price-quantity curve, or “flexibility curve” into their respective markets. Market participants may be both buyers in +one market and sellers in another. + Settling of the market is a “single shot” process that begins with bidding that progresses from the bottom up -and concludes with a clearing of the markets from the top down. This is termed “single shot” because there is no +and concludes with a clearing of the markets from the top down. This is termed “single shot” because there is no iteration required to find the clearing price or quantity at any level of the market structure. -Once the market has cleared, the process begins again for the next market interval, and -new bids are submitted based on the updated states of the agents. + +Once the market has cleared, the process begins again for the next market interval, and new bids are submitted based on +the updated states of the agents. + Requirements ------------ -The Market Service Agent requires the Transitions (version 0.6.9) and NumPy (version 1.15.4) packages. These +The Market Service Agent requires the `Transitions` (version 0.6.9) and `NumPy` (version 1.15.4) packages. These packages can be installed in an activated environment with: -:: +.. code-block:: bash pip install transitions==0.6.9 pip install numpy==1.15.4 + Market Timing -------------- +============= -The MarketServiceAgent is driven by the Director. The Director -drives the MarketServiceAgent through a timed loop. The Director has just a few parameters -that are configured by default with adequate values. They are: +The MarketServiceAgent is driven by the Director. The Director drives the MarketServiceAgent through a timed loop. The +Director has just a few parameters that are configured by default with adequate values. They are: 1. The market_period with a default value of 5 minutes 2. The reservation_delay with a default value of 0 minutes @@ -51,13 +54,13 @@ The timing loop works as follows: * Error messages are published when discovered and usually occur at the end of one of the delays. * The cycle repeats. + How to Use the MarketServiceAgent ================================= A given agent participates in one or more markets by inheriting from the -:ref:`base MarketAgent`. -The base MarketAgent handles all of the communication between the agent and the MarketServiceAgent. -The agent only needs to join each market with the +:ref:`base MarketAgent`. The base MarketAgent handles all of the communication between the +agent and the MarketServiceAgent. The agent only needs to join each market with the :py:meth:`join_market ` method and then respond to the appropriate callback methods. The callback methods are described at the :ref:`base MarketAgent`. diff --git a/docs/source/core_services/drivers/Obix-History-Agent.rst b/docs/source/agent-framework/core-service-agents/obix/obix-history-agent.rst similarity index 65% rename from docs/source/core_services/drivers/Obix-History-Agent.rst rename to docs/source/agent-framework/core-service-agents/obix/obix-history-agent.rst index b34eeb6868..b3511be4eb 100644 --- a/docs/source/core_services/drivers/Obix-History-Agent.rst +++ b/docs/source/agent-framework/core-service-agents/obix/obix-history-agent.rst @@ -1,22 +1,23 @@ -.. _Obix-history: +.. _Obix-History: +================== Obix History Agent ------------------- +================== -The Obix History Agent captures data history data from an Obix RESTful interface and publishes -it to the message bus like a driver for capture by agents and historians. The Agent will setup -its queries to ensure that data is only publishes once. For points queried for the first time -it will go back in time and publish old data as configured. +The Obix History Agent captures data history data from an Obix RESTful interface and publishes it to the message bus +like a driver for capture by agents and historians. The Agent will setup its queries to ensure that data is only +publishes once. For points queried for the first time it will go back in time and publish old data as configured. -The data will be colated into device all publishs automatically and will use a timestamp in the -header based on the timestamps reported by the Obix interface. The publishes will be made in chronological order. +The data will be collated into device `all` publishes automatically and will use a timestamp in the header based on the +timestamps reported by the Obix interface. The publishes will be made in chronological order. Units data is automatically read from the device. For sending commands to devices see :ref:`Obix-config`. + Agent Configuration -******************* +=================== There are three arguments for the **driver_config** section of the device configuration file: @@ -30,7 +31,7 @@ There are three arguments for the **driver_config** section of the device config Here is an example device configuration file: -.. code-block:: json +:: { "url": "http://example.com/obix/histories/EXAMPLE/", @@ -48,22 +49,26 @@ Here is an example device configuration file: A sample Obix configuration file can be found in the VOLTTRON repository in ``services/core/ObixHistoryPublish/config`` + Registry Configuration File -*************************** +=========================== Similar to a driver the Obix History Agent requires a registry file to select the points to publish. -The registry configuration file is a `CSV `_ file. Each row configures a point on the device. +The registry configuration file is a `CSV `_ file. Each row +configures a point on the device. The following columns are required for each row: - **Device Name** - Name of the device to associate with this point. - - **Volttron Point Name** - The Volttron Point name to use when publishing this value. - - **Obix Name** - Name of the point on the obix interface. Escaping of spaces and dashes for use with the interface is handled internaly. + - **Volttron Point Name** - The VOLTTRON Point name to use when publishing this value. + - **Obix Name** - Name of the point on the Obix interface. Escaping of spaces and dashes for use with the interface + is handled internally. -Any additional columns will be ignored. It is common practice to include a **Notes** or **Unit Details** for additional information about a point. +Any additional columns will be ignored. It is common practice to include a `Notes` or `Unit Details` for additional +information about a point. -The following is an example of a Obix History Agent registry confugration file: +The following is an example of a Obix History Agent registry configuration file: .. csv-table:: Obix :header: Device Name,Volttron Point Name,Obix Name @@ -74,25 +79,30 @@ The following is an example of a Obix History Agent registry confugration file: device2,Boiler Plant Hourly Gas Usage,Boiler Plant Hourly Gas Usage device3,CG-1 Water Flow H-1,CG-1 Water Flow H-1 -A sample Obix History Agent configuration can be found in the VOLTTRON repository in ``services/core/ObixHistoryPublish/registry_config.csv`` +A sample Obix History Agent configuration can be found in the VOLTTRON repository in +`services/core/ObixHistoryPublish/registry_config.csv` + .. _Obix-History-AutoConfiguration: Automatic Obix Configuration File Creation -****************************************** -A script that will automatically create both a device and register -configuration file for a site is located in the repository at ``scripts/obix/get_obix_history_config.py``. +========================================== + +A script that will automatically create both a device and register configuration file for a site is located in the +repository at `scripts/obix/get_obix_history_config.py`. The utility is invoked with the command: - ``python get_obix_history_config.py -u -p -d `` +.. code-block:: bash + + python get_obix_history_config.py -u -p -d If either the registry_file or driver_file is omitted the script will output those files to stdout. -If either the username or password options are left out the script will ask for them on the command line before proceeding. +If either the username or password options are left out the script will ask for them on the command line before +proceeding. The device name option specifies a default device for every point in the configuration. -The registry file produced by this script assumes that the `Volttron Point Name` and the `Obix Name` have the same value. -Also, it is assumed that all points should be read only. Users are expected to fix this as appropriate. - +The registry file produced by this script assumes that the `Volttron Point Name` and the `Obix Name` have the same +value. Also, it is assumed that all points should be read only. Users are expected to fix this as appropriate. diff --git a/docs/source/agent-framework/core-service-agents/openadr-ven/ven-agent.rst b/docs/source/agent-framework/core-service-agents/openadr-ven/ven-agent.rst new file mode 100644 index 0000000000..5f14dcba5c --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/openadr-ven/ven-agent.rst @@ -0,0 +1,443 @@ +.. _OpenADR-VEN-Agent: + +====================== +OpenADR 2.0b VEN Agent +====================== + +OpenADR (Automated Demand Response) is a standard for alerting and responding to the need to adjust electric power +consumption in response to fluctuations in grid demand. + +OpenADR communications are conducted between Virtual Top Nodes (VTNs) and Virtual End Nodes (VENs). In this +implementation a VOLTTRON agent, the VEN agent, acts as a VEN, communicating with its VTN by means of EIEvent and +EIReport services in conformance with a subset of the OpenADR 2.0b specification. This document's +`VOLTTRON Interface `_ section defines how the VEN agent relays information to, +and receives data from, other VOLTTRON agents. + +The OpenADR 2.0b specification (http://www.openadr.org/specification) is available from the OpenADR Alliance. This +implementation also generally follows the DR program characteristics of the Capacity Program described in Section 9.2 +of the OpenADR Program Guide (http://www.openadr.org/assets/openadr_drprogramguide_v1.0.pdf). + + +DR Capacity Bidding and Events +============================== + +The OpenADR Capacity Bidding program relies on a pre-committed agreement about the VEN’s load shed capacity. This +agreement is reached in a bidding process transacted outside of the OpenADR interaction, typically with a long-term +scope, perhaps a month or longer. The VTN can “call an event,” indicating that a load-shed event should occur +in conformance with this agreement. The VTN indicates the level of load shedding desired, when the event should occur, +and for how long. The VEN responds with an `optIn` acknowledgment. (It can also `optOut`, but since it has been +pre-committed, an `optOut` may incur penalties.) + + +Reporting +--------- + +The VEN agent reports device status and usage telemetry to the VTN, relying on information received periodically from +other VOLTTRON agents. + + +General Approach +================ + +Events: + +- The VEN agent maintains a persistent record of DR events. +- Event updates (including creation) trigger publication of event JSON on the VOLTTRON message bus. +- Other VOLTTRON agents can also call a get_events() RPC to retrieve the current status of + particular events, or of all active events. + +Reporting: + +- The VEN agent configuration defines telemetry values (data points) that can be reported to the VTN. +- The VEN agent maintains a persistent record of telemetry values over time. +- Other VOLTTRON agents are expected to call report_telemetry() to supply the VEN agent + with a regular stream of telemetry values for reporting. +- Other VOLTTRON agents can receive notification of changes in telemetry reporting + requirements by subscribing to publication of telemetry parameters. + + +VEN Agent VOLTTRON Interface +============================ + +The VEN agent implements the following VOLTTRON PubSub and RPC calls. + +PubSub: event update + +.. code-block:: python + + def publish_event(self, an_event): + """ + Publish an event. + + When an event is created/updated, it is published to the VOLTTRON bus + with a topic that includes 'openadr/event_update'. + + Event JSON structure: + { + "event_id" : String, + "creation_time" : DateTime, + "start_time" : DateTime, + "end_time" : DateTime or None, + "signals" : String, # Values: json string describing one or more signals. + "status" : String, # Values: unresponded, far, near, active, + # completed, canceled. + "opt_type" : String # Values: optIn, optOut, none. + } + + If an event status is 'unresponded', the VEN agent is awaiting a decision on + whether to optIn or optOut. The downstream agent that subscribes to this PubSub + message should communicate that choice to the VEN agent by calling respond_to_event() + (see below). The VEN agent then relays the choice to the VTN. + + @param an_event: an EiEvent. + """ + +PubSub: telemetry parameters update + +.. code-block:: python + + def publish_telemetry_parameters_for_report(self, report): + """ + Publish telemetry parameters. + + When the VEN agent telemetry reporting parameters have been updated (by the VTN), + they are published with a topic that includes 'openadr/telemetry_parameters'. + If a particular report has been updated, the reported parameters are for that report. + + Telemetry parameters JSON example: + { + "telemetry": { + "baseline_power_kw": { + "r_id": "baseline_power", + "frequency": "30", + "report_type": "baseline", + "reading_type": "Mean", + "method_name": "get_baseline_power" + } + "current_power_kw": { + "r_id": "actual_power", + "frequency": "30", + "report_type": "reading", + "reading_type": "Mean", + "method_name": "get_current_power" + } + "manual_override": "False", + "report_status": "active", + "online": "False", + } + } + + The above example indicates that, for reporting purposes, telemetry values + for baseline_power and actual_power should be updated -- via report_telemetry() -- at + least once every 30 seconds. + + Telemetry value definitions such as baseline_power and actual_power come from the + agent configuration. + + @param report: (EiReport) The report whose parameters should be published. + """ + +RPC calls: + +.. code-block:: python + + @RPC.export + def respond_to_event(self, event_id, opt_in_choice=None): + """ + Respond to an event, opting in or opting out. + + If an event's status=unresponded, it is awaiting this call. + When this RPC is received, the VENAgent sends an eventResponse to + the VTN, indicating whether optIn or optOut has been chosen. + If an event remains unresponded for a set period of time, + it times out and automatically optsIn to the event. + + Since this call causes a change in the event's status, it triggers + a PubSub call for the event update, as described above. + + @param event_id: (String) ID of an event. + @param opt_in_choice: (String) 'OptIn' to opt into the event, anything else is treated as 'OptOut'. + """ + +.. code-block:: python + + @RPC.export + def get_events(self, event_id=None, in_progress_only=True, started_after=None, end_time_before=None): + """ + Return a list of events as a JSON string. + + Sample request: + self.get_events(started_after=utils.get_aware_utc_now() - timedelta(hours=1), + end_time_before=utils.get_aware_utc_now()) + + Return a list of events. + + By default, return only event requests with status=active or status=unresponded. + + If an event's status=active, a DR event is currently in progress. + + @param event_id: (String) Default None. + @param in_progress_only: (Boolean) Default True. + @param started_after: (DateTime) Default None. + @param end_time_before: (DateTime) Default None. + @return: (JSON) A list of events -- see 'PubSub: event update'. + """ + +.. code-block:: python + + @RPC.export + def get_telemetry_parameters(self): + """ + Return the VEN agent's current set of telemetry parameters. + + @return: (JSON) Current telemetry parameters -- see 'PubSub: telemetry parameters update'. + """ + +.. code-block:: python + + @RPC.export + def set_telemetry_status(self, online, manual_override): + """ + Update the VEN agent's reporting status. + + Set these properties to either 'TRUE' or 'FALSE'. + + @param online: (Boolean) Whether the VEN agent's resource is online. + @param manual_override: (Boolean) Whether resource control has been overridden. + """ + +.. code-block:: python + + @RPC.export + def report_telemetry(self, telemetry): + """ + Receive an update of the VENAgent's report metrics, and store them in the agent's database. + + Examples of telemetry are: + { + 'baseline_power_kw': '15.2', + 'current_power_kw': '371.1', + 'start_time': '2017-11-21T23:41:46.051405', + 'end_time': '2017-11-21T23:42:45.951405' + } + + @param telemetry_values: (JSON) Current value of each report metric, with reporting-interval start/end. + """ + + +PubSub: Event Update +-------------------- + +When an event is created/updated, the event is published with a topic that includes `openadr/event/{ven_id}`. + +Event JSON structure: + +:: + + { + "event_id" : String, + "creation_time" : DateTime - UTC, + "start_time" : DateTime - UTC, + "end_time" : DateTime - UTC, + "priority" : Integer, # Values: 0, 1, 2, 3. Usually expected to be 1. + "signals" : String, # Values: json string describing one or more signals. + "status" : String, # Values: unresponded, far, near, active, completed, canceled. + "opt_type" : String # Values: optIn, optOut, none. + } + +If an event status is 'unresponded', the VEN is awaiting a decision on whether to `optIn` or `optOut`. The downstream +agent that subscribes to this `PubSub` message should communicate that choice to the VEN by calling respond_to_event() +(see below). The VEN then relays the choice to the VTN. + + +PubSub: Telemetry Parameters Update +----------------------------------- + +When the VEN telemetry reporting parameters have been updated (by the VTN), they are published with a topic that +includes `openadr/status/{ven_id}`. + +These parameters include state information about the current report. + +Telemetry parameters structure: + +:: + + { + 'telemetry': '{ + "baseline_power_kw": { + "r_id" : "baseline_power", # ID of the reporting metric + "report_type" : "baseline", # Type of reporting metric, e.g. baseline or reading + "reading_type" : "Direct Read", # (per OpenADR telemetry_usage report requirements) + "units" : "powerReal", # (per OpenADR telemetry_usage reoprt requirements) + "method_name" : "get_baseline_power", # Name of the VEN agent method that gets the metric + "min_frequency" : (Integer), # Data capture frequency in seconds (minimum) + "max_frequency" : (Integer) # Data capture frequency in seconds (maximum) + }, + "current_power_kw": { + "r_id" : "actual_power", # ID of the reporting metric + "report_type" : "reading", # Type of reporting metric, e.g. baseline or reading + "reading_type" : "Direct Read", # (per OpenADR telemetry_usage report requirements) + "units" : "powerReal", # (per OpenADR telemetry_usage report requirements) + "method_name" : "get_current_power", # Name of the VEN agent method that gets the metric + "min_frequency" : (Integer), # Data capture frequency in seconds (minimum) + "max_frequency" : (Integer) # Data capture frequency in seconds (maximum) + } + }' + 'report parameters': '{ + "status" : (String), # active, inactive, completed, or cancelled + "report_specifier_id" : "telemetry", # ID of the report definition + "report_request_id" : (String), # ID of the report request; supplied by the VTN + "request_id" : (String), # Request ID of the most recent VTN report modification + "interval_secs" : (Integer), # How often a report update is sent to the VTN + "granularity_secs" : (Integer), # How often a report update is sent to the VTN + "start_time" : (DateTime - UTC), # When the report started + "end_time" : (DateTime - UTC), # When the report is scheduled to end + "last_report" : (DateTime - UTC), # When a report update was last sent + "created_on" : (DateTime - UTC) # When this set of information was recorded in the VEN db + }', + 'manual_override' : (Boolean) # VEN manual override status, as supplied by Control Agent + 'online' : (Boolean) # VEN online status, as supplied by Control Agent + } + +Telemetry value definitions such as `baseline_power_kw` and `current_power_kw` come from the VEN agent config. + + +.. _OpenADR-VEN-Agent-Config: + +OpenADR VEN Agent: Installation and Configuration +================================================= + +The VEN agent can be configured, built and launched using the VOLTTRON agent installation process described in +http://volttron.readthedocs.io/en/develop/devguides/agent_development/Agent-Development.html#agent-development. + +The VEN agent depends on some third-party libraries that are not in the standard VOLTTRON installation. They should be +installed in the VOLTTRON virtual environment prior to building the agent: + +.. code-block:: bash + + (volttron) $ cd $VOLTTRON_ROOT/services/core/OpenADRVenAgent + (volttron) $ pip install -r requirements.txt + +where :term:`$VOLTTRON_ROOT ` is the base directory of the cloned VOLTTRON code repository. + +The VEN agent is designed to work in tandem with a “control agent,” another VOLTTRON agent that uses VOLTTRON RPC calls +to manage events and supply report data. A sample control agent has been provided in the `test/ControlAgentSim` +subdirectory under OpenADRVenAgent. + +The VEN agent maintains a persistent store of event and report data in ``$VOLTTRON_HOME/data/openadr.sqlite``. Some +care should be taken in managing the disk consumption of this data store. If no events or reports are active, it is +safe to take down the VEN agent and delete the file; the persistent store will be reinitialized automatically on agent +startup. + + +Configuration Parameters +------------------------ + +The VEN agent’s configuration file contains JSON that includes several parameters for configuring VTN server +communications and other behavior. A sample configuration file, `config`, has been provided in the agent +directory. + +The VEN agent supports the following configuration parameters: + +========================= ======================== ==================================================== +Parameter Example Description +========================= ======================== ==================================================== +db_path “$VOLTTRON_HOME/data/ Pathname of the agent's sqlite database. Shell + openadr.sqlite” variables will be expanded if they are present + in the pathname. +ven_id “0” The OpenADR ID of this virtual end node. Identifies + this VEN to the VTN. If automated VEN registration + is used, the ID is assigned by the VTN at that + time. If the VEN is registered manually with the + VTN (i.e., via configuration file settings), then + a common VEN ID should be entered in this config + file and in the VTN's site definition. +ven_name "ven01" Name of this virtual end node. This name is used + during automated registration only, identiying + the VEN before its VEN ID is known. +vtn_id “vtn01” OpenADR ID of the VTN with which this VEN + communicates. +vtn_address “http://openadr-vtn. URL and port number of the VTN. + ki-evi.com:8000” +send_registration “False” (“True” or ”False”) If “True”, the VEN sends + a one-time automated registration request to + the VTN to obtain the VEN ID. If automated + registration will be used, the VEN should be run + in this mode initially, then shut down and run + with this parameter set to “False” thereafter. +security_level “standard” If 'high', the VTN and VEN use a third-party + signing authority to sign and authenticate each + request. The default setting is “standard”: the + XML payloads do not contain Signature elements. +poll_interval_secs 30 (integer) How often the VEN should send an OadrPoll + request to the VTN. The poll interval cannot be + more frequent than the VEN’s 5-second process + loop frequency. +log_xml “False” (“True” or “False”) Whether to write each + inbound/outbound request’s XML data to the + agent's log. +opt_in_timeout_secs 1800 (integer) How long to wait before making a + default optIn/optOut decision. +opt_in_default_decision “optOut” (“True” or “False”) Which optIn/optOut choice + to make by default. +request_events_on_startup "False" ("True" or "False") Whether to ask the VTN for a + list of current events during VEN startup. +report_parameters (see below) A dictionary of definitions of reporting/telemetry + parameters. +========================= ======================== ==================================================== + + +Reporting Configuration +----------------------- + +The VEN’s reporting configuration, specified as a dictionary in the agent configuration, defines each telemetry element +(metric) that the VEN can report to the VTN, if requested. By default, it defines reports named “telemetry” and +"telemetry_status", with a report configuration dictionary containing the following parameters: + +======================================================= =========================== ==================================================== +"telemetry" report: parameters Example Description +======================================================= =========================== ==================================================== +report_name "TELEMETRY_USAGE" Friendly name of the report. +report_name_metadata "METADATA_TELEMETRY_USAGE" Friendly name of the report’s metadata, when sent + by the VEN’s oadrRegisterReport request. +report_specifier_id "telemetry" Uniquely identifies the report’s data set. +report_interval_secs_default "300" How often to send a reporting update to the VTN. +telemetry_parameters (baseline_power_kw): r_id "baseline_power" (baseline_power) Unique ID of the metric. +telemetry_parameters (baseline_power_kw): report_type "baseline" (baseline_power) The type of metric being reported. +telemetry_parameters (baseline_power_kw): reading_type "Direct Read" (baseline_power) How the metric was calculated. +telemetry_parameters (baseline_power_kw): units "powerReal" (baseline_power) The reading's data type. +telemetry_parameters (baseline_power_kw): method_name "get_baseline_power" (baseline_power) The VEN method to use when + extracting the data for reporting. +telemetry_parameters (baseline_power_kw): min_frequency 30 (baseline_power) The metric’s minimum sampling + frequency. +telemetry_parameters (baseline_power_kw): max_frequency 60 (baseline_power) The metric’s maximum sampling + frequency. +telemetry_parameters (current_power_kw): r_id "actual_power" (current_power) Unique ID of the metric. +telemetry_parameters (current_power_kw): report_type "reading" (current_power) The type of metric being reported. +telemetry_parameters (current_power_kw): reading_type "Direct Read" (current_power) How the metric was calculated. +telemetry_parameters (current_power_kw): units "powerReal" (baseline_power) The reading's data type. +telemetry_parameters (current_power_kw): method_name "get_current_power" (current_power) The VEN method to use when + extracting the data for reporting. +telemetry_parameters (current_power_kw): min_frequency 30 (current_power) The metric’s minimum sampling + frequency. +telemetry_parameters (current_power_kw): max_frequency 60 (current_power) The metric’s maximum sampling + frequency. +======================================================= =========================== ==================================================== + +======================================================= =========================== ==================================================== +"telemetry_status" report: parameters Example Description +======================================================= =========================== ==================================================== +report_name "TELEMETRY_STATUS" Friendly name of the report. +report_name_metadata "METADATA_TELEMETRY_STATUS" Friendly name of the report’s metadata, when sent + by the VEN’s oadrRegisterReport request. +report_specifier_id "telemetry_status" Uniquely identifies the report’s data set. +report_interval_secs_default "300" How often to send a reporting update to the VTN. +telemetry_parameters (Status): r_id "Status" Unique ID of the metric. +telemetry_parameters (Status): report_type "x-resourceStatus" The type of metric being reported. +telemetry_parameters (Status): reading_type "x-notApplicable" How the metric was calculated. +telemetry_parameters (Status): units "" The reading's data type. +telemetry_parameters (Status): method_name "" The VEN method to use when extracting the data + for reporting. +telemetry_parameters (Status): min_frequency 60 The metric’s minimum sampling frequency. +telemetry_parameters (Status): max_frequency 120 The metric’s maximum sampling frequency. +======================================================= =========================== ==================================================== diff --git a/docs/source/agent-framework/core-service-agents/platform-driver/global-override-specification.rst b/docs/source/agent-framework/core-service-agents/platform-driver/global-override-specification.rst new file mode 100644 index 0000000000..cce0fcce23 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/platform-driver/global-override-specification.rst @@ -0,0 +1,71 @@ +.. _Global-Override-Specification: + +============================= +Global Override Specification +============================= + +This document describes the specification for the global override feature. By default, every user is allowed write +access to the devices by the platform driver. The override feature will allow the user (for example, a building +administrator) to override this default behavior and enable the user to lock the write access on the devices for a +specified duration of time or indefinitely. + + +Functional Capabilities +======================= + +1. User shall be able to specify the following when turning on the override behavior on the devices. + + * Override pattern examples: + + * If pattern is ``campus/building1/*`` - Override condition is turned on for all the devices under + `campus/building1/`. + + * If pattern is ``campus/building1/ahu1`` - Override condition is turned on for only `campus/building1/ahu1` + + * The pattern matching shall use bash style filename matching semantics. + + * Time duration over which override behavior is applicable - If the time duration is negative, then the override + condition is applied indefinitely. + + * Optional `revert-to-fail-safe-state` flag - If the flag is set, the platform driver shall set all the set points + falling under the override condition to its default state/value immediately. This is to ensure that the devices + are in fail-safe state when the override/lock feature is removed. If the flag is not set, the device state/value + is untouched. + + * Optional staggered revert flag - If this flag is set, reverting of devices will be staggered. + +2. User shall be able to disable/turn off the override behavior on devices by specifying: + + * Pattern on which the override/lock feature has be disabled. (example: ``campus/building/\*``) + +3. User shall be able to get a list of all the devices with the override condition set. + +4. User shall be able to get a list of all the override patterns that are currently active. + +5. User shall be able to clear all the overrides. + +6. Any changes to override patterns list shall be stored in the config store. On startup, list of override patterns and + corresponding end times are retrieved from the config store. If the end time is indefinite or greater than current + time for any pattern, then override is set on the matching devices for remaining duration of time. + +7. Whenever a device is newly configured, a check is made to see if it is part of the overridden patterns. If yes, it + is added to list of overridden devices. + +8. When a device is being removed, a check is made to see if it is part of the overridden devices. If yes, it is + removed from the list of overridden devices. + + +Driver RPC Methods +****************** + +- *set_override_on(pattern, duration=0.0, failsafe_revert=True, staggered_revert=True)* - Turn on override condition on all the devices matching the pattern. Time duration for the override condition has to be in seconds. For indefinite duration, the time duration has to be <= 0.0. + +- *set_override_off(pattern)* - Turn off override condition on all the devices matching the pattern. The specified + pattern will be removed from the override patterns list. All the devices falling under the given pattern will be + removed from the list of overridden devices. + +- *get_override_devices()* - Get a list of all the devices with override condition. + +- *get_override_patterns()* - Get a list of override patterns that are currently active. + +- *clear_overrides()* - Clear all the overrides. diff --git a/docs/source/agent-framework/core-service-agents/platform-driver/platform-driver-agent.rst b/docs/source/agent-framework/core-service-agents/platform-driver/platform-driver-agent.rst new file mode 100644 index 0000000000..5566db3142 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/platform-driver/platform-driver-agent.rst @@ -0,0 +1,71 @@ +.. _Platform-Driver-Agent: + +=================== +Platform Driver Agent +=================== + +The Platform Driver Agent manages all device communication. To communicate with devices you must setup and deploy the +Platform Driver Agent. For more information on the Platform Driver Agent's operations, read about the +:ref:`Platform Driver ` in the driver framework docs. + + +.. _Platform-Driver-Config: + +Configuring the Platform Driver +============================= + +The Platform Driver requires a configuration file (described in brief below) to set global settings for all drivers. Once +the user has copied the example or created their own config, the Platform Driver Agent is deployed with this command: + +.. code-block:: bash + + python scripts/install-agent.py -s services/core/PlatformDriverAgent -c + + +Requirements +------------ + +VOLTTRON drivers operated by the platform driver may have additional requirements for installation. +Required libraries: + +:: + + BACnet driver - bacpypes + Modbus driver - pymodbus + Modbus_TK driver - modbus-tk + DNP3 and IEEE 2030.5 drivers - pydnp3 + +The easiest way to install the requirements for drivers included in the VOLTTRON repository is to use ``bootstrap.py`` +(see :ref:`platform installation for more detail `) + +.. code-block:: bash + + python bootstrap.py --drivers + + +Platform Driver Agent Configuration +--------------------------------- + +The Platform Driver Agent configuration consists of general settings for all devices. Below is an example config from the +repository: + +.. code-block:: json + + { + "driver_scrape_interval": 0.05, + "publish_breadth_first_all": false, + "publish_depth_first": false, + "publish_breadth_first": false + } + + +The example platform driver configuration file above can be found in the VOLTTRON repository in +`services/core/PlatformDriverAgent/platform-driver.agent`. + +For information on configuring the Platform Driver with devices, including creating driver configs and using the config +store, please read ref`configuration ` the section in the Driver Framework docs. + + +.. toctree:: + + global-override-specification diff --git a/docs/source/devguides/walkthroughs/files/01-add-devices.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/01-add-devices.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/01-add-devices.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/01-add-devices.png diff --git a/docs/source/devguides/walkthroughs/files/02-install-devices.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/02-install-devices.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/02-install-devices.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/02-install-devices.png diff --git a/docs/source/devguides/walkthroughs/files/03-start-scan.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/03-start-scan.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/03-start-scan.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/03-start-scan.png diff --git a/docs/source/devguides/walkthroughs/files/04-devices-found.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/04-devices-found.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/04-devices-found.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/04-devices-found.png diff --git a/docs/source/devguides/walkthroughs/files/05-get-device-points.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/05-get-device-points.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/05-get-device-points.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/05-get-device-points.png diff --git a/docs/source/devguides/walkthroughs/files/07-edit-points.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/07-edit-points.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/07-edit-points.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/07-edit-points.png diff --git a/docs/source/devguides/walkthroughs/files/07-select-point-a.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/07-select-point-a.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/07-select-point-a.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/07-select-point-a.png diff --git a/docs/source/devguides/walkthroughs/files/07-select-point-b.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/07-select-point-b.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/07-select-point-b.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/07-select-point-b.png diff --git a/docs/source/devguides/walkthroughs/files/07-select-point-c.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/07-select-point-c.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/07-select-point-c.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/07-select-point-c.png diff --git a/docs/source/devguides/walkthroughs/files/08-filter-points-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/08-filter-points-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/08-filter-points-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/08-filter-points-button.png diff --git a/docs/source/devguides/walkthroughs/files/09-filter-set.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/09-filter-set.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/09-filter-set.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/09-filter-set.png diff --git a/docs/source/devguides/walkthroughs/files/10-clear-filter.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/10-clear-filter.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/10-clear-filter.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/10-clear-filter.png diff --git a/docs/source/devguides/walkthroughs/files/11-add-new-point.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/11-add-new-point.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/11-add-new-point.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/11-add-new-point.png diff --git a/docs/source/devguides/walkthroughs/files/12-add-point-dialog.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/12-add-point-dialog.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/12-add-point-dialog.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/12-add-point-dialog.png diff --git a/docs/source/devguides/walkthroughs/files/13-remove-points-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/13-remove-points-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/13-remove-points-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/13-remove-points-button.png diff --git a/docs/source/devguides/walkthroughs/files/14-confirm-remove-points.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/14-confirm-remove-points.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/14-confirm-remove-points.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/14-confirm-remove-points.png diff --git a/docs/source/devguides/walkthroughs/files/15-edit-column-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/15-edit-column-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/15-edit-column-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/15-edit-column-button.png diff --git a/docs/source/devguides/walkthroughs/files/16-edit-column-menu.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/16-edit-column-menu.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/16-edit-column-menu.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/16-edit-column-menu.png diff --git a/docs/source/devguides/walkthroughs/files/17-name-column.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/17-name-column.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/17-name-column.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/17-name-column.png diff --git a/docs/source/devguides/walkthroughs/files/18-duplicated-column.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/18-duplicated-column.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/18-duplicated-column.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/18-duplicated-column.png diff --git a/docs/source/devguides/walkthroughs/files/19-find-in-column-b.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/19-find-in-column-b.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/19-find-in-column-b.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/19-find-in-column-b.png diff --git a/docs/source/devguides/walkthroughs/files/19-find-in-column.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/19-find-in-column.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/19-find-in-column.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/19-find-in-column.png diff --git a/docs/source/devguides/walkthroughs/files/20-replace-in-column.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/20-replace-in-column.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/20-replace-in-column.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/20-replace-in-column.png diff --git a/docs/source/devguides/walkthroughs/files/21-edit-point-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/21-edit-point-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/21-edit-point-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/21-edit-point-button.png diff --git a/docs/source/devguides/walkthroughs/files/22-edit-point-dialog.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/22-edit-point-dialog.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/22-edit-point-dialog.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/22-edit-point-dialog.png diff --git a/docs/source/devguides/walkthroughs/files/23-start-keyboard-commands.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/23-start-keyboard-commands.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/23-start-keyboard-commands.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/23-start-keyboard-commands.png diff --git a/docs/source/devguides/walkthroughs/files/24-keyboard-highlight.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/24-keyboard-highlight.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/24-keyboard-highlight.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/24-keyboard-highlight.png diff --git a/docs/source/devguides/walkthroughs/files/25-keyboard-select.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/25-keyboard-select.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/25-keyboard-select.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/25-keyboard-select.png diff --git a/docs/source/devguides/walkthroughs/files/26-keyboard-shortcuts-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/26-keyboard-shortcuts-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/26-keyboard-shortcuts-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/26-keyboard-shortcuts-button.png diff --git a/docs/source/devguides/walkthroughs/files/27-keyboard-shortcuts.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/27-keyboard-shortcuts.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/27-keyboard-shortcuts.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/27-keyboard-shortcuts.png diff --git a/docs/source/devguides/walkthroughs/files/28-save-registry-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/28-save-registry-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/28-save-registry-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/28-save-registry-button.png diff --git a/docs/source/devguides/walkthroughs/files/29-registry-preview-table.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/29-registry-preview-table.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/29-registry-preview-table.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/29-registry-preview-table.png diff --git a/docs/source/devguides/walkthroughs/files/30-preview-registry-csv.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/30-preview-registry-csv.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/30-preview-registry-csv.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/30-preview-registry-csv.png diff --git a/docs/source/devguides/walkthroughs/files/31-name-registry-file.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/31-name-registry-file.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/31-name-registry-file.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/31-name-registry-file.png diff --git a/docs/source/devguides/walkthroughs/files/32-registry-saved.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/32-registry-saved.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/32-registry-saved.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/32-registry-saved.png diff --git a/docs/source/devguides/walkthroughs/files/33-configure-device-dialog.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/33-configure-device-dialog.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/33-configure-device-dialog.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/33-configure-device-dialog.png diff --git a/docs/source/devguides/walkthroughs/files/34-save-device-config.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/34-save-device-config.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/34-save-device-config.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/34-save-device-config.png diff --git a/docs/source/devguides/walkthroughs/files/35-subdevice-path.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/35-subdevice-path.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/35-subdevice-path.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/35-subdevice-path.png diff --git a/docs/source/devguides/walkthroughs/files/36-subdevice2.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/36-subdevice2.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/36-subdevice2.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/36-subdevice2.png diff --git a/docs/source/devguides/walkthroughs/files/37-device-added-b.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/37-device-added-b.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/37-device-added-b.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/37-device-added-b.png diff --git a/docs/source/devguides/walkthroughs/files/37-device-added.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/37-device-added.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/37-device-added.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/37-device-added.png diff --git a/docs/source/devguides/walkthroughs/files/38-select-saved-registry-file.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/38-select-saved-registry-file.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/38-select-saved-registry-file.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/38-select-saved-registry-file.png diff --git a/docs/source/devguides/walkthroughs/files/39-saved-registry-selector.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/39-saved-registry-selector.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/39-saved-registry-selector.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/39-saved-registry-selector.png diff --git a/docs/source/devguides/walkthroughs/files/40-file-import-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/40-file-import-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/40-file-import-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/40-file-import-button.png diff --git a/docs/source/devguides/walkthroughs/files/41-reload-points-from-device.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/41-reload-points-from-device.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/41-reload-points-from-device.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/41-reload-points-from-device.png diff --git a/docs/source/devguides/walkthroughs/files/43-reconfigure-device-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/43-reconfigure-device-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/43-reconfigure-device-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/43-reconfigure-device-button.png diff --git a/docs/source/devguides/walkthroughs/files/44-reconfiguring-device.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/44-reconfiguring-device.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/44-reconfiguring-device.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/44-reconfiguring-device.png diff --git a/docs/source/devguides/walkthroughs/files/45-reconfigure-option-selector.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/45-reconfigure-option-selector.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/45-reconfigure-option-selector.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/45-reconfigure-option-selector.png diff --git a/docs/source/devguides/walkthroughs/files/46-reconfigure-device-config.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/46-reconfigure-device-config.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/46-reconfigure-device-config.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/46-reconfigure-device-config.png diff --git a/docs/source/devguides/walkthroughs/files/47-file-export-button.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/47-file-export-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/47-file-export-button.png rename to docs/source/agent-framework/core-service-agents/volttron-central/files/47-file-export-button.png diff --git a/docs/source/agent-framework/core-service-agents/volttron-central/files/connect_vc_to_vcp.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/connect_vc_to_vcp.png new file mode 100644 index 0000000000..16b9db0977 Binary files /dev/null and b/docs/source/agent-framework/core-service-agents/volttron-central/files/connect_vc_to_vcp.png differ diff --git a/docs/source/agent-framework/core-service-agents/volttron-central/files/vc_vcp_connection_overview.png b/docs/source/agent-framework/core-service-agents/volttron-central/files/vc_vcp_connection_overview.png new file mode 100644 index 0000000000..288428690a Binary files /dev/null and b/docs/source/agent-framework/core-service-agents/volttron-central/files/vc_vcp_connection_overview.png differ diff --git a/docs/source/agent-framework/core-service-agents/volttron-central/vc-device-configuration-demo.rst b/docs/source/agent-framework/core-service-agents/volttron-central/vc-device-configuration-demo.rst new file mode 100644 index 0000000000..7a6758c497 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/volttron-central/vc-device-configuration-demo.rst @@ -0,0 +1,402 @@ +.. _Device-Configuration-in-VOLTTRON-Central: + +======================================== +Device Configuration in VOLTTRON Central +======================================== + +Devices in your network can be detected and configured through the VOLTTRON Central UI. The current version of VOLTTRON +enables device detection and configuration for BACnet devices. The following sections describe the processes involved +with performing scans to detect physical devices and get their points, and configuring them as virtual devices installed +on VOLTTRON instances. + +- `Launching Device Configuration `__ +- `Scanning for Devices `__ +- `Scanning for Points `__ +- `Registry Configuration File `__ +- `Additional Attributes `__ +- `Quick Edit Features `__ +- `Keyboard Commands `__ +- `Registry Preview `__ +- `Registry Configuration Options `__ +- `Reloading Device Points `__ +- `Device Configuration Form `__ +- `Configuring Sub-devices `__ +- `Reconfiguring Devices `__ +- `Exporting Registry Configuration Files `__ + + +Launching Device Configuration +------------------------------ + +To begin device configuration in VOLTTRON Central, extend the side panel on the left and find the cogs button next to +the platform instance you want to add a device to. Click the cogs button to launch the device configuration feature. + +|Add Devices| + +|Install Devices| + +Currently the only method of adding devices is to conduct a scan to detect BACnet devices. A BACnet Proxy Agent must be +running in order to do the scan. If more than one BACnet Proxy is installed on the platform, choose the one that will +be used for the scan. + +The scan can be conducted using default settings that will search for all physical devices on the network. However, +optional settings can be used to focus on specific devices or change the duration of the scan. Entering a range of +device IDs will limit the scan to return only devices with IDs in that range. Advanced options include the ability to +specify the IP address of a device to detect as well as the ability to change the duration of the scan from the default +of five seconds. + + +Scanning for Devices +-------------------- + +To start the scan, click the large cog button to the right of the scan settings. + +|Start Scan| + +Devices that are detected will appear in the space below the scan settings. Scanning can be repeated at any time by +clicking the large cog button again. + +|Devices Found| + + +Scanning for Points +------------------- + +Another scan can be performed on each physical device to retrieve its available points. This scan is initiated by +clicking the triangle next to the device in the list. The first time the arrow is clicked, it initiates the scan. +After the points are retrieved, the arrow becomes a hide-and-show toggle button and won't re-initiate scanning the +device. + +|Get Device Points| + +After the points have been retrieved once, the only way to scan the same device for points again is to relaunch the +device configuration process from the start by clicking on the small cogs button next to the platform instance in the +panel tree. + + +Registry Configuration File +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The registry configuration determines which points on the physical device will be associated with the virtual device +that uses that particular registry configuration. The registry configuration determines which points' data will be +published to the message bus and recorded by the historian, and it determines how the data will be presented. + +When all the points on the device have been retrieved, the points are loaded into the registry configuration editor. +There, the points can be modified and selected to go into the registry configuration file for a device. + +Each row in the registry configuration editor represents a point, and each cell in the row represents an attribute of +the point. + +Only points that have been selected will be included in the registry configuration file. To select a point, check the +box next to the point in the editor. + +|Select Point Before| + +|Select Point During| + +|Select Point After| + +Type directly in a cell to change an attribute value for a point. + +|Edit Points| + + +Additional Attributes +--------------------- + +The editor's default view shows the attributes that are most likely to be changed during configuration: the VOLTTRON +point name, the writable setting, and the units. Other attributes are present but not shown in the default view. To +see the entire set of attributes for a point, click the `Edit Point` button (the three dots) at the end of the point +row. + +|Edit Point Button| + +In the window that opens, point attributes can be changed by typing in the fields and clicking the Apply button. + +|Edit Point Dialog| + +Checking or unchecking the `Show in Table` box for an attribute will add or remove it as a column in the registry +configuration editor. + + +Quick Edit Features +------------------- + +Several quick-edit features are available in the registry configuration editor. + +The list of points can be filtered based on values in the first column by clicking the filter button in the first +column's header and entering a filter term. + +|Filter Points Button| + +|Filter Set| + +The filter feature allows points to be edited, selected, or deselected more quickly by narrowing down potentially large +lists of points. However, the filter doesn't select points, and if the registry configuration is saved while a filter +is applied, any selected points not included in the filter will still be included in the registry file. + +To clear the filter, click on the `Clear Filter` button in the filter popup. + +|Clear Filter| + +To add a new point to the points listed in the registry configuration editor, click on the `Add Point` button in the +header of the first column. + +|Add New Point| + +|Add Point Dialog| + +Provide attribute values, and click the `Apply` button to add the new point, which will be appended to the bottom of the +list. + +To remove points from the list, select the points and click the `Remove Points` button in the header of the first +column. + +|Remove Points| + +|Confirm Remove Points| + +Each column has an `Edit Column` button in its header. + +|Edit Columns| + +Click on the button to display a popup menu of operations to perform on the column. The options include inserting a +blank new column, duplicating an existing column, removing a column, or searching for a value within a column. + +|Edit Column Menu| + +A duplicate or new column has to be given a unique name. + +|Name Column| + +|Duplicated Column| + +To search for values in a column, choose the `Find and Replace` option in the popup menu. + +|Find in Column| + +Type the term to search for, and click the `Find Next` button to highlight all the matched fields in the column. + +|Find Next| + +Click the `Find Next` button again to advance the focus down the list of matched terms. + +To quickly replace the matched term in the cell with focus, type a replacement term, and click on the `Replace` button. + +|Replace in Column| + +To replace all the matched terms in the column, click on the `Replace All` button. Click the `Clear Search` button to +end the search. + + +Keyboard Commands +----------------- + +Some keyboard commands are available to expedite the selection or de-selection of points. To initiate use of the +keyboard commands, strike the `Control` key on the keyboard. For keyboard commands to be activated, the registry +configuration editor has to have focus, which comes from interacting with it. But the commands won't be activated if +the cursor is in a type-able field. + +If the keyboard commands have been successfully activated, a faint highlight will appear over the first row in the +registry configuration editor. + +|Start Keyboard Commands| + +Keyboard commands are deactivated when the mouse cursor moves over the configuration editor. If unintentional +deactivation occurs, strike the `Control` key again to reactivate the commands. + +With keyboard commands activated, the highlighted row can be advanced up or down by striking the `up` or `down arrow` on +the keyboard. A group of rows can be highlighted by striking the up or down arrow while holding down the `Shift` key. + +|Keyboard Highlight| + +To select the highlighted rows, strike the `Enter` key. + +|Keyboard Select| + +Striking the `Enter` key with rows highlighted will also deselect any rows that were already selected. + +Click on the `Keyboard Shortcuts` button to show a popup list of the available keyboard commands. + +|Keyboard Shortcuts Button| + +|Keyboard Shortcuts| + + +Registry Preview +---------------- + +To save the registry configuration, click the `Save` button at the bottom of the registry configuration editor. + +|Save Registry Button| + +A preview will appear to let you confirm that the configuration is what you intended. + +|Registry Preview Table| + +The configuration also can be inspected in the comma-separated format of the actual registry configuration file. + +|Registry Preview CSV| + +Provide a name for the registry configuration file, and click the `Save` button to save the file. + +|Name Registry File| + +|Registry Saved| + + +Registry Configuration Options +------------------------------ + +Different subsets of configured points can be saved from the same physical device and used to create separate registry +files for multiple virtual devices and sub-devices. Likewise, a single registry file can be reused by multiple virtual +devices and sub-devices. + +To reuse a previously saved registry file, click on the `Select Registry File (CSV)` button at the end of the physical +device's listing. + +|Select Saved Registry File| + +The `Previously Configured Registry Files` window will appear, and a file can be selected to load it into the registry +configuration editor. + +|Saved Registry Selector| + +Another option is to import a registry configuration file from the computer running the VOLTTRON Central web +application, if one has been saved to local storage connected to the computer. To import a registry configuration file +from local storage, click on the `Import Registry File (CSV)` button at the end of the physical device's listing, and +use the file selector window to locate and load the file. + +|File Import Button| + + +Reloading Device Points +----------------------- + +Once a physical device has been scanned, the original points from the scan can be reloaded at any point during device +configuration by clicking on the `Reload Points From Device` button at the end of the device's listing. + +|Reload Points| + + +Device Configuration Form +^^^^^^^^^^^^^^^^^^^^^^^^^ + +After the registry configuration file has been saved, the device configuration form appears. Creating the device +configuration results in the virtual device being installed in the platform and determines the device's position in the +side panel tree. It also contains some settings that determine how data is collected from the device. + +|Configure Device Dialog| + +After the device configuration settings have been entered, click the `Save` button to save the configuration and add the +device to the platform. + +|Save Device Config| + +|Device Added| + + +Configuring Sub-devices +----------------------- + +After a device has been configured, sub-devices can be configured by pointing to their position in the ``Path`` +attribute of the device configuration form. But a sub-device can't be configured until its parent device has been +configured first. + +|Sub-device Path| + +|Sub-device 2| + +As devices are configured, they're inserted into position in the side panel tree, along with their configured points. + +|Device Added to Tree| + + +Reconfiguring Devices +^^^^^^^^^^^^^^^^^^^^^ + +A device that's been added to a VOLTTRON instance can be reconfigured by changing its registry configuration or its +device configuration. To launch reconfiguration, click on the wrench button next to the device in the side panel tree. + +|Reconfigure Device Button| + +Reconfiguration reloads the registry configuration editor and the device configuration form for the virtual device. The +editor and the form work the same way in reconfiguration as during initial device configuration. + +|Reconfiguring Device| + +The reconfiguration view shows the name, address, and ID of the physical device that the virtual device was configured +from. It also shows the name of the registry configuration file associated with the virtual device as well as its +configured path. + +A different registry configuration file can be associated with the device by clicking on the `Select Registry File +(CSV)` button or the `Import Registry File (CSV)` button. + +The registry configuration can be edited by making changes to the configuration in the editor and clicking the `Save` +button. + +To make changes to the device configuration form, click on the `File to Edit` selector and choose `Device Config`. + +|Reconfigure Option Selector| + +|Reconfigure Device Config| + + +Exporting Registry Configuration Files +-------------------------------------- + +The registry configuration file associated with a virtual device can be exported from the web browser to the computer's +local storage by clicking on the `File Export` Button in the device reconfiguration view. + +|File Export Button| + +.. |Add Devices| image:: files/01-add-devices.png +.. |Install Devices| image:: files/02-install-devices.png +.. |Start Scan| image:: files/03-start-scan.png +.. |Devices Found| image:: files/04-devices-found.png +.. |Get Device Points| image:: files/05-get-device-points.png +.. |Select Point Before| image:: files/07-select-point-a.png +.. |Select Point During| image:: files/07-select-point-b.png +.. |Select Point After| image:: files/07-select-point-c.png +.. |Edit Points| image:: files/07-edit-points.png +.. |Edit Point Button| image:: files/21-edit-point-button.png +.. |Edit Point Dialog| image:: files/22-edit-point-dialog.png +.. |Filter Points Button| image:: files/08-filter-points-button.png +.. |Filter Set| image:: files/09-filter-set.png +.. |Clear Filter| image:: files/10-clear-filter.png +.. |Add New Point| image:: files/11-add-new-point.png +.. |Add Point Dialog| image:: files/12-add-point-dialog.png +.. |Remove Points| image:: files/13-remove-points-button.png +.. |Confirm Remove Points| image:: files/14-confirm-remove-points.png +.. |Edit Columns| image:: files/15-edit-column-button.png +.. |Edit Column Menu| image:: files/16-edit-column-menu.png +.. |Name Column| image:: files/17-name-column.png +.. |Duplicated Column| image:: files/18-duplicated-column.png +.. |Find in Column| image:: files/19-find-in-column.png +.. |Find Next| image:: files/19-find-in-column-b.png +.. |Replace in Column| image:: files/20-replace-in-column.png +.. |Start Keyboard Commands| image:: files/23-start-keyboard-commands.png +.. |Keyboard Highlight| image:: files/24-keyboard-highlight.png +.. |Keyboard Select| image:: files/25-keyboard-select.png +.. |Keyboard Shortcuts Button| image:: files/26-keyboard-shortcuts-button.png +.. |Keyboard Shortcuts| image:: files/27-keyboard-shortcuts.png +.. |Save Registry Button| image:: files/28-save-registry-button.png +.. |Registry Preview Table| image:: files/29-registry-preview-table.png +.. |Registry Preview CSV| image:: files/30-preview-registry-csv.png +.. |Name Registry File| image:: files/31-name-registry-file.png +.. |Registry Saved| image:: files/32-registry-saved.png +.. |Select Saved Registry File| image:: files/38-select-saved-registry-file.png +.. |Saved Registry Selector| image:: files/39-saved-registry-selector.png +.. |File Import Button| image:: files/40-file-import-button.png +.. |Reload Points| image:: files/41-reload-points-from-device.png +.. |Configure Device Dialog| image:: files/33-configure-device-dialog.png +.. |Save Device Config| image:: files/34-save-device-config.png +.. |Device Added| image:: files/37-device-added.png +.. |Sub-device Path| image:: files/35-subdevice-path.png +.. |Sub-device 2| image:: files/36-subdevice2.png +.. |Device Added to Tree| image:: files/37-device-added-b.png +.. |Reconfigure Device Button| image:: files/43-reconfigure-device-button.png +.. |Reconfiguring Device| image:: files/44-reconfiguring-device.png +.. |Reconfigure Option Selector| image:: files/45-reconfigure-option-selector.png +.. |Reconfigure Device Config| image:: files/46-reconfigure-device-config.png +.. |File Export Button| image:: files/47-file-export-button.png diff --git a/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-agent.rst b/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-agent.rst new file mode 100644 index 0000000000..2e70a6cb93 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-agent.rst @@ -0,0 +1,56 @@ +.. _VOLTTRON-Central-Agent: + +===================== +VOLTTRON Central (VC) +===================== + +The VC Agent is responsible for controlling multiple VOLTTRON instances through a single web interface. +The VOLTTRON instances can be either local or remote. VC leverages an internal VOLTTRON web server providing a +interface to our JSON-RPC based web API. Both the web api and the interface are served through the VC agent. + +Instance Configuration +====================== + +In order for any web agent to be enabled, there must be a port configured to serve the content. The easiest way to do +this is to create a config file in the root of your :term:`VOLTTRON_HOME` directory (to do this automatically see +:ref:`VOLTTRON Config `.) + +The following is an example of the configuration file + +:: + + [volttron] + instance-name = volttron1 + message-bus = rmq + vip-addres = tcp://127.0.0.1:22916 + bind-web-address = https://localhost:8443 + volttron-central-address = https://localhost:8443 + + +.. Note: + + The above configuration will open a discoverable port for the volttron instance. In addition, the opening of this + web address allows you to serve both static as well as dynamic pages. + +Verify that the instance is serving properly by pointing your web browser to ``https://localhost:8443/index.html`` + + +Agent Execution +=============== + +To setup an instance of VC, it is recommended to follow one of the following guides depending on your use case. +For a single instance, please consult the :ref:`VOLTTRON Central Demo `. +For controlling multiple instances with different message busses, consider the +:ref:`VOLTTRON Central Multi-Platform Multi-Bus Demo `. + +However, if you already have an instance of VOLTTRON configured that you wish to make an instance of VOLTTRON Central. +you may install and start it as follows: + +.. code-block:: bash + + # Arguments are package to execute, config file to use, tag to use as reference + ./scripts/core/pack_install.sh services/core/VolttronCentral services/core/VolttronCentral/config vc + + # Start the agent + vctl start --tag vc + diff --git a/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-overview.rst b/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-overview.rst new file mode 100644 index 0000000000..49849f0e52 --- /dev/null +++ b/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-overview.rst @@ -0,0 +1,48 @@ +.. _VOLTTRON-Central: + +==================================== +VOLTTRON Central Management Overview +==================================== + +VOLTTRON Central is responsible for controlling multiple VOLTTRON instances with a single management instance. +The managed VOLTTRON instances can be either local or remote. Each managed instance will have a VOLTTRON Central +Platform agent installed and running to interface with the primary VOLTTRON Central agent. + +|VC-VCP Overview| + +There is a :ref:`VOLTTRON Central Deployment Demo ` that will allow you to quickly setup +and see the current offerings of the interface. + +VOLTTRON Central will allow you to: + +- See a list of platforms being managed. +- Add and remove platforms. +- Install, start and stop agents on the managed platforms. +- Create dynamic graphs from the historians based upon data points. +- Execute functions on remote platforms. + + +Volttron Central Agent +====================== + +The VOLTTRON Central (VC) agent serves a web-based management UI that interfaces with the +VOLTTRON Central web API. + + +VOLTTRON Central Platform Agent +=============================== + +The VOLTTRON Central Platform (VCP) Agent allows communication with a VOLTTRON Central instance. +Each VOLTTRON instance managed by VOLTTRON Central should have only one Platform Agent. +The Platform Agent must have the :term:`VIP Identity` of `platform.agent` which is specified as default by VOLTTRON +:ref:`known identities `. + +.. toctree:: + + volttron-central-agent + volttron-central-platform + vc-device-configuration-demo + webservice-api + + +.. |VC-VCP Overview| image:: files/vc_vcp_connection_overview.png diff --git a/services/core/VolttronCentralPlatform/README.rst b/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-platform.rst similarity index 67% rename from services/core/VolttronCentralPlatform/README.rst rename to docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-platform.rst index 8ee790236b..97915a2afe 100644 --- a/services/core/VolttronCentralPlatform/README.rst +++ b/docs/source/agent-framework/core-service-agents/volttron-central/volttron-central-platform.rst @@ -1,34 +1,40 @@ -.. _Volttron_Central_Platform: +.. _VOLTTRON-Central-Platform-Agent: =============================== Volttron Central Platform (VCP) =============================== -The VCP exposes a VOLTTRON instance to a Volttron Central (VC) agent. The VC -agent can either be on the same or a different VOLTTRON instance. The VCP agent will, -once authenticated (with the VC agent's instance), auto connect to the VC -agent's instance and register itself on startup. The VCP instance will attempt -to reconnect to the VC agent's instance if connection is disrupted. VCP has -many configuration options available that can be set via the configuration store -and/or initial configuration file. +The VCP agent exposes a VOLTTRON instance to a Volttron Central (VC) agent. The VC +agent can either be on the same or a remote VOLTTRON instance. The VCP agent will, +once authenticated with the VC agent's instance, auto connect to the VC +agent's instance and register itself on startup. The VCP instance will attempt +to reconnect to the VC agent's instance if connection is disrupted.VCP has +many configuration options available that can be set via the configuration store. +An example config is provided below with information on these options. Publish Specifications ---------------------- During connection to the VC agent's instance the instance-name from the VCP will be used to connect to the VC agent's instance. It will have the form -vcp-instancename with all invalid characters replaced with an underscore. See +vcp-instance name with all invalid characters replaced with an underscore. See :py:meth:`volttron.platform.agent.util.normalize_identity` for how the instance name is normalized into a identity. FAQ / Notes ----------- -* VCP agent has an identity of 'platform.agent' this cannot be changed. -* There may only be a single agent connected to a VOLTTRON instances with the identiy of 'platform.agent' -* VCP will publish to VC under the topic platforms/vcp-(normalized instance name)/ -* VC communicates through the :py:class:`vcplatform.vcconnection.VCConnection` rpc methods. -* VCP uses the :py:class:`vcplatform.vcconnection.VCConnection` to connect with the VC agent's instance. +* VCP agent has an identity of 'platform.agent'. This cannot be changed. +* There may only be a single agent connected to a VOLTTRON instance with the identity of 'platform.agent' +* From VC's perspective, the VCP agent's identity is .platform.agent +* VCP will publish to the remote platform under the topic platforms/vcp-(normalized instance name)/ +* VC subscribes to platforms/* and gets status of the remote instance. +* VCP connects to the remote instance using the auth subsystem. Once is connected to the remote instance, VCP's rpc + functions are available for the remote VC to call. +* VC looks at the peers connected to the instance in order to determine what remote instances are connected to the + platform (*.platform.agent are assumed to be VCP instances*). + +|VCP-VC Connection| Configuration Options --------------------- @@ -79,3 +85,5 @@ by the VCP agent. By default an empty config file is used. "from1": "to1" } } + +.. |VCP-VC Connection| image:: files/connect_vc_to_vcp.png diff --git a/docs/source/core_services/service_agents/central_management/Webservice-API.rst b/docs/source/agent-framework/core-service-agents/volttron-central/webservice-api.rst similarity index 73% rename from docs/source/core_services/service_agents/central_management/Webservice-API.rst rename to docs/source/agent-framework/core-service-agents/volttron-central/webservice-api.rst index 2959b6010e..1594c34dbe 100644 --- a/docs/source/core_services/service_agents/central_management/Webservice-API.rst +++ b/docs/source/agent-framework/core-service-agents/volttron-central/webservice-api.rst @@ -1,30 +1,28 @@ -.. _VCM-Webservice-API: +.. _VCM-Web-Service-API: =============================================== VOLTTRON Central Web Services Api Documentation =============================================== -VOLTTRON Central (VC) is meant to be the hub of communication within a cluster of -VOLTTRON instances. VC exposes a -`JSON-RPC 2.0 `_ based api that allows -a user to control multple instances of VOLTTRON. +VOLTTRON Central (VC) is meant to be the hub of communication within a cluster of VOLTTRON instances. VC exposes a +`JSON-RPC 2.0 `_ based API that allows a user to control multiple instances of +VOLTTRON. Why JSON-RPC ============ -SOAP messaging is unfriendly to many developers, especially those wanting to -make calls in a browser from AJAX environment. We have therefore have -implemented a JSON-RPC API capability to VC, as a more JSON/JavaScript -friendly mechanism. +SOAP messaging is unfriendly to many developers, especially those wanting to make calls in a browser from AJAX +environment. We have therefore have implemented a JSON-RPC API capability to VC, as a more JSON/JavaScript friendly +mechanism. How the API is Implemented ========================== * All calls are made through a POST to `/vc/jsonrpc` -* All calls (not including the call to authenticate) will - include an authorization token (a json-rpc extension). +* All calls (not including the call to authenticate) will include an authorization token (a json-rpc extension). + JSON-RPC Request Payload ------------------------ @@ -59,15 +57,15 @@ As an alternative, the params can be an array as illustrated by the following: "authorization": "server_authorization_token" } -For full documentation of the Request object please see section 4 of the +For full documentation of the `Request` object please see section 4 of the `JSON-RPC 2.0 `_ specification. + JSON-RPC Response Payload ------------------------- -All responses shall have either an either an error response or a result -response. The result key shown below can be a single instance of a json -type, an array or a JSON object. +All responses shall have either an either an error response or a result response. The result key shown below can be a +single instance of a JSON type, an array or a JSON object. A result response will have the following format: @@ -92,9 +90,10 @@ An error response will have the following format: "id": "sent_in_unique_message_id_or_null" } -For full documenation of the Response object please see section 5 of the +For full documentation of the Response object please see section 5 of the `JSON-RPC 2.0 `_ specification. + JSON-RPC Data Objects ===================== @@ -205,15 +204,18 @@ Retrieve Authorization Token # 200 OK { "jsonrpc": "2.0", - "result": "somAuthorizationToken", + "result": "someAuthorizationToken", "id": "someID" } Failure - HTTP Status Code 401 + + :: + + HTTP Status Code 401 -Register A Volttron Platform Instance (Using Discovery) +Register a VOLTTRON Platform Instance (Using Discovery) .. code-block:: Python # POST /vc/jsonrpc @@ -244,21 +246,6 @@ Register A Volttron Platform Instance (Using Discovery) } -TODO: Request Registration of an External Platform - .. code-block:: Python - - # POST /vc/jsonrpc - { - "jsonrpc": "2.0", - "method": "register_platform", - "params": { - "uri": "127.0.0.2:8080?serverkey=...&publickey=...&secretkey=..." - } - "authorization": "someAuthorizationToken", - "id": # - } - - Unregister a Volttron Platform Instance .. code-block:: Python @@ -324,7 +311,8 @@ Retrieve Managed Instances "id": # } -TODO: change repsonse Retrieve Installed Agents From platform1 + +Retrieve Installed Agents From Platform .. code-block:: Python # POST /vc/jsonrpc @@ -420,57 +408,51 @@ TODO: change repsonse Retrieve Installed Agents From platform1 } -TODO: Start An Agent +Install Agent .. code-block:: Python # POST /vc/jsonrpc { "jsonrpc": "2.0", - "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.start_agent", - "params": ["a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6"], + "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.install", + "params": { + "files": [ + { + "file_name": "helloagent-0.1-py2-none-any.whl", + "file": "data:application/octet-stream;base64,..." + }, + { + "file_name": "some-non-wheel-file.txt", + "file": "data:application/octet-stream;base64,..." + }, + ... + ], + } "authorization": "someAuthorizationToken", "id": # } - Response Success + Success Response .. code-block:: Python 200 OK { "jsonrpc": "2.0", "result": { - "process_id": 1000, - "return_code": null + [ + { + "uuid": "a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6" + }, + { + "error": "Some error message" + }, + ... + ] }, "id": # } -TODO: Stop An Agent - .. code-block:: Python - - # POST /vc/jsonrpc - { - "jsonrpc": "2.0", - "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.stop_agent", - "params": ["a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6"], - "authorization": "someAuthorizationToken", - "id": # - } - - Response Success - .. code-block:: Python - - 200 OK - { - "jsonrpc": "2.0", - "result": { - "process_id": 1000, - "return_code": 0 - }, - "id": # - } - -TODO: Remove An Agent +Remove An Agent .. code-block:: Python # POST /vc/jsonrpc @@ -495,132 +477,6 @@ TODO: Remove An Agent "id": # } -TODO: Retrieve Running Agents - .. code-block:: Python - # POST /vc/jsonrpc - { - "jsonrpc": "2.0", - "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.status_agents", - "authorization": "someAuthorizationToken", - "id": # - } - Response Success - .. code-block:: Python - 200 OK - { - "jsonrpc": "2.0", - "result": [ - { - "name": "RunningAgent", - "uuid": "a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6" - "process_id": 1234, - "return_code": null - }, - { - "name": "StoppedAgent", - "uuid": "a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6" - "process_id": 1000, - "return_code": 0 - } - ], - "id": # - } - -TODO: currently getting 500 error Retrieve An Agent's RPC Methods - .. code-block:: Python - - # POST /vc/jsonrpc - { - "jsonrpc": "2.0", - "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.agents.uuid.a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6.inspect", - "authorization": "someAuthorizationToken", - "id": # - } - - Response Success - .. code-block:: Python - - 200 OK - { - "jsonrpc": "2.0", - "result": [ - { - "method": "sayHello", - "params": { - "name": "string" - } - } - ], - "id": # - } - -TODO: Perform Agent Action - .. code-block:: Python - - # POST /vc/jsonrpc - { - "jsonrpc": "2.0", - "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.agents.uuid.a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6.methods.say_hello", - "params": { - "name": "Dorothy" - }, - "authorization": "someAuthorizationToken", - "id": # - } - - Success Response - .. code-block:: Python - - 200 OK - { - "jsonrpc": "2.0", - "result": "Hello, Dorothy!", - "id": # - } - -TODO: Install Agent - .. code-block:: Python - - # POST /vc/jsonrpc - { - "jsonrpc": "2.0", - "method": "platforms.uuid.0987fedc-65ba-43fe-21dc-098765bafedc.install", - "params": { - "files": [ - { - "file_name": "helloagent-0.1-py2-none-any.whl", - "file": "data:application/octet-stream;base64,..." - }, - { - "file_name": "some-non-wheel-file.txt", - "file": "data:application/octet-stream;base64,..." - }, - ... - ], - } - "authorization": "someAuthorizationToken", - "id": # - } - - Success Response - .. code-block:: Python - - 200 OK - { - "jsonrpc": "2.0", - "result": { - [ - { - "uuid": "a1b2c3d4-e5f6-a7b8-c9d0-e1f2a3b4c5d6" - }, - { - "error": "Some error message" - }, - ... - ] - }, - "id": # - } diff --git a/docs/source/agent-framework/historian-agents/crate/crate-historian.rst b/docs/source/agent-framework/historian-agents/crate/crate-historian.rst new file mode 100644 index 0000000000..bb12a915ae --- /dev/null +++ b/docs/source/agent-framework/historian-agents/crate/crate-historian.rst @@ -0,0 +1,79 @@ +.. _Crate-Historian: + +=============== +Crate Historian +=============== + +Crate is an open source SQL database designed on top of a No-SQL design. It allows automatic data replication and +self-healing clusters for high availability, automatic sharding, and fast joins, aggregations and sub-selects. + +Find out more about crate from ``_. + + +Prerequisites +============= + +1. Crate Database +----------------- + +For Arch Linux, Debian, RedHat Enterprise Linux and Ubuntu distributions there is a simple installer to get Crate up and +running on your system. + +.. code-block:: bash + + sudo bash -c "$(curl -L https://try.crate.io)" + +This command will download and install all of the requirements for running Crate, create a Crate user and install a +Crate service. After the installation the service will be available for viewing at ``http://localhost:4200`` by +default. + +.. note:: + + There is no authentication support within crate. + + +2. Crate Driver +--------------- + +There is a Python library for crate that must be installed in the VOLTTRON Python virtual environment in order to access +Crate. From an activated environment, in the root of the volttron folder, execute the following command: + +.. code-block:: bash + + python bootstrap.py --crate + +or + +.. code-block:: bash + + python bootstrap.py --databases + + +or + +.. code-block:: bash + + pip install crate + + +Configuration +============= + +Because there is no authorization to access a crate database the configuration for the Crate Historian is very easy. + +.. code-block:: python + + { + "connection": { + "type": "crate", + # Optional table prefix defaults to historian + "schema": "testing", + "params": { + "host": "localhost:4200" + } + } + } + +Finally package, install and start the Crate Historian agent. + +.. seealso:: :ref:`Agent Development Walk-through ` diff --git a/docs/source/agent-framework/historian-agents/data-mover/data-mover-historian.rst b/docs/source/agent-framework/historian-agents/data-mover/data-mover-historian.rst new file mode 100644 index 0000000000..ad0d283015 --- /dev/null +++ b/docs/source/agent-framework/historian-agents/data-mover/data-mover-historian.rst @@ -0,0 +1,55 @@ +.. _Data-Mover-Historian: + +==================== +Data Mover Historian +==================== + +The Data Mover sends data from its platform to a remote platform in cases where there are not sufficient resources to +store data locally. It shares this functionality with the :ref:`Forward Historian `, however the +Data Mover does not have the goal of data appearing "live" on the remote platform. This allows DataMover to be more +efficient by both batching data and by sending an RPC call to a remote historian instead of publishing data on the +remote message bus. This allows allows the Data Mover to be more robust by ensuring that the receiving historian is +running. If the target is unreachable, the Data Mover will cache data until it is available. + + +Configuration +============= + +The default configuration file is `services/core/DataMover/config`. Change the `destination-vip` value to +point towards the foreign Volttron instance. + +The following is an example configuration: + +:: + + { + "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket", + "destination-serverkey": null, + "required_target_agents": [], + "custom_topic_list": [], + "services_topic_list": [ + "devices", "analysis", "record", "datalogger", "actuators" + ], + "topic_replace_list": [ + #{"from": "FromString", "to": "ToString"} + ] + } + + +The `services_topic_list` allows you to specify which of the main data topics to forward. If there is no entry, the +historian defaults to sending all. + +`topic_replace_list` allows you to replace portions of topics if needed. This could be used to correct or standardize +topics or to replace building/device names with an anonymous version. The receiving platform will only see the +replaced values. + +Adding the configuration option below will limit the backup cache to `n` gigabytes. This will keep a hard drive from +filling up if the agent is disconnected from its target for a long time. + +:: + + "backup_storage_limit_gb": n + +.. seealso:: + + :ref:`Historian Framework ` diff --git a/docs/source/agent-framework/historian-agents/forwarder/forward-historian.rst b/docs/source/agent-framework/historian-agents/forwarder/forward-historian.rst new file mode 100644 index 0000000000..1e415fdc3e --- /dev/null +++ b/docs/source/agent-framework/historian-agents/forwarder/forward-historian.rst @@ -0,0 +1,276 @@ +.. _Forward-Historian: + +================= +Forward Historian +================= + +The primary use case for the Forward Historian or Forwarder is to send data to another instance of VOLTTRON as if the +data were live. This allows agents running on a more secure and/or more powerful machine to run analysis on data being +collected on a potentially less secure/powerful board. + +Given this use case, it is not optimized for batching large amounts of data when "live-ness" is not needed. For this +use case, please see the :ref:`Data Mover Historian `. + +The Forward Historian can be found in the `services/core directory`. + +Forward Historian can be used to forward data between two ZMQ instances, two RMQ instances, or between ZMQ and +RMQ instances. For Forward Historian to establish a successful connection to the destination VOLTTRON instance: + + 1. forward historian should be configured to connect and authenticate the destination instance, and + 2. the remote instance should be configured to accept incoming connection from the forward historian + +How we setup the above two depends on the message bus used in source instance and destination instance + +*************************************************************************** +Setup for two ZMQ VOLTTRON instance or a ZMQ and RabbitMQ VOLTTRON instance +*************************************************************************** + +When forwarder is used between two ZeroMQ instances it relies on the CurveMQ authentication mechanism used by ZMQ +based VOLTTRON. When the communication is between a ZeroMQ and RabbitMQ instance, the forward historian uses the +proxy ZMQ router agent on the RabbitMQ instance and hence once again uses the CurveMQ authentication + +.. seealso:: + + For more details about VIP authentication in ZMQ based VOLTTRON refer to :ref:`VIP Authentication` + + +Configuring Forwarder Agent +=========================== + +At a minimum, a forward historian's configuration should contain enough details to connect to and authenticate the +remote destination. For this it needs + + 1. the destination's :term:`VIP address` (`destination-vip`) + 2. the public key of the destination server (`destination-serverkey`) + +There are two ways to provide these information + +Minimum configuration: Option 1 +------------------------------- + +Provide the needed information in the configuration file. For example + +.. code-block:: json + + { + "destination-vip": "tcp://172.18.0.4:22916" + "destination-serverkey": "D3tIAPOFf7wS3787FgEOLjoPfXUT9rAGpv80ryloZGE" + } + +The destination server key can be found by running the following command on the **destination volttron instance**: + +.. code-block:: bash + + vctl auth serverkey + + +.. note:: + + The example above uses the local IP address, the IP address for your configuration should match the intended target + +An example configuration with above parameters is available at `services/core/ForwardHistorian/config`. + +.. _config_option_2: + +Minimum configuration: Option 2 +------------------------------- + +If the destination volttron instance is web enabled then the forward historian can find the destination's vip address +and public key using the destination's web discovery page. All web enabled volttron instances provide a +**/discovery/** page that provides the following server information + 1. server key + 2. vip address + 3. instance name + 4. RabbitMQ server's AMQP address (Only on RabbitMQ instances) + 5. RabbitMQ server's CA cert (Only on RabbitMQ instances) + +To forward data to a web enabled volttron instance, forwarder can configured with the destination's web address +instead of destination's vip address and public key. For example + +.. code-block:: json + + { + "destination-address": "https://centvolttron2:8443" + } + +An example configuration with above parameters is available at `services/core/ForwardHistorian/config_web_address`. + +Optional Configurations +----------------------- + +The most common use case for a forwarder is to forward data to a remote historian. Therefore, forward historians' by +default forward the default topics a historian subscribes to - devices, analysis, log, and record. +However, a forward historian can be configured to forward any custom topic or disable forwarding devices, analysis, log and/or +record topic data. For example + +.. code-block:: json + + { + "destination-address": "https://centvolttron2:8443", + "custom_topic_list": ["heartbeat"], + "capture_log_data": false + } + +See `Configuration Options <../../../volttron-api/services/ForwardHistorian/README.html#configuration-options>`_ for all +available forward historian configuration + +Since forward historian extends BaseHistorian all BaseHistorian's configuration can be added to forwarder. Please see +`BaseHistorian Configurations <../../../agent-framework/historian-agents/historian-framework.html#configuration>`_ for the list +of available BaseHistorian configurations + +Installation +------------ + +Once we have our configuration file ready we can install the forwarder agent using the command + +.. code-block:: bash + + vctl install --agent-config services/core/ForwardHistorian + +But before we start the agent we should configure the destination volttron instance to accept the connection from the +forwarder. + +Configuring destination volttron instance +========================================= + +When a forwarder tries to connect to a destination volttron instance, the destination instance will check the ip address +of the source and public key of connecting agent against its list of accepted peers. So before forwarder can connect to the +destination instance, we should add these two details to the destination's auth.json file. + +To do this we can use the command + +.. code-block:: bash + + vctl auth add --address
--credentials + +Only the address and credential keys are mandatory. You can add additional fields such as comments or user id for reference. +In the above command address is the ip address of the source instance in which the forwarder is installed. Credentials +is the public key of the installed forwarder agent. You can get the forwarder agent's public key by running the following +command on the **source instance** + +.. code-block:: bash + + vctl auth publickey + +.. seealso:: + + For more details about VIP authentication in ZMQ based VOLTTRON refer to :ref:`VIP Authentication` + +***************************************** +Setup for two RabbitMQ VOLTTRON instances +***************************************** + +RabbitMQ based VOLTTRON instances use x509 certificate based authentication. A forward historian that forwards data from +one RMQ instance to another RMQ instance would need a x509 certificate that is signed by the destination volttron instance's +root certificate for authentication. To obtain a signed certificate, on start, the forward historian creates a certificate +signing request (CSR) and sends it to destination's instance for approval. An admin on the destination end, needs to +login into the admin web interface and approve the request. On approval a certificate signed by the destination CA is +returned to the forward historian and the forward historian can use this certificate for communication. + +.. seealso:: + + For more details about CSR approval process see + :ref:`Agent communication to Remote RabbitMQ instance ` + For an example CSR approval process see + :ref:`VOLTTRON Central Multi-Platform Multi-Bus Demo ` + +Forwarder Configuration +======================= + +Since destination instance would have web enabled to approve the incoming CSR requests, forward historian can be configured +with just the destination instance web address similar to ref:`Minimum configuration: Option 2` + +.. code-block:: json + + { + "destination-address": "https://centvolttron2:8443" + } + +On start, the forwarder makes Certificate signing request and retries periodically till the certificate is approved. + +************************* +Testing Forward Historian +************************* + +Once forward historian is configured and installed and the destination is configured to accept incoming connection from +the forwarder (either by adding to destination's auth.json as in the case of ZMQ or after CSR is approved in case of RMQ) +forwarder can forward any message published to the configured set of topics and re-publish on the destination's messagebus. + +Testing with custom topic +========================= + +1. Configure Forward historian to forward the topic heartbeat by adding the following to the forward historian's + configuration + + .. code-block:: json + + "custom_topic_list": ["heartbeat"], + +2. If forwarder is not already running start the forwarder agent. If it is already running the configuration change + should get picked up automatically in a few seconds. + +3. If there are no other agent in the source volttron instance, install a listener agent that periodically publishes to + the topic 'heartbeat' + + .. code-block:: bash + + vctl install examples/ListenerAgent + + + .. note:: + + As of VOLTTRON 8.0, all agents by default publish a heartbeat message periodically unless the agent explicitly + opted out of it. So if you already have other installed agents that publish heartbeat message you don't have to add the + listener agent + +4. On the destination instance install a listener agent and tail the volttron log file. You should be able to see the + listener or any other source agent's heartbeat message on the destination volttron's log file + +Testing with default topics +=========================== + +Forward historian by default forwards the default topics a historian subscribes to - devices, analysis, log, and record. +On the source instance, we can install a platform driver and configure it with a fake device to publish data to the devices +topic. Once the platform driver is started and data gets published to the devices topic, forwarder can re-publish these +to the destination message bus + +1. Configure and install forward historian as explained in the sections above + +2. Configure destination to accept incoming connection as explained in the above sections + +3. Shutdown source volttron instance + + .. code-block:: bash + + vctl shutdown --platform + +4. On source install platform driver using the below vcfg command. When prompted, choose to configure a fake device for + the platform driver + + .. code-block:: bash + + vcfg --agent platform_driver + + Below is an example command with prompts + + .. code-block:: bash + + (volttron) [volttron@centvolttron1 myvolttron]$ vcfg --agent platform_driver + + Your VOLTTRON_HOME currently set to: /home/volttron/vhomes/rmq_instance1 + + Is this the volttron you are attempting to setup? [Y]: + Configuring /home/volttron/git/myvolttron/services/core/PlatformDriverAgent. + ['volttron', '-vv', '-l', '/home/volttron/vhomes/rmq_instance1/volttron.cfg.log'] + Would you like to install a fake device on the platform driver? [N]: y + Should the agent autostart? [N]: n + +5. Start source volttron instance + + .. code-block:: bash + + ./start-volttron + +6. Start platform driver and forwarder on source volttron instance +7. On the destination volttron instance install a listener agent and tail the volttron log. You should see the devices + data periodically getting logged in the destination volttron instance. diff --git a/docs/source/agent-framework/historian-agents/historian-framework.rst b/docs/source/agent-framework/historian-agents/historian-framework.rst new file mode 100644 index 0000000000..b76879751b --- /dev/null +++ b/docs/source/agent-framework/historian-agents/historian-framework.rst @@ -0,0 +1,223 @@ +.. _Historian-Framework: + +=================== +Historian Framework +=================== + +Historian Agents are the way by which `device`, `actuator`, `datalogger`, and `analysis` topics are automatically +captured and stored in some sort of data store. Historians exist for the following storage options: + +- A general :ref:`SQL Historian ` implemented for MySQL, SQLite, PostgreSQL, and Amazon Redshift +- :ref:`MongoDB Historian ` +- :ref:`Crate Historian ` +- :ref:`Forward Historian ` for sending data to another VOLTTRON instance +- :ref:`OpenEIS Historian ` +- :ref:`MQTT Historian ` Forwards data to an MQTT broker +- :ref:`InfluxDB Historian ` + +Other implementations of Historians can be created by following the +:ref:`Developing Historian Agents ` guide. + +A video tutorial of historian framework can be found :ref:`here` + +Base Historian +============== + +Historians are all built upon the `BaseHistorian` which provides general functionality the specific implementations are +built upon. + +This base Historian will cache all received messages to a local database before publishing it to the Historian. This +allows recovery from unexpected happenings before the successful writing of data to the Historian. + + +Configuration +============= + +In most cases the default configuration settings are fine for all deployments. + +All Historians support the following settings: + +.. code-block:: python + + { + # Maximum amount of time to wait before retrying a failed publish in seconds. + # Will try more frequently if new data arrives before this timelime expires. + # Defaults to 300 + "retry_period": 300.0, + + # Maximum number of records to submit to the historian at a time. + # Defaults to 1000 + "submit_size_limit": 1000, + + # In the case where a historian needs to catch up after a disconnect + # the maximum amount of time to spend writing to the database before + # checking for and caching new data. + # Defaults to 30 + "max_time_publishing": 30.0, + + # Limit how far back the historian will keep data in days. + # Partial days supported via floating point numbers. + # A historian must implement this feature for it to be enforced. + "history_limit_days": 366, + + # Limit the size of the historian data store in gigabytes. + # A historian must implement this feature for it to be enforced. + "storage_limit_gb": 2.5 + + # Size limit of the backup cache in Gigabytes. + # Defaults to no limit. + "backup_storage_limit_gb": 8.0, + + # How full should the backup storage be for an alert to be raised. + # percentage as decimal. For example set value as 0.9 to get alerted when cache becomes more than 90% configured + # size limit + "backup_storage_report" : 0.9, + + # Do not actually gather any data. Historian is query only. + "readonly": false, + + # capture_device_data + # Defaults to true. Capture data published on the `devices/` topic. + "capture_device_data": true, + + # capture_analysis_data + # Defaults to true. Capture data published on the `analysis/` topic. + "capture_analysis_data": true, + + # capture_log_data + # Defaults to true. Capture data published on the `datalogger/` topic. + "capture_log_data": true, + + # capture_record_data + # Defaults to true. Capture data published on the `record/` topic. + "capture_record_data": true, + + # After publishing every "message_publish_count" number of records, historian writes + # INFO level log with total number of records published since start of historian + "message_publish_count": 10000, + + # If historian should subscribe to the configured topics from all platform (instead of just local platform) + # by default subscription is only to local topics + "all_platforms": false, + + # Replace a one topic with another before saving to the database. + "topic_replace_list": [ + #{"from": "FromString", "to": "ToString"} + ], + + # For historian developers. Adds benchmarking information to gathered data. + # Defaults to false and should be left that way. + "gather_timing_data": false + + # Allow for the custom topics or for limiting topics picked up by a historian instance. + # the key for each entry in custom topics is the data handler. The topic and data must + # conform to the syntax the handler expects (e.g., the capture_device_data handler expects + # data the driver framework). Handlers that expect specific data format are + # capture_device_data, capture_log_data, and capture_analysis_data. All other handlers will be + # treated as record data. The list associated with the handler is a list of custom + # topics to be associated with that handler. + # + # To restrict collection to only the custom topics, set the following config variables to False + # capture_device_data + # capture_analysis_data + # capture_log_data + # capture_record_data + "custom_topics": { + "capture_device_data": ["devices/campus/building/device/all"], + "capture_analysis_data": ["analysis/application_data/example"], + "capture_record_data": ["example"] + }, + + # To restrict the points processed by a historian for a device or set of devices (i.e., this configuration + # parameter only filters data on topics with base 'devices). If the 'device' is in the + # topic (e.g.,'devices/campus/building/device/all') then only points in the list will be passed to the + # historians capture_data method, and processed by the historian for storage in its database (or forwarded to a + # remote platform (in the case of the ForwardHistorian). The key in the device_data_filter dictionary can + # be made more restrictive (e.g., "device/subdevice") to limit unnecessary searches through topics that may not + # contain the point(s) of interest. + "device_data_filter":{ + "device": ["point_name1", "point_name2"] + }, + + # list of topics for which incoming record's timestamp should be compared with current timestamp to see if it + # within the configured tolerance limit. Default value: "devices" + "time_tolerance_topics": ["devices"], + + # If this is set, timestamp of incoming records on time_tolerance_topics(by default, "devices" topics) are + # compared with current timestamp. If the difference between current timestamp and the record's timestamp + # exceeds the configured time_tolerance (seconds), then those records are added to a separate time_error table + # in cache and are not sent to concrete historian for publishing. An alert is raised when records are entered + # into the time_error table. Units: seconds + "time_tolerance": 5, + } + + +Topics +====== + +By default the base historian will listen to 4 separate root topics: + +* `datalogger/*` +* `record/*` +* `analysis/*` +* `devices/*` + +Each root topic has a :ref:`specific message syntax ` that it is expecting for incoming data. + +Messages published to `datalogger` will be assumed to be `timepoint` data that is composed of units and specific types +with the assumption that they have the ability to be plotted easily. + +Messages published to `devices` are data that comes directly from drivers. + +Messages published to `analysis` are analysis data published by agents in the form of key value pairs. + +Finally, messages that are published to `record` will be handled as string data and can be customized to the user +specific situation. + + +.. _Platform-Historian: + +Platform Historian +================== + +A platform historian is a :ref:`"friendly named" ` historian on a VOLTTRON instance. It always has +the identity of `platform.historian`. A platform historian is made available to a VOLTTRON Central agent for monitoring +of the VOLTTRON instances health and plotting topics from the platform historian. In order for one of the historians to +be turned into a platform historian the `identity` keyword must be added to it's configuration with the value of +`platform.historian`. The following configuration file shows a SQLite based platform historian configuration: + +.. code-block:: json + + { + "agentid": "sqlhistorian-sqlite", + "identity": "platform.historian", + "connection": { + "type": "sqlite", + "params": { + "database": "~/.volttron/data/platform.historian.sqlite" + } + } + } + +.. _historian-video-tutorial: + +Historian Video Tutorial +======================== + +.. raw:: html + +
+ +
+ +.. toctree:: + + historian-topic-syntax + crate/crate-historian + influxdb/influxdb-historian + mongodb/mongo-historian + mqtt/mqtt-historian + openeis/openeis-historian + sql-historian/sql-historian + data-mover/data-mover-historian + forwarder/forward-historian diff --git a/docs/source/agent-framework/historian-agents/historian-topic-syntax.rst b/docs/source/agent-framework/historian-agents/historian-topic-syntax.rst new file mode 100644 index 0000000000..f01d55ed92 --- /dev/null +++ b/docs/source/agent-framework/historian-agents/historian-topic-syntax.rst @@ -0,0 +1,116 @@ +.. _Historian-Topic-Syntax: + +====================== +Historian Topic Syntax +====================== + +Each historian will subscribe to the following message bus topics: + +* `datalogger/*` +* `anaylsis/*` +* `record/\*` +* `devices/\*` + +For each of these topics there is a different message syntax that must be adhered to in order for the correct +interpretation of the data being specified. + + +record/\* +--------- +The record topic is the most flexible of all of the topics. This topic allows any serializable message to be published +to any topic under the root topic `record/`. + +.. Note:: + + This topic is not recommended to plot, as the structure of the messages are not necessarily numeric + +:: + + # Example messages that can be published + + # Dictionary data + {'foo': 'world'} + + # Numerical data + 52 + + # Time data (note: not a `datetime` object) + '2015-12-02T11:06:32.252626' + + +devices/\* +---------- + +The `devices` topic is meant to be data structured from a scraping of a Modbus or BACnet device. Currently drivers for +both of these protocols write data to the message bus in the proper format. VOLTTRON drivers also publish an +aggregation of points in an `all` topic. + +**Only the `all` topic messages are read and published to a historian.** + +Both the all topic and point topic have the same header information, but the message body for each is slightly +different. For a complete working example of these messages please see +:py:mod:`examples.ExampleSubscriber.subscriber.subscriber_agent` + +The format of the header and message for device topics (i.e. messages published to topics with pattern "devices/\*/all") +follows the following pattern: + +:: + + # Header contains the data associated with the message. + { + # python code to get this is + # from datetime import datetime + # from volttron.platform.messaging import headers as header_mod + # from volttron.platform.agent import utils + # now = utils.format_timestamp( datetime.utcnow()) + # { + # headers_mod.DATE: now, + # headers_mod.TIMESTAMP: now + # } + "Date": "2015-11-17 21:24:10.189393+00:00", + "TimeStamp": "2015-11-17 21:24:10.189393+00:00" + } + + # Message Format: + + # WITH METADATA + # Messages contains a two element list. The first element contains a + # dictionary of all points under a specific parent. While the second + # element contains a dictionary of meta data for each of the specified + # points. For example devices/pnnl/building/OutsideAirTemperature and + # devices/pnnl/building/MixedAirTemperature ALL message would be created as: + [ + {"OutsideAirTemperature ": 52.5, "MixedAirTemperature ": 58.5}, + { + "OutsideAirTemperature ": {'units': 'F', 'tz': 'UTC', 'type': 'float'}, + "MixedAirTemperature ": {'units': 'F', 'tz': 'UTC', 'type': 'float'} + } + ] + + #WITHOUT METADATA + # Message contains a dictionary of all points under a specific parent + {"OutsideAirTemperature ": 52.5, "MixedAirTemperature ": 58.5} + + +analysis/\* +----------- + +Data sent to `analysis/*` topics is result of analysis done by applications. The format of data sent to `analysis/*` +topics is similar to data sent to `devices/\*/all` topics. + + +datalogger/\* +------------- +Messages published to `datalogger/\*` will be assumed to be time point data that is composed of units and specific types +with the assumption that they have the ability to be graphed easily. + +:: + + {"MixedAirTemperature": {"Readings": ["2015-12-02T00:00:00", + `_. + + +Prerequisites +============= + +InfluxDB Installation +--------------------- + +To install InfluxDB on an Ubuntu or Debian operating system, run the script: + + :: + + services/core/InfluxdbHistorian/scripts/install-influx.sh + +For installation on other operating systems, +see ``_. + +Authentication in InfluxDB +-------------------------- + +By default, the InfluxDB *Authentication* option is disabled, and no user authentication is required to access any +InfluxDB database. You can enable authentication by updating the InfluxDB configuration file. For detailed information +on enabling authentication, see: +``_. + +If *Authentication* is enabled, authorization privileges are enforced. There must be at least one defined admin user +with access to administrative queries as outlined in the linked document above. Additionally, you must pre-create the +``user`` and ``database`` that are specified in the configuration file (the default configuration file for InfluxDB +is `services/core/InfluxdbHistorian/config`). If your ``user`` is a non-admin user, they must be granted a full set of +privileges on the desired ``database``. + + +InfluxDB Driver +--------------- + +In order to connect to an InfluxDb client, the Python library for InfluxDB must be installed in VOLTTRON's virtual +environment. From the command line, after enabling the virtual environment, install the InfluxDB library as follows: + +.. code-block:: bash + + python bootstrap.py --influxdb + +or + +.. code-block:: bash + + python bootstrap.py --databases + +or + +.. code-block:: bash + + pip install influxdb + + +Configuration +============= + +The default configuration file for VOLTTRON's InfluxDB Historian agent should be in the format: + +.. code-block:: python + + { + "connection": { + "params": { + "host": "localhost", + "port": 8086, # Don't change this unless default bind port + # in influxdb config is changed + "database": "historian", + "user": "historian", # user is optional if authentication is turned off + "passwd": "historian" # passwd is optional if authentication is turned off + } + }, + "aggregations": { + "use_calendar_time_periods": true + } + } + + +The InfluxDB Historian agent can be packaged, installed and started according to the standard VOLTTRON agent creation +procedure. A sample VOLTTRON configuration file has been provided: `services/core/InfluxdbHistorian/config`. + +.. seealso:: + + :ref:`Agent Development Walk-through ` + + +Connection +---------- + +The ``host``, ``database``, ``user`` and ``passwd`` values in the VOLTTRON configuration file +can be modified. ``user`` and ``passwd`` are optional if InfluxDB *Authentication* is disabled. + +.. note:: + + Be sure to initialize or pre-create the ``database`` and ``user`` defined in the configuration file, and if ``user`` + is a non-admin user, be make sure to grant privileges for the user on the specified ``database``. For more + information, see `Authentication in InfluxDB`_. + + +Aggregations +------------ + +In order to use aggregations, the VOLTTRON configuration file must also specify a value, either ``true`` or ``false``, +for ``use_calendar_time_periods``, indicating whether the aggregation period should align to calendar time periods. If +this value is omitted from the configuration file, aggregations cannot be used. + +For more information on historian aggregations, see: +:ref:`Aggregate Historian Agent Specification `. + +Supported Influxdb aggregation functions: + + * Aggregations: COUNT(), DISTINCT(), INTEGRAL(), MEAN(), MEDIAN(), MODE(), SPREAD(), STDDEV(), SUM() + + * Selectors: FIRST(), LAST(), MAX(), MIN() + + * Transformations: CEILING(),CUMULATIVE_SUM(), DERIVATIVE(), DIFFERENCE(), ELAPSED(), NON_NEGATIVE_DERIVATIVE(), + NON_NEGATIVE_DIFFERENCE() + +More information how to use those functions: ``_ + +.. note:: + + Historian aggregations in InfluxDB are different from aggregations employed by other historian agents in VOLTTRON. + InfluxDB doesn't have a separate agent for aggregations. Instead, aggregation is supported through the + ``query_historian`` function. Other agents can execute an aggregation query directly in InfluxDB by calling the + `RPC.export` method ``query``. For an example, see + :ref:`Aggregate Historian Agent Specification ` + + +Database Schema +=============== + +Each InfluxDB database has a `meta` table as well as other tables for different measurements, e.g. one table for +"power_kw", one table for "energy", one table for "voltage", etc. (An InfluxDB `measurement` is similar to a +relational table, so for easier understanding, InfluxDB measurements will be referred to below as tables.) + + +Measurement Table +----------------- + +Example: If a topic name is `CampusA/Building1/Device1/Power_KW`, the `power_kw` table might look as follows: + ++-------------------------------+-----------+---------+----------+-------+------+ +|time |building |campus |device |source |value | ++-------------------------------+-----------+---------+----------+-------+------+ +|2017-12-28T20:41:00.004260096Z |building1 |campusa |device1 |scrape |123.4 | ++-------------------------------+-----------+---------+----------+-------+------+ +|2017-12-30T01:05:00.004435616Z |building1 |campusa |device1 |scrape |567.8 | ++-------------------------------+-----------+---------+----------+-------+------+ +|2018-01-15T18:08:00.126345Z |building1 |campusa |device1 |scrape |10 | ++-------------------------------+-----------+---------+----------+-------+------+ + +``building``, ``campus``, ``device``, and ``source`` are InfluxDB *tags*. ``value`` is an InfluxDB *field*. + +.. note:: + + The topic is converted to all lowercase before being stored in the table. In other words, a set of *tag* names, as + well as a table name, are created by splitting `topic_id` into substrings (see `meta table`_ below). + + +In this example, where the typical format of a topic name is `///`, `campus`, +`building` and `device` are each stored as tags in the database. + +A topic name might not confirm to that convention: + + #. The topic name might contain additional substrings, e.g. `CampusA/Building1/LAB/Device/OutsideAirTemperature`. + In this case, `campus` will be ``campusa/building``, `building` will be ``lab``, and `device` will be ``device``. + + #. The topic name might contain fewer substrings, e.g. `LAB/Device/OutsideAirTemperature`. In this case, the + `campus` tag will be empty, `building` will be ``lab``, and `device` will be ``device``. + + +Meta Table +========== + +The meta table will be structured as in the following example: + ++---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ +|time |last_updated |meta_dict |topic |topic_id | ++---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ +|1970-01-01T00:00:00Z |2017-12-28T20:47:00.003051+00:00 |{u'units': u'kw', u'tz': u'US/Pacific', u'type': u'float'} |CampusA/Building1/Device1/Power_KW |campusa/building1/device1/power_kw | ++---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ +|1970-01-01T00:00:00Z |2017-12-28T20:47:00.003051+00:00 |{u'units': u'kwh', u'tz': u'US/Pacific', u'type': u'float'} |CampusA/Building1/Device1/Energy_KWH |campusa/building1/device1/energy_kwh | ++---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ + +In the InfluxDB, `last_updated`, `meta_dict` and `topic` are *fields* and `topic_id` is a *tag*. + +Since InfluxDB is a time series database, the ``time`` column is required, and a dummy value (``time=0``, which is +``1970-01-01T00:00:00Z`` based on epoch unix time) is assigned to all topics for easier metadata updating. Hence, if the +contents of `meta_dict` change for a specific topic, both `last_updated` and `meta_dict` values for that topic will be +replaced in the table. diff --git a/docs/source/agent-framework/historian-agents/mongodb/mongo-historian.rst b/docs/source/agent-framework/historian-agents/mongodb/mongo-historian.rst new file mode 100644 index 0000000000..9fd13532d8 --- /dev/null +++ b/docs/source/agent-framework/historian-agents/mongodb/mongo-historian.rst @@ -0,0 +1,110 @@ +.. _Mongo-Historian: + +=============== +Mongo Historian +=============== + +MongoDB is a NoSQL document database, which allows for great performance for transactional data. Because MongoDB +documents do not have a schema, it is easy to store and query data which changes over time. MongoDB also scales +horizontally using sharding. + +For more information about MongoDB, read the `MongoDB documentation `_ + + +Prerequisites +============= + + +1. Mongodb +---------- + +Setup mongodb based on using one of the three installation scripts for the corresponding environment: + +1. Install as root on Redhat or Cent OS + + .. code-block:: bash + + sudo scripts/historian-scripts/root_install_mongo_rhel.sh + + The above script will prompt user for os version, db user name, password and database name. Once installed you can + start and stop the service using the command: + + .. code-block:: bash + + **sudo service mongod [start|stop|service]** + +2. Install as root on Ubuntu + + .. code-block:: bash + + sudo scripts/historian-scripts/root_install_mongo_ubuntu.sh + + The above script will prompt user for os version, db user name, password and database name. Once installed you can + start and stop the service using the command: + + .. code-block:: bash + + **sudo service mongod [start|stop|service]** + +3. Install as non root user on any Linux machine + + .. code-block:: bash + + scripts/historian-scripts/install_mongodb.sh + + Usage: + + .. code-block:: bash + + install_mongodb.sh [-h] [-d download_url] [-i install_dir] [-c config_file] [-s] + + Optional arguments: + + -s setup admin user and test collection after install and startup + + -d download url. defaults to https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.2.4.tgz + + -i install_dir. defaults to current_dir/mongo_install + + -c config file to be used for mongodb startup. Defaults to default_mongodb.conf in the same directory as this + script. Any data path mentioned in the config file should already exist and should have write access to the + current user + + -h print the help message + + +2. Mongodb connector +-------------------- +This historian requires a mongodb connector installed in your activated VOLTTRON virtual environment to talk to MongoDB. +Please execute the following from an activated shell in order to install it: + +.. code-block:: bash + + python bootstrap.py --mongo + + +or + +.. code-block:: bash + + python bootstrap.py --databases + + +or + +.. code-block:: bash + + pip install pymongo + + +3. Configuration Options +------------------------ + +The historian configuration file can specify + +:: + + "history_limit_days": + +which will remove entries from the data and rollup collections older than `n` days. Timestamps passed to the +``manage_db_size`` method are truncated to the day. diff --git a/docs/source/core_services/historians/MQTT-Historian.rst b/docs/source/agent-framework/historian-agents/mqtt/mqtt-historian.rst similarity index 53% rename from docs/source/core_services/historians/MQTT-Historian.rst rename to docs/source/agent-framework/historian-agents/mqtt/mqtt-historian.rst index 1874240fa3..2ed25a0b54 100644 --- a/docs/source/core_services/historians/MQTT-Historian.rst +++ b/docs/source/agent-framework/historian-agents/mqtt/mqtt-historian.rst @@ -1,27 +1,27 @@ .. _MQTT-Historian: +============== MQTT Historian ============== Overview --------- -The MQTT Historian agent publishes data to an MQTT broker. +======== + +The MQTT Historian agent publishes data to an MQTT broker. The ``mqttlistener.py`` script will connect to the broker +and print all messages. -The mqttlistener.py script will connect to the broker and print -all messages. Dependencies ------------- -The Paho MQTT library from Eclipse is needed for the agent and can -be installed with: +============ +The Paho MQTT library from Eclipse is needed for the agent and can be installed with: -:: +.. code-block:: bash pip install paho-mqtt The Mosquitto MQTT broker may be useful for testing and can be installed with -:: +.. code-block:: bash apt-get install mosquitto diff --git a/docs/source/core_services/historians/OpenEIS-Historian.rst b/docs/source/agent-framework/historian-agents/openeis/openeis-historian.rst similarity index 80% rename from docs/source/core_services/historians/OpenEIS-Historian.rst rename to docs/source/agent-framework/historian-agents/openeis/openeis-historian.rst index 97b4a3724c..7909e3c2fb 100644 --- a/docs/source/core_services/historians/OpenEIS-Historian.rst +++ b/docs/source/agent-framework/historian-agents/openeis/openeis-historian.rst @@ -1,17 +1,23 @@ .. _OpenEIS-Historian: +================= OpenEIS Historian -=================== +================= -An OpenEIS Historian has been developed to integrate real -time data ingestion into the OpenEIS platform. In order for the OpenEIS -historian to be able to communicate with an OpenEIS server a datasource -must be created on the OpenEIS server. The process of creating a dataset -is documented in the `OpenEIS User's -Guide `__ -under *Creating a Dataset* heading. Once a dataset is created you will -be able to add datasets through the configuration file. An example -configuration for the historian is as follows: +An OpenEIS Historian has been developed to integrate real time data ingestion into the OpenEIS platform. In order for +the OpenEIS Historian to be able to communicate with an OpenEIS server a datasource must be created on the OpenEIS +server. + +The process of creating a dataset is documented in the +`OpenEIS User's Guide `__ +under `Creating a Dataset` heading. + + +Configuration +============= + +Once a dataset is created you will be able to add datasets through the configuration file. An example configuration for +the historian is as follows: :: @@ -84,4 +90,3 @@ configuration for the historian is as follows: # } } } - diff --git a/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst b/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst new file mode 100644 index 0000000000..200c67e98d --- /dev/null +++ b/docs/source/agent-framework/historian-agents/sql-historian/sql-historian.rst @@ -0,0 +1,225 @@ +.. _SQL-Historian: + +============= +SQL Historian +============= + +An SQL Historian is available as a core service (`services/core/SQLHistorian` in the VOLTTRON repository). + +The SQL Historian has been programmed to handle for inconsistent network connectivity (automatic re-connection to tcp +based databases). All additions to the historian are batched and wrapped within a transaction with commit and rollback +functions. This allows the maximum throughput of data with the most protection. + + +Configuration +============= + +The following example configurations show the different options available for configuring the SQL Historian Agent: + + +MySQL Specifics +--------------- + +MySQL requires a third party driver (mysql-connector) to be installed in +order for it to work. Please execute the following from an activated +shell in order to install it. + +:: + + pip install --allow-external mysql-connector-python mysql-connector-python + +or + +:: + + python bootstrap.py --mysql + +or + +:: + + python bootstrap.py --databases + +| In addition, the mysql database must be created and permissions + granted for select, insert and update before the agent is started. In + order to support timestamp with microseconds you need at least MySql + 5.6.4. Please see this `MySql + documentation `__ + for more details +| The following is a minimal configuration file for using a MySQL based + historian. Other options are available and are documented + http://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html. + **Not all parameters have been tested, use at your own risk**. + +:: + + { + "agentid": "sqlhistorian-mysql", + "connection": { + "type": "mysql", + "params": { + "host": "localhost", + "port": 3306, + "database": "volttron", + "user": "user", + "passwd": "pass" + } + } + } + + +Sqlite3 Specifics +----------------- + +An Sqlite Historian provides a convenient solution for under powered systems. The database is a parameter to a location on the file system; 'database' should be a non-empty string. +By default, the location is relative to the agent's installation directory, however it will respect a rooted or relative path to the database. + +If 'database' does not have a rooted or relative path, the location of the database depends on whether the volttron platform is in secure mode. For more information on secure mode, see :ref:`Running-Agents-as-Unix-User`. +In secure mode, the location will be under /.agent-data directory because this will be the only directory in which the agent will have write-access. +In regular mode, the location will be under /data for backward compatibility. + +The following is a minimal configuration file that uses a relative path to the database. + +:: + + { + "agentid": "sqlhistorian-sqlite", + "connection": { + "type": "sqlite", + "params": { + "database": "data/historian.sqlite" + } + } + } + + +PostgreSQL and Redshift +----------------------- + +Installation notes +^^^^^^^^^^^^^^^^^^ + +1. The PostgreSQL database driver supports recent PostgreSQL versions. It has been tested on 10.x, but should work with + 9.x and 11.x. + +2. The user must have SELECT, INSERT, and UPDATE privileges on historian tables. + +3. The tables in the database are created as part of the execution of the SQL Historian Agent, but this will fail if the + database user does not have CREATE privileges. + +4. Care must be exercised when using multiple historians with the same database. This configuration may be used only if + there is no overlap in the topics handled by each instance. Otherwise, duplicate topic IDs may be created, producing + strange results. + +5. Redshift databases do not support unique constraints. Therefore, it is possible that tables may contain some + duplicate data. The Redshift driver handles this by using distinct queries. It does not remove duplicates from the + tables. + + +Dependencies +^^^^^^^^^^^^ + +The PostgreSQL and Redshift database drivers require the `psycopg2` Python package. + + From an activated shell execute: + + .. code-block:: bash + + pip install psycopg2-binary + + +PostgreSQL and Redshift Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following are minimal configuration files for using a psycopg2-based historian. Other options are available and are +`documented `_. + +.. warning:: + + Not all parameters have been tested, use at your own risk. + + +Local PostgreSQL Database +""""""""""""""""""""""""" + +The following snippet demonstrates how to configure the SQL Historian Agent to use a PostgreSQL database on the local +system that is configured to use Unix domain sockets. The user executing VOLTTRON must have appropriate privileges. + +.. code-block:: json + + { + "connection": { + "type": "postgresql", + "params": { + "dbname": "volttron" + } + } + } + + +Remote PostgreSQL Database +"""""""""""""""""""""""""" + +The following snippet demonstrates how to configure the SQL Historian Agent to use a remote PostgreSQL database. + +.. code-block:: json + + { + "connection": { + "type": "postgresql", + "params": { + "dbname": "volttron", + "host": "historian.example.com", + "port": 5432, + "user": "volttron", + "password": "secret" + } + } + } + + +TimescaleDB Support +""""""""""""""""""" + +Both of the above PostgreSQL connection types can make use of TimescaleDB's high performance Hypertable backend for the +primary time-series table. The agent assumes you have completed the TimescaleDB installation and setup +the database by following the instructions `here `_. + +To use, simply add ``timescale_dialect: true`` to the connection params in the Agent Config as below: + +.. code-block:: json + + { + "connection": { + "type": "postgresql", + "params": { + "dbname": "volttron", + "host": "historian.example.com", + "port": 5432, + "user": "volttron", + "password": "secret", + "timescale_dialect": true + } + } + } + + +Redshift Database +""""""""""""""""" + +The following snippet demonstrates how to configure the SQL Historian Agent to use a Redshift database. + +.. code-block:: json + + { + "connection": { + "type": "redshift", + "params": { + "dbname": "volttron", + "host": "historian.example.com", + "port": 5432, + "user": "volttron", + "password": "secret" + } + } + } diff --git a/docs/source/agent-framework/integrating-simulations/Simulation-Configuration.rst b/docs/source/agent-framework/integrating-simulations/Simulation-Configuration.rst new file mode 100644 index 0000000000..a3951bf6c5 --- /dev/null +++ b/docs/source/agent-framework/integrating-simulations/Simulation-Configuration.rst @@ -0,0 +1,114 @@ + .. _Simulation-Integration-Configuration: + +======================================================= +Configuration for Integrating With Simulation Platforms +======================================================= + +Configurations for interfacing with simulation platforms will vary depending on the specifications of that platform but +there may be few common configuration options that we can group together as separate sections such as: + +* Config parameters that help us setup the simulation such as connection parameters (connection address), unique name + for the participant, total simulation time +* List of topics for subscribing with simulation platform +* List of topics for publishing to the simulation platform +* List of topics subscribing with VOLTTRON message bus + +We have grouped these four categories of configuration into four different sections - `properties`, `inputs`, `outputs` +and `volttron_subscriptions`. The simulation integration class will read these four sections and register with +simulation platform appropriately. If an agent needs to interface with EnergyPlus or HELICS using the simulation +integration framework, then it will need to group the configurations into above four sections. + +.. note:: + + GridAPPS-D can run complex power system simulations using variety of simulators such as GridLAB-D, HELICS, MatPower + etc. So the configuration for GridAPPS-D cannot follow the above format. Because of this, the configuration for + GridAPPSD is taken in the raw format and passed directly to the GridAPPS-D simulation. + + +Example Configuration +--------------------- + +The configuration for interfacing with a simulation platform is described by using integration with HELICS as an +example. Each participant in a HELICS co-simulation environment is called a federate. + +Below is an example HELICS config file. + +.. code-block:: yaml + + # Config parameters for setting up HELICS federate + properties: + name: federate1 # unique name for the federate + loglevel: 5 # log level + coreType: zmq # core type + timeDelta: 1.0 # time delta (defaults to 1s) + uninterruptible: true + simulation_length: 360 # simulation length in seconds (defaults to 360s) + + # configuration for subscribing to HELICS simulation + outputs: + # List of subscription information, typically contains + # - subscription topic, + # - datatype + # - publication topic for VOLTTRON (optional) to republish the + # message on VOLTTRON message bus + # - additional/optional simulation specific configuration + - sim_topic: federate2/totalLoad + volttron_topic: helics/abc + type: complex + required: true + - sim_topic: federate2/charge_EV6 + volttron_topic: helics/ev6 + type: complex + required: true + + # configuration for publishing to HELICS simulation + inputs: + # List of publication information, containing + # - HELICS publication topic, + # - datatype + # - metadata associated with the topic (for example unit) + # - subscription topic for VOLTTRON message bus (optional) which can then be + # republished on HELICS with HELICS publication topic + # - additional/optional publication specific configuration + - sim_topic: pub1 # HELICS publication key + type: double # datatype + unit: m # unit + info: this is an information string for use by the application #additional info + volttron_topic: pub1/all # topic to subscribe on VOLTTRON bus + global: true + - sim_topic: pub2 + type: double + volttron_topic: pub2/all + + volttron_subscriptions: + - feeder0_output/all + + +The properties section may contain the following. + +* Unique name for the federate +* core type (for example, zmq, tcp, mpi) +* time step delta in seconds +* total simulation time etc + +.. note:: + + The individual fields under this section may vary depending on whether the agent is interfacing with HELICS or + EnergyPlus. + +In the inputs section, list of subscriptions (if any) need to be provided. Each subscription will contain the following. + +* subscription topic +* data type +* VOLTTRON topic to republish the message on VOLTTRON message bus (optional) +* required flag (optional) + +In the outputs section, list of publications (if any) need to be provided. Each publication will contain the following. + +* publication topic +* data type +* metadata associated with the topic +* VOLTTRON topic to subscribe on the VOLTTRON message bus which will be republished on simulation bus (optional) +* additional information (optional) + +In the volttron_subscriptions, list of topics need to be subscribed on VOLTTRON bus can be provided. diff --git a/docs/source/agent-framework/integrating-simulations/Simulation-Integration.rst b/docs/source/agent-framework/integrating-simulations/Simulation-Integration.rst new file mode 100644 index 0000000000..2f719a2952 --- /dev/null +++ b/docs/source/agent-framework/integrating-simulations/Simulation-Integration.rst @@ -0,0 +1,189 @@ + .. _Simulation-Integration: + +===================================== +Integrating With Simulation Platforms +===================================== + +An agent wanting to integrate with a simulation platform has to create an object of concrete simulation integration +class (HELICSSimIntegration). This is best described with an example agent. The example agent will interface with +HELICS co-simulation platform. For more info about HELICS, please refer to +https://helics.readthedocs.io/en/latest/installation/linux.html. + +.. code-block:: python + + class HelicsExample(Agent): + """ + HelicsExampleAgent demonstrates how VOLTTRON agent can interact with HELICS simulation environment + """ + def __init__(self, config, **kwargs): + super(HelicsExample, self).__init__(enable_store=False, **kwargs) + self.config = config + self.helics_sim = HELICSSimIntegration(config, self.vip.pubsub) + + +.. _Register-Simulation: + +Register With Simulation Platform +================================= + +The agent has to first load the configuration file containing parameters such as connection address, simulation +duration, input and output topics etc., and register with simulation platform. The concrete simulation object will then +register the agent with simulation platform (in this case, HELICS) using appropriate APIs. The registration steps +include connecting to the simulation platform, passing the input and outputs topics to the simulation etc. In addition +to that, the agent has to provide a callback method in order for the concrete simulation object to pass the messages +received from the simulation to the agent. The best place to call the `register_inputs` API is within the `onstart` +method of the agent. + +.. code-block:: python + + @Core.receiver("onstart") + def onstart(self, sender, **kwargs): + """ + Register config parameters with HELICS. + Start HELICS simulation. + """ + # Register inputs with HELICS and provide callback method to receive messages from simulation + try: + self.helics_sim.register_inputs(self.config, self.do_work) + except ValueError as ex: + _log.error("Unable to register inputs with HELICS: {}".format(ex)) + self.core.stop() + return + + +Start the Simulation Platform +============================= + +After registering with the simulation platform, the agent can start the simulation. + +.. code-block:: python + + # Register inputs with HELICS and provide callback method to receive messages from simulation + try: + self.helics_sim.start_simulation() + except ValueError as ex: + _log.error("Unable to register inputs with HELICS: {}".format(ex)) + self.core.stop() + return + + +Receive outputs from the simulation +=================================== + +The concrete simulation object spawns a continuous loop that waits for any incoming messages (subscription messages) +from the simulation platform. On receiving a message, it passes the message to the callback method registered by the +agent during the :ref:`register with simulation step `. The agent can now choose to work on the +incoming message based on it's use case. The agent can also choose to publish some message back to the simulation at +this point of time as shown in below example. This is totally optional and is based on agent's use-case. At the end of +the callback method, the agent needs to make time request to the simulation, so that it can advance forward in the +simulation. Please note, this is a necessary step for HELICS co-simulation integration as the HELICS broker waits for +time requests from all it's federates before advancing the simulation. If no time request is made, the broker blocks +the simulation. + + +.. code-block:: python + + def do_work(self): + """ + Perform application specific work here using HELICS messages + :return: + """ + current_values = self.helics_sim.current_values + _log.debug("Doing work: {}".format(self.core.identity)) + _log.debug("Current set of values from HELICS: {}".format(current_values)) + # Do something with HELICS messages + # agent specific work!!! + + for pub in self.publications: + key = pub['sim_topic'] + # Check if VOLTTRON topic has been configured. If no, publish dummy value for the HELICS + # publication key + volttron_topic = pub.get('volttron_topic', None) + if volttron_topic is None: + value = 90.5 + global_flag = pub.get('global', False) + # If global flag is False, prepend federate name to the key + if not global_flag: + key = "{fed}/{key}".format(fed=self._federate_name, key=key) + value = 67.90 + self.helics_sim.publish_to_simulation(key, value) + + self.helics_sim.make_time_request() + + +Publish to the simulation +========================= + +The agent can publish messages to the simulation using publish_to_simulation API. The code snippet iterates over all the +publication keys (topics) and uses `publish_to_simulation` API to publish a dummy value of ``67.90`` for every +publication key. + +.. code-block:: python + + for pub in self.publications: + key = pub['sim_topic'] + value = 67.90 + self.helics_sim.publish_to_simulation(key, value) + + +Advance the simulation +====================== + +With some simulation platforms such as HELICS, the federate can make explicit time request to advance in time by certain +number of time steps. There will be a global time keeper (in this case HELICS broker) which will be responsible for +maintaining time within the simulation. In the time request mode, each federate has to request for time advancement +after it has completed it's work. The global time keeper grants the lowest time among all time requests. All the +federates receive the granted time and advance forward in simulation time together in a synchronized manner. Please +note, the granted time may not be the same as the requested time by the agent. + +Typically, the best place to make the time request is in the callback method provided to the simulation integration +object. + +.. code-block:: python + + self.helics_sim.make_time_request() + + +Pause the simulation +==================== + +Some simulation platforms such as GridAPPS-D have the capability to pause the simulation. The agent can make use of +this functionality by calling the appropriate wrapper API exposed by the concrete simulation class. In the case of +HELICS, we do not have capability of pause/resume simulation, so calling the `pause_simulation` API will result in no +operation. + +.. code-block:: python + + self.helics_sim.pause_simulation() + + +Resume the simulation +===================== + +If the simulation platform provides the pause simulation functionality then it will also provide capability to resume +the simulation. The agent can call resume_simulation API to resume the simulation. In case of HELICS, we do not have the +capability of pause/resume simulation, so calling the `resume_simulation` API will result in no operation. + +.. code-block:: python + + self.helics_sim.resume_simulation() + + +Stop the simulation +=================== + +The agent can stop the simulation at any point of point. In the case of `HELICSSimIntegration object`, it will +disconnect the federate from the HELICS core and close the library. Generally, it is a good practice to call the +`stop_simulation` API within the `onstop` method of the agent. In this way, the agent stops the simulation before +exiting the process. + +.. code-block:: python + + @Core.receiver("onstop") + def onstop(self, sender, **kwargs): + """ + This method is called when the Agent is about to shutdown, but before it + disconnects from the message bus. + """ + self.helics_sim.stop_simulation() + diff --git a/docs/source/agent-framework/integrating-simulations/index.rst b/docs/source/agent-framework/integrating-simulations/index.rst new file mode 100644 index 0000000000..0c349d3864 --- /dev/null +++ b/docs/source/agent-framework/integrating-simulations/index.rst @@ -0,0 +1,23 @@ +.. _Simulation_Overview: + +================================ +Simulation Integration Framework +================================ + +This framework provides a way to integrate different types of simulation platforms with VOLTTRON. Integration with +specific simulation platforms are all built upon the `BaseSimIntegration` class which provides common APIs needed to +interface with different types of simulation platforms. Each of the concrete simulation class extends the +`BaseSimIntegration` class and is responsible for interfacing with a particular simulation platform. Using these +concrete simulation objects, agents will be able to use the APIs provided by them to participate in a simulation, send +inputs to the simulation and receive outputs from the simulation and act on them. Currently, we have implementations +for integrating with HELICS, GridAPPSD and EnergyPlus. If one wants to integrate with a new simulation platform, then +one has to extend the `BaseSimIntegration` class and provide concrete implementation for each of the APIs provided by +the `BaseSimIntegration` class. For details on the `BaseSimIntegration` class, please refer to +``volttron/platform/agent/base_simulation_integration/base_sim_integration.py`` + + +.. toctree:: + specifications/simulation_integration + Simulation-Configuration + Simulation-Integration + diff --git a/docs/source/agent-framework/integrating-simulations/specifications/simulation_integration.rst b/docs/source/agent-framework/integrating-simulations/specifications/simulation_integration.rst new file mode 100644 index 0000000000..621f0f7ff6 --- /dev/null +++ b/docs/source/agent-framework/integrating-simulations/specifications/simulation_integration.rst @@ -0,0 +1,132 @@ +.. _Simulation-Integration-Spec: + +=================================================================== +Specification For Simplifying Integration With Simulation Platforms +=================================================================== + +There are several simulation platforms that can be integrated with VOLTTRON +to run as a single cohesive simulated environment for different type of +applications. Some of the platforms are FNCS, HELICS, GridAPPS-D and +EnergyPlus. They all have unique application areas and differ in the type +of simulations they run, inputs they accept and outputs they produce. There +are some similarities in the some of the basic steps of integrating with +VOLTTRON such as: + +1. Start simulation +2. Subscribe to outputs from the simulation +3. Publish outputs from simulation to VOLTTRON +4. Subscribe to topics from VOLTTRON +5. Send inputs to simulation +6. Advance simulation time step +7. Pause simulation +8. Resume simulation +9. Stop simulation + +Currently, VOLTTRON has individual implementations for integrating with +many of the above simulation platforms. For example, an example of +integrating with GridAPPSD can be found in `examples/GridAPPS-D/gridappsd_example/agent.py`. +EnergyPlus agent can be found in `examples/EnergyPlusAgent/energyplus/agent.py`. These implementations +will still be available for users. Instead, in this specification +we are proposing a base simulation integration class that will provide +common APIs and concrete simulation integration classes that will have +implementation of the these APIs as per the needs of the individual +simulation platforms. Users can use appropriate simulation classes based on +which simulation platform they want to integrate with. + + +Features +======== + +#. Start simulation + + This will start the simulation or register itself to be participant in + the simulation. + +#. Register for inputs from simulation + + A list of points need to be made available in a config file. The inputs + are then read from the config file and registered with simulation platform. + Whenever there is any change in those particular points, they are made + available to this class to process. The agent using this class object + can process it or publish it over VOLTTRON message bus to be consumed by + other agents. + +#. Send inputs to simulation + + Send inputs such as set points (for example, charge_EV5), + data points etc to the simulation. The simulation would then act on these + inputs. + +#. Receive outputs from simulation + Receive outputs generated by the simulation (for example, OutdoorAirTemperature + for a energyPlus simulation). The agent can then act on these output values. + If the config file has an associated topic, the output value will be republished + on the VOLTTRON message bus. + +#. Simulation time management + + Typically, in a simulation environment, one can run applications in real + time mode or in fast execution mode. All the participants in the simulation + have to be in sync with respect to time for simulation to be correct. There + is typically a central unit which acts as a global timekeeper. This timekeeper + can possibly be configured to use periodic time keeping, which means it + periodically advances in time (based on pre-configured time period) or + based on time advancement message. After each advancement, it would send + out all the output messages to the registered participants. Another way of + advancing the simulation would be based on concept of time request-time grant. Each of the + participants would request for certain time after it is done with its + work and get blocked until that is granted. The global time keeper would + grant time (and hence advance in simulation) that is lowest among the list + of time requests and all participants would advance to that time. + +#. Pause the simulation + + Some simulation platforms can pause the simulation if needed. We need provide + wrapper API to call simulation specific pause API. + +#. Resume the simulation + + Some simulation platforms can resume the simulation if needed. We need provide + API to call simulation specific resume API. + +#. Stop the simulation + + This will unregister itself from the simulation and stop the simulation. + + +APIs +==== + +#. **start_simulation()** + + - Connect to the simulation platform. + - Register with the platform as a participant + +#. **register_inputs(config=None, callback=None)** + + - Register the config containing inputs and outputs with the simulation platform. + - If agent provides a callback method, this will be called when new output values is received from simulation + +#. **publish_to_simulation(topic, message)** + + - Send message to simulation + +#. **make_time_request(time_steps)** + + - Make request to simulation to advance to next time delta + +#. **pause_simulation()** + + - Pause simulation + +#. **resume_simulation()** + + - Resume simulation + +#. **stop_simulation()** + + - Stops the simulation + +#. **is_sim_installed()** + + - Flag indicating if simulation is installed diff --git a/docs/source/core_services/service_agents/emailer/EmailerAgent.rst b/docs/source/agent-framework/operations-agents/emailer/emailer-agent.rst similarity index 91% rename from docs/source/core_services/service_agents/emailer/EmailerAgent.rst rename to docs/source/agent-framework/operations-agents/emailer/emailer-agent.rst index 0df6c9ddbc..f5afff0a28 100644 --- a/docs/source/core_services/service_agents/emailer/EmailerAgent.rst +++ b/docs/source/agent-framework/operations-agents/emailer/emailer-agent.rst @@ -1,8 +1,9 @@ -.. _EmailerAgent: +.. _Emailer-Agent: ============= Emailer Agent ============= + Emailer agent is responsible for sending emails for an instance. It has been written so that any agent on the instance can send emails through it via the "send_email" method or through the pubsub message bus using the topic "platform/send_email". @@ -10,8 +11,10 @@ can send emails through it via the "send_email" method or through the pubsub mes By default any alerts will be sent through this agent. In addition all emails will be published to the "record/sent_email" topic for a historian to be able to capture that data. + Configuration -~~~~~~~~~~~~~ +============= + A typical configuration for this agent is as follows. We need to specify the SMTP server address, email address of the sender, email addresses of all the recipients and minimum time for duplicate emails based upon the key. @@ -25,4 +28,5 @@ sender, email addresses of all the recipients and minimum time for duplicate ema "allow-frequency-minutes": 10 } -Finally package, install and start the agent. For more details, see :ref:`Agent Creation Walkthrough ` +Finally package, install and start the agent. For more details, see +:ref:`Agent Creation Walk-through ` diff --git a/docs/source/core_services/service_agents/failover/Failover.rst b/docs/source/agent-framework/operations-agents/failover/failover.rst similarity index 97% rename from docs/source/core_services/service_agents/failover/Failover.rst rename to docs/source/agent-framework/operations-agents/failover/failover.rst index ad68b5a4a3..7f317620a4 100644 --- a/docs/source/core_services/service_agents/failover/Failover.rst +++ b/docs/source/agent-framework/operations-agents/failover/failover.rst @@ -76,6 +76,6 @@ primary and secondary configuration files are shown below. - **simple_behavior** - Switch to turn on or off simple behavior. Both instances should match. - **remote_vip** - Address where *remote_id* can be reached. - **remote_serverkey** - The public key of the platform where *remote_id* lives. -- **agent_vip_identity** - The vip identity of the agent that we want to manage. +- **agent_vip_identity** - The :term:`VIP Identity` of the agent that we want to manage. - **heartbeat_period** - Send a message to *remote_id* with this period. Measured in seconds. - **timeout** - Consider a platform inactive if a heartbeat has not been received for *timeout* seconds. diff --git a/docs/source/core_services/service_agents/file_watch_publisher/FileWatchPublisher-Agent.rst b/docs/source/agent-framework/operations-agents/file-watch-publisher/file-watch-publisher-agent.rst similarity index 61% rename from docs/source/core_services/service_agents/file_watch_publisher/FileWatchPublisher-Agent.rst rename to docs/source/agent-framework/operations-agents/file-watch-publisher/file-watch-publisher-agent.rst index 4b6cf01e94..474c6aab02 100644 --- a/docs/source/core_services/service_agents/file_watch_publisher/FileWatchPublisher-Agent.rst +++ b/docs/source/agent-framework/operations-agents/file-watch-publisher/file-watch-publisher-agent.rst @@ -1,4 +1,4 @@ -FileWatchPulblisher Agent +File Watch Publisher Agent ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -15,16 +15,18 @@ A simple configuration for FileWatchPublisher with two files to monitor is as fo :: - [ - { - "file": "/var/log/syslog", - "topic": "platform/syslog" - }, - { - "file": "/home/volttron/tempfile.txt", - "topic": "temp/filepublisher" - } - ] + { + "files": [ + { + "file": "/var/log/syslog", + "topic": "platform/syslog" + }, + { + "file": "/home/volttron/tempfile.txt", + "topic": "temp/filepublisher" + } + ] + } Using this example configuration, FileWatchPublisher will watch syslog and tempFile.txt files and publish the changes per line on their respective topics. diff --git a/docs/source/agent-framework/operations-agents/index.rst b/docs/source/agent-framework/operations-agents/index.rst new file mode 100644 index 0000000000..a59d17e0a8 --- /dev/null +++ b/docs/source/agent-framework/operations-agents/index.rst @@ -0,0 +1,19 @@ +.. _Operations-Agents: + +========== +Operations +========== + +Operations agents assist with the operations of the platform systems and provide alerts for various platform and +environmental conditions. For details on each, please refer to the corresponding documents. + +.. toctree:: + :maxdepth: 2 + + emailer/emailer-agent + failover/failover + file-watch-publisher/file-watch-publisher-agent + message-debugger/message-debugging + sysmon/sysmon + threshold/threshold-agent + topic-watcher/topic-watcher-agent diff --git a/docs/source/devguides/agent_development/files/40-message-debugger.jpg b/docs/source/agent-framework/operations-agents/message-debugger/files/40-message-debugger.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/40-message-debugger.jpg rename to docs/source/agent-framework/operations-agents/message-debugger/files/40-message-debugger.jpg diff --git a/docs/source/specifications/message-debugging.rst b/docs/source/agent-framework/operations-agents/message-debugger/message-debugging-specification.rst similarity index 98% rename from docs/source/specifications/message-debugging.rst rename to docs/source/agent-framework/operations-agents/message-debugger/message-debugging-specification.rst index 9796963369..d864fb1ad3 100644 --- a/docs/source/specifications/message-debugging.rst +++ b/docs/source/agent-framework/operations-agents/message-debugger/message-debugging-specification.rst @@ -1,13 +1,14 @@ -.. _MessageDebugging: +.. _Message-Debugging-Specification: -Message Bus Visualization and Debugging - Specification -======================================================= +=================================== +Message Bus Debugging Specification +=================================== NOTE: This is a planning document, created prior to implementation of the VOLTTRON Message Debugger. It describes the tool's general goals, but it's not always accurate about specifics of the ultimate implementation. For a description of Message Debugging as implemented, with advice on how to configure and -use it, please see :doc:`Message-Debugging <../devguides/agent_development/Message-Debugging>`. +use it, please see :ref:`Message-Debugging `. Description ----------- diff --git a/docs/source/devguides/agent_development/Message-Debugging.rst b/docs/source/agent-framework/operations-agents/message-debugger/message-debugging.rst similarity index 97% rename from docs/source/devguides/agent_development/Message-Debugging.rst rename to docs/source/agent-framework/operations-agents/message-debugger/message-debugging.rst index f5dcce121c..fcbcca1720 100644 --- a/docs/source/devguides/agent_development/Message-Debugging.rst +++ b/docs/source/agent-framework/operations-agents/message-debugger/message-debugging.rst @@ -1,11 +1,14 @@ .. _Message-Debugging: +================= Message Debugging ================= -VOLTTRON agent messages are routed over the VOLTTRON message bus. -The Message Debugger Agent provides enhanced examination of this message stream's -contents as an aid to debugging and troubleshooting agents and drivers. +VOLTTRON agent messages are routed over the VOLTTRON message bus. The Message Debugger Agent provides enhanced +examination of this message stream's contents as an aid to debugging and troubleshooting agents and drivers. + +This feature is implemented to provide visibility into the ZeroMQ message bus. The RabbitMQ message bus includes +methods for message debugging by default in the `RabbitMQ management UI `_. When enabled, the Message Debugger Agent captures and records each message as it is routed. A second process, Message Viewer, provides a user interface that optimizes and filters the @@ -24,8 +27,9 @@ a given capture period by sender, receiver and topic. Another view displays the most-recently-published message, or message exchange, that satisfies the current filter criteria, continuously updated as new messages are routed. + Enabling the Message Debugger -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +============================= In order to use the Message Debugger, two steps are required: @@ -40,6 +44,7 @@ should be employed during development/debugging only, and should not be left ena a production environment. Example of starting VOLTTRON with the ``--msgdebug`` command line option: + :: (volttron) ./start-volttron ``--msgdebug`` @@ -47,6 +52,7 @@ Example of starting VOLTTRON with the ``--msgdebug`` command line option: If VOLTTRON is running in this mode, the stream of routed messages is available to a subscribing Message Debugger Agent. It can be started from volttron-ctl in the same fashion as other agents, for example: + :: (volttron) $ vctl status @@ -66,14 +72,15 @@ fashion as other agents, for example: e1 vcplatformagent-3.5.4 platform.agent vcp 47 volttroncentralagent-3.5.5 volttron.central vc -See :ref:`Agent Creation Walkthrough ` for further details on -installing and starting agents from vctl. +See :ref:`Agent Creation Walk-through ` for further details on +installing and starting agents from `vctl`. Once the Message Debugger Agent is running, it begins capturing message data and writing it to a SQLite database. + Message Viewer -~~~~~~~~~~~~~~ +============== The Message Viewer is a separate process that interacts with the Message Debugger Agent primarily via VOLTTRON RPC calls. These calls allow it to request and report on filtered sets @@ -85,12 +92,13 @@ created to satisfy a variety of specific debugging needs. For example, a viewer browser-based message debugging with a graphical user interface, or a viewer could transform message data into PCAP format for consumption by WireShark. -The Message Viewer in services/ops/MessageDebuggerAgent/messageviewer/viewer.py implements a +The Message Viewer in `services/ops/MessageDebuggerAgent/messageviewer/viewer.py` implements a command-line UI, subclassing Python's ``Cmd`` class. Most of the command-line options that it displays result in a MessageDebuggerAgent RPC request. The Message Viewer formats and displays the results. In Linux, the Message Viewer can be started as follows, and displays the following menu: + :: (volttron) $ cd services/ops/MessageDebuggerAgent/messageviewer @@ -122,12 +130,14 @@ In Linux, the Message Viewer can be started as follows, and displays the followi Please enter a command. Viewer> + Command-Line Help -~~~~~~~~~~~~~~~~~ +----------------- The Message Viewer offers two help levels. Simply typing ``help`` gives a list of available commands. If a command name is provided as an argument, advice is offered on how to use that command: + :: Viewer> help @@ -159,12 +169,14 @@ that command: . endtime : Matches rows with timestamps before the supplied time . (etc. -- see the structures of DebugMessage and DebugMessageExchange) + Debug Sessions -~~~~~~~~~~~~~~ +============== The Message Debugger Agent tags each message with a debug session ID (a serial number), which groups a set of messages that are bounded by a start time and an end time. The ``list_sessions`` command describes each session in the database: + :: Viewer> list_sessions @@ -179,13 +191,14 @@ but no session is active (i.e., because ``stop_session`` was used to stop it), m still written to the database, but they have no session ID. Filtered Display -~~~~~~~~~~~~~~~~ +---------------- The ``set_filter `` command enables filtered display of messages. A variety of properties can be filtered. In the following example, message filters are defined by session_id and sender, and the ``display_messages`` command displays the results: + :: Viewer> set_filter session_id 4 @@ -201,8 +214,9 @@ command displays the results: 11:51:01 incoming testagent pubsub 1197886248649056373.284581649 RPC pubsub.publish test_topic/test_subtopic - - - 11:51:01 outgoing testagent pubsub 1197886248649056373.284581649 RPC - - - - None + Debug Message Exchanges -~~~~~~~~~~~~~~~~~~~~~~~ +======================= A VOLTTRON message's request ID is not unique to a single message. A group of messages in an "exchange" (essentially a small conversation among agents) will often share a common request ID, for instance during RPC @@ -212,6 +226,7 @@ The following example uses the same filters as above, and then uses ``display_ex to display a single line for each message exchange, reducing the number of displayed rows from 6 to 2. Note that not all messages have a request ID; messages with no ID are absent from the responses to exchange queries. + :: Viewer> list_filters @@ -221,8 +236,9 @@ exchange queries. testagent platform.driver 11:51:00 - chargepoint1 Status AVAILABLE testagent pubsub 11:51:01 test_topic/test_subtopic - - None + Special Filters -~~~~~~~~~~~~~~~ +--------------- Most filters that can be set with the ``set_filter`` command are simple string matches on one or another property of a message. Some filters have special characteristics, though. @@ -232,6 +248,7 @@ inequalities that test for messages after a start time or before an end time. In the following example, note the use of quotes in the endtime value supplied to set_filter. Any filter value can be delimited with quotes. Quotes must be used when a value contains embedded spaces, as is the case here: + :: Viewer> list_sessions @@ -261,6 +278,7 @@ Another filter type with special behavior is ``set_filter topic ``. Ordina match on a message property. Since message topics are often expressed as hierarchical substrings, though, the ``topic`` filter does a substring match on the left edge of a message's topic, as in the following example: + :: Viewer> set_filter topic test_topic @@ -273,7 +291,7 @@ as in the following example: Viewer> Another filter type with special behavior is ``set_filter results_only 1``. In the JSON representation of a -response to an RPC call, for example an RPC call to a Master Driver interface, the response to the +response to an RPC call, for example an RPC call to a Platform Driver interface, the response to the RPC request typically appears as the value of a 'result' tag. The ``results_only`` filter matches only those messages that have a non-empty value for this tag. @@ -284,6 +302,7 @@ set_filter command syntax requires that a value be supplied as a parameter. In the following example, note the use of ``clear_filter `` to remove a single named filter from the list of filters that are currently in effect. There is also a ``clear_filters`` command, which clears all current filters. + :: Viewer> clear_filter topic @@ -296,8 +315,9 @@ command, which clears all current filters. testagent platform.driver 11:51:25 - chargepoint1 Status AVAILABLE testagent platform.driver 11:51:26 - chargepoint1 Status AVAILABLE + Streamed Display -~~~~~~~~~~~~~~~~ +---------------- In addition to exposing a set of RPC calls that allow other agents (like the Message Viewer) to query the Message Debugger Agent's SQLite database of recent messages, the Agent can also @@ -312,6 +332,7 @@ the message stream as it arrives. In the following ``display_message_stream`` example, the Message Viewer displays all messages sent by the agent named 'testagent', as they arrive. It continues to display messages until execution is interrupted with ctrl-C: + :: Viewer> clear_filters @@ -343,14 +364,16 @@ Message Viewer has incomplete information about how wide to make each column. In make guesses based on header widths, data widths in the first row received, and min/max values, and then wrap the data when it overflows the column boundaries.) + Single-Line Display -~~~~~~~~~~~~~~~~~~~ +------------------- Another filter with special behavior is ``set_filter freq ``. This filter, which takes a number N as its value, displays only one row, the most recently captured row that satisfies the filter criteria. (Like other filters, this filter can be used with either ``display_messages`` or ``display_exchanges``.) It then waits N seconds, reissues the query, and overwrites the old row with the new one. It continues this periodic single-line overwritten display until it is interrupted with ctrl-C: + :: Viewer> list_filters @@ -365,13 +388,15 @@ It continues this periodic single-line overwritten display until it is interrupt width of each column. In this single-line display format, data gets truncated if it doesn't fit, because no wrapping can be performed -- only one display line is available.) + Displaying Exchange Details -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------------- The ``display_exchange_details `` command provides a way to get more specific details about an exchange, i.e. about all messages that share a common request ID. At low or medium verbosity, when this command is used (supplying the relevant request ID, which can be obtained from the output of other commands), it displays one row for each message: + :: Viewer> set_filter sender testagent @@ -391,6 +416,7 @@ from the output of other commands), it displays one row for each message: At high verbosity, ``display_exchange_details`` switches display formats, showing all properties for each message in a json-like dictionary format: + :: Viewer> set_verbosity high @@ -451,12 +477,14 @@ each message in a json-like dictionary format: "vip_signature": "VIP1" } + Verbosity -~~~~~~~~~ +========= As mentioned in the previous section, Agent and Viewer behavior can be adjusted by changing the current verbosity with the ``set_verbosity `` command. The default verbosity is low. low, medium and high levels are available: + :: Viewer> set_verbosity high @@ -484,6 +512,7 @@ At low verbosity: The following "interesting" columns are displayed at low and medium verbosity levels (at high verbosity levels, all properties are displayed): + :: Debug Message Debug Message Exchange Debug Session @@ -502,6 +531,7 @@ The following "interesting" columns are displayed at low and medium verbosity le Messages from the following senders, or to the following receivers, are excluded at low and medium verbosity levels: + :: Sender Receiver @@ -521,7 +551,7 @@ are defined as parameters in Message Viewer, and can be adjusted as necessary by global value lists in viewer.py. Session Statistics -~~~~~~~~~~~~~~~~~~ +================== One useful tactic for starting at a summary level and drilling down is to capture a set of messages for a session and then examine the counts of sending and receiving agents, @@ -535,6 +565,7 @@ the counts and can also reduce the number of columns and rows. The following example shows the command being used to list all senders and receivers for messages sent during debug session 7: + :: Viewer> list_sessions @@ -561,6 +592,7 @@ messages sent during debug session 7: The ``display_session_details_by_topic `` command is similar to ``display_session_details_by_agent``, but each row contains statistics for a topic instead of for a receiving agent: + :: Viewer> display_session_details_by_topic 7 @@ -627,8 +659,9 @@ but each row contains statistics for a topic instead of for a receiving agent: heartbeat/pubsub - - - - - 2 - test_topic/test_subtopic - - - - - 8 8 + Database Administration -~~~~~~~~~~~~~~~~~~~~~~~ +======================= The Message Debugger Agent stores message data in a SQLite database's DebugMessage, DebugMessageExchange and DebugSession tables. If the database isn't present already @@ -642,6 +675,7 @@ The ``delete_session `` command deletes the database's DebugSession with the indicated ID, and also deletes all DebugMessage and DebugMessageExchange rows with that session ID. In the following example, ``delete_session`` deletes the 60,000 DebugMessages that were captured during a 20-minute period as session 6: + :: Viewer> list_sessions @@ -669,6 +703,7 @@ DebugMessages that were captured during a 20-minute period as session 6: The ``delete_database`` command deletes the entire SQLite database, removing all records of previously-captured DebugMessages, DebugMessageExchanges and DebugSessions. The database will be re-created the next time a debug session is started. + :: Viewer> delete_database @@ -689,8 +724,9 @@ of this kind prevents use of the Message Viewer's ``delete_database`` command, t database can be deleted directly from the filesystem. By default, it is located in $VOLTTRON_HOME's ``run`` directory. + Implementation Details -~~~~~~~~~~~~~~~~~~~~~~ +====================== .. image:: files/40-message-debugger.jpg @@ -739,3 +775,12 @@ which are formatted for display by MessageViewerCmd. MessageDebuggerAgent RPC calls, which are agent-agent interactions, it builds a "connection" that manages a temporary agent. This is a standard VOLTTRON pattern that is also used, for instance, by Volttron Central. + +View the :ref:`message debugging specification ` for more information on the message +debugging implementation for ZeroMQ. + + +.. toctree:: + + message-debugging-specification + diff --git a/docs/source/core_services/service_agents/sysmon/sysmon.rst b/docs/source/agent-framework/operations-agents/sysmon/sysmon.rst similarity index 100% rename from docs/source/core_services/service_agents/sysmon/sysmon.rst rename to docs/source/agent-framework/operations-agents/sysmon/sysmon.rst diff --git a/docs/source/core_services/service_agents/threshold/ThresholdAgent.rst b/docs/source/agent-framework/operations-agents/threshold/threshold-agent.rst similarity index 86% rename from docs/source/core_services/service_agents/threshold/ThresholdAgent.rst rename to docs/source/agent-framework/operations-agents/threshold/threshold-agent.rst index b9abbadbc9..8f0ac3d4c3 100644 --- a/docs/source/core_services/service_agents/threshold/ThresholdAgent.rst +++ b/docs/source/agent-framework/operations-agents/threshold/threshold-agent.rst @@ -1,5 +1,6 @@ -.. _ThresholdAgent: +.. _Threshold-Agent: +========================= Threshold Detection Agent ========================= @@ -8,9 +9,9 @@ topic exceeds or falls below a configured value. The agent can be configured to watch topics are associated with a single value or to watch devices' all topics. Configuration -------------- +============= -The ThresholdDetectionAgent supports the :ref:`configstore ` +The Threshold Detection Agent supports the :ref:`config store ` and can be configured with a file named "config". The file must be in the following format: diff --git a/docs/source/core_services/service_agents/topic_watcher/TopicWatcherAgent.rst b/docs/source/agent-framework/operations-agents/topic-watcher/topic-watcher-agent.rst similarity index 100% rename from docs/source/core_services/service_agents/topic_watcher/TopicWatcherAgent.rst rename to docs/source/agent-framework/operations-agents/topic-watcher/topic-watcher-agent.rst diff --git a/docs/source/core_services/service_agents/Platform-Service-Standardization.rst b/docs/source/agent-framework/platform-service-standardization.rst similarity index 100% rename from docs/source/core_services/service_agents/Platform-Service-Standardization.rst rename to docs/source/agent-framework/platform-service-standardization.rst diff --git a/docs/source/setup/Third-Party-Agents.rst b/docs/source/agent-framework/third-party-agents.rst similarity index 100% rename from docs/source/setup/Third-Party-Agents.rst rename to docs/source/agent-framework/third-party-agents.rst diff --git a/docs/source/agent-framework/web-framework.rst b/docs/source/agent-framework/web-framework.rst new file mode 100644 index 0000000000..2d6d7064bd --- /dev/null +++ b/docs/source/agent-framework/web-framework.rst @@ -0,0 +1,123 @@ +.. _Web-Framework: + +============= +Web Framework +============= + +This document describes the interaction between web enabled agents and the Platform Web Service agent. + +The web framework enables agent developers to expose JSON, static, and websocket endpoints. + +Web SubSystem +============= + +Enabling +-------- + +The web subsystem is not enabled by default as it is only required by a small subset of agents. +To enable the web subsystem the platform instance must have an enabled the web server and the agent +must pass enable_web=True to the agent constructor. + +.. code-block:: python + + class WebAgent(Agent): + def __init__(self, **kwargs): + super(WebAgent, self).__init__(enable_web=True,**kwargs) + + +MANIFEST File +------------- + +The MANIFEST.in file is used to package additional files needed for your web enabled agent. +Please read the python packaging `documentation `_ +on the MANIFEST.in file. For most cases, i.e. when you only need to include a webroot directory for html +and javascript, the manifest file only needs to include the `recursive-include` command. For example, the entirety +of the VolttronCentral MANIFEST.in file is: + +.. code-block:: python + + recursive-include volttroncentral/webroot * + +The MANIFEST.in file should be located in the root directory of the agent. All pathing for the MANIFEST.in file +commands are relative to this root directory. + +Routes +------- + +The web subsystem allows an agent to register three different types of routes; file paths, endpoints, and websockets. + +.. note:: + For all routes the first match wins. Therefore ordering which routes are registered first becomes important. + + +File Path +~~~~~~~~~ + +A path-based route that allows the agent to specify a prefix and a static path on the file system to serve static files. +The prefix can be a regular expression. + +.. note:: + The static path should point to a location within the installed agent's agent-data directory. + You MUST have read access to the directory. + +The below example is based on the registered route in VolttronCentral. + + +.. code-block:: python + + @Core.receiver('onstart') + def onstart(self, sender, **kwargs): + """ + Allow serving of static content from 'webroot' + """ + # Sets WEB_ROOT to be the path to the webroot directory + # in the agent-data directory of the installed agent.. + WEB_ROOT = os.path.abspath(p.abspath(p.join(p.dirname(__file__), 'webroot/'))) + # Serves the static content from 'webroot' directory + self.vip.web.register_path(r'^/vc/.*', WEB_ROOT) + + +Endpoint +~~~~~~~~~ + +JSON endpoints allows an agent to serve data responses to specific queries from a web client's non-static responses. +The agent will pass a callback to the subsystem which will be called when the endpoint is triggered. + +.. code-block:: python + + def jsonrpc(env, data): + """ + The main entry point for jsonrpc data + """ + return {'dyamic': 'data'} + + @Core.receiver('onstart') + def onstart(self, sender, **kwargs): + """ + Register the /vc/jsonrpc endpoint for doing json-rpc based methods + """ + self.vip.web.register_endpoint(r'/vc/jsonrpc', self.jsonrpc) + + +Websocket +~~~~~~~~~ + +Websocket endpoints allow bi-directional communication between the client and the server. +Client connections can be authenticated during the opening of a websocket through the response of an open callback. + + +.. code-block:: python + + def _ws_opened(self, fromip, endpoint): + _log.debug("OPENED ip: {} endpoint: {}".format(fromip, endpoint)) + + def _ws_closed(self, endpoint): + _log.debug("CLOSED endpoint: {}".format(endpoint)) + + def _ws_received(self, endpoint, message): + _log.debug("RECEIVED endpoint: {} message: {}".format(endpoint, + message)) + + @Core.receiver('onstart') + def onstart(self, sender, **kwargs): + self.vip.web.register_websocket(r'/vc/ws', self._ws_opened, self._ws_closed, self._ws_received) diff --git a/docs/source/api_doc_config.yml b/docs/source/api_doc_config.yml new file mode 100644 index 0000000000..5b65cdb57c --- /dev/null +++ b/docs/source/api_doc_config.yml @@ -0,0 +1,14 @@ +services: + path: services/core + agent_excludes: + - DNP3Agent + - MarketServiceAgent + - OpenADRVenAgent + - OpenEISHistorian + - ObixHistoryPublish + - IEEE2030_5Agent + file_excludes: + - CrateHistorian/scripts/* +ops: + path: services/ops + diff --git a/docs/source/apidocs-templates/module.rst_t b/docs/source/apidocs-templates/module.rst_t new file mode 100644 index 0000000000..2490278551 --- /dev/null +++ b/docs/source/apidocs-templates/module.rst_t @@ -0,0 +1,9 @@ +{%- if show_headings %} +{{- [basename, "module"] | join(' ') | e | heading }} + +{% endif -%} +.. automodule:: {{ qualname }} +{%- for option in automodule_options %} + :{{ option }}: +{%- endfor %} + diff --git a/docs/source/apidocs-templates/package.rst_t b/docs/source/apidocs-templates/package.rst_t new file mode 100644 index 0000000000..9cd1517982 --- /dev/null +++ b/docs/source/apidocs-templates/package.rst_t @@ -0,0 +1,51 @@ +{%- macro automodule(modname, options) -%} +.. automodule:: {{ modname }} +{%- for option in options %} + :{{ option }}: +{%- endfor %} +{%- endmacro %} + +{%- macro toctree(docnames) -%} +.. toctree:: + :maxdepth: {{ maxdepth }} +{% for docname in docnames %} + {{ docname }} +{%- endfor %} +{%- endmacro %} + +{%- if is_namespace %} +{{- [pkgname, "namespace"] | join(" ") | e | heading }} +{% else %} +{{- [pkgname, "package"] | join(" ") | e | heading }} +{% endif %} + +{%- if modulefirst and not is_namespace %} +{{ automodule(pkgname, automodule_options) }} +{% endif %} + +{%- if subpackages %} +Subpackages +----------- + +{{ toctree(subpackages) }} +{% endif %} + +{%- if submodules %} +{% if separatemodules %} +{{ toctree(submodules) }} +{% else %} +{%- for submodule in submodules %} +{% if show_headings %} +{{- [submodule, "module"] | join(" ") | e | heading(2) }} +{% endif %} +{{ automodule(submodule, automodule_options) }} +{% endfor %} +{%- endif %} +{%- endif %} + +{%- if not modulefirst and not is_namespace %} +Module contents +--------------- + +{{ automodule(pkgname, automodule_options) }} +{% endif %} diff --git a/docs/source/apidocs-templates/toc.rst_t b/docs/source/apidocs-templates/toc.rst_t new file mode 100644 index 0000000000..f0877eeb2f --- /dev/null +++ b/docs/source/apidocs-templates/toc.rst_t @@ -0,0 +1,8 @@ +{{ header | heading }} + +.. toctree:: + :maxdepth: {{ maxdepth }} +{% for docname in docnames %} + {{ docname }} +{%- endfor %} + diff --git a/docs/source/community_resources/contributing.rst b/docs/source/community_resources/contributing.rst deleted file mode 100644 index c977f9960a..0000000000 --- a/docs/source/community_resources/contributing.rst +++ /dev/null @@ -1,224 +0,0 @@ -.. _contributing: - -Contributing to VOLTTRON -======================== - -As an open source project VOLTTRON requires input from the community to keep development focused on new and useful -features. To that end were are revising our commit process to hopefully allow more committers to be apart of the -community. The following document outlines the process for source code and documentation to be submitted. -There are GUI tools that may make this process easier, however this document will focus on what is required from the -command line. - -The only requirements for contributing are Git (Linux version control software) and your favorite web browser. - -Getting Started -~~~~~~~~~~~~~~~ - -Forking the main VOLTTRON repository ------------------------------------- - -The first step to editing the repository is to fork it into your own user space. This is done by pointing -your favorite web browser to -http://github.com/VOLTTRON/volttron and then clicking "Fork" on the upper right of the screen. (Note: You must have a -GitHub account to fork the repository. If you don't have one, we encourage you to `sign up https://github.com/join?source_repo=VOLTTRON%2Fvolttron`.) - -Cloning 'YOUR' VOLTTRON forked repository ------------------------------------------ - -The next step in the process is to get your forked repository down to your computer to work on. -This will create an identical copy of the GitHub repository on your local machine. To do this you need to know the address of -your repository. The URL to your repository address will be "https://github.com//volttron.git". From a terminal execute the following commands which will create a directory "git" in your home directory and then change to that directory, clone from your repository, and finally change into the cloned repository. - -.. note:: - - VOLTTRON uses develop as its main development branch rather than the standard master branch (the default). - -.. code-block:: bash - - # Here, we are assuming you are doing develop work in a folder called `git`. If you'd rather use something else, that's OK. - mkdir -p ~/git - cd ~/git - git clone -b develop https://github.com//volttron.git - cd volttron - -Adding and Committing files ---------------------------- - -Now that you have your repository cloned, it's time to start doing some modifications. Using a simple text editor -you can create or modify any file in the volttron directory. After making a modification or creating a file -it is time to move it to the stage for review before committing to the local repository. For this example let's assume -we have made a change to `README.md` in the root of the volttron directory and added a new file called `foo.py`. To get -those files in the staging area (preparing for committing to the local repository) we would execute the following commands - -.. code-block:: bash - - git add foo.py - git add README.md - - # Alternatively in one command - git add foo.py README.md - -After adding the files to the stage you can review the staged files by executing - -.. code-block:: bash - - git status - -Finally in order to commit to the local repository we need to think of what change we actually did and be able to -document it. We do that with a commit message (the -m parameter) such as the following. - -.. code-block:: bash - - git commit -m "Added new foo.py and updated copyright of README.md" - -Pushing to the remote repository --------------------------------- - -The next step is to share our changes with the world through GitHub. We can do this by pushing the commits -from your local repository out to your GitHub repository. This is done by the following command. - -.. code-block:: bash - - git push - # alternative where origin is the name of the remote you are pushing to - # more on that later. - git push origin - -Getting modifications to the main VOLTTRON repository ------------------------------------------------------ - -Now we want our changes to be added into the main VOLTTRON repository. After all our `foo.py` can cure a lot of the -world's problems and of course it is always good to have a copyright with the correct year. Open your browser -to https://github.com/VOLTTRON/volttron/compare/develop...YOUR_USERNAME:develop. - -On that page the base fork should always be VOLTTRON/volttron with the base develop whilst the head fork should -be /volttron and the compare should be the branch in your repository to pull from. Once you have -verified that you have got the right changes made then, click on create pull request, enter a title and description that represent your changes and submit the pull request. - -What happens next? ------------------- -Once you create a pull request, one or more VOLTTRON team members will review your changes and either accept them as is -or ask for modifications in order to have your commits accepted. You will be automatically emailed through the GitHub -notification system when this occurs (assuming you haven't changed your GitHub preferences). - -Next Steps -~~~~~~~~~~ - -Merging changes from the main VOLTTRON repository -------------------------------------------------- - -As time goes on the VOLTTRON code base will continually be modified so the next time you want to work on a change to -your files the odds are your local and remote repository will be out of date. In order to get your remote VOLTTRON -repository up to date with the main VOLTTRON repository you could simply do a pull request to your remote repository -from the main repository. That would involve pointing your browser at -"https://github.com/YOUR_USERNAME/volttron/compare/develop...VOLTTRON:develop". - -Click the 'Create Pull Request' button. On the following page click the -'Create Pull Request' button. On the next page click 'Merge Pull Request' button. - -Once your remote is updated you can now pull from your remote repository into your local repository through the -following command: - -.. code-block:: bash - - git pull - -The other way to get the changes into your remote repository is to first update your local repository with the -changes from the main VOLTTRON repository and then pushing those changes up to your remote repository. To do that you -need to first create a second remote entry to go along with the origin. A remote is simply a pointer to the url of a -different repository than the current one. Type the following command to create a new remote called 'upstream' - -.. code-block:: bash - - git remote add upstream https://github.com/VOLTTRON/volttron - -To update your local repository from the main VOLTTRON repository then execute the following command where upstream is -the remote and develop is the branch to pull from. - -.. code-block:: bash - - git pull upstream develop - -Finally to get the changes into your remote repository you can execute - -.. code-block:: bash - - git push origin - - -Other commands to know -~~~~~~~~~~~~~~~~~~~~~~ - -At this point in time you should have enough information to be able to update both your local and remote repository -and create pull requests in order to get your changes into the main VOLTTRON repository. The following commands are -other commands to give you more information that the preceeding tutorial went through - -Viewing what the remotes are in our local repository ----------------------------------------------------- - -.. code-block:: bash - - git remote -v - -Stashing changed files so that you can do a merge/pull from a remote --------------------------------------------------------------------- - -.. code-block:: bash - - git stash save 'A commment to be listed' - -Applying the last stashed files to the current repository ---------------------------------------------------------- - -.. code-block:: bash - - git stash pop - -Finding help about any git command ----------------------------------- - -.. code-block:: bash - - git help - git help branch - git help stash - git help push - git help merge - -Creating a branch from the branch and checking it out ------------------------------------------------------ - -.. code-block:: bash - - git checkout -b newbranchname - -Checking out a branch (if not local already will look to the remote to checkout) --------------------------------------------------------------------------------- - -.. code-block:: bash - - git checkout branchname - -Removing a local branch (cannot be current branch) --------------------------------------------------- - -.. code-block:: bash - - git branch -D branchname - -Determine the current and show all local branches -------------------------------------------------- - -.. code-block:: bash - - git branch - -Hooking into other services -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The main VOLTTRON repository is hooked into an automated build tool called travis-ci. Your remote repository can be -automatically built with the same tool by hooking your account into travis-ci's environment. -To do this go to https://travis-ci.org and create an account. You can using your GitHub login directly to this -service. Then you will need to enable the syncing of your repository through the travis-ci service. Finally you need -to push a new change to the repository. If the build fails you will receive an email notifying you of that fact and -allowing you to modify the source code and then push new changes out. diff --git a/docs/source/community_resources/documentation.rst b/docs/source/community_resources/documentation.rst deleted file mode 100644 index 401eda7fa7..0000000000 --- a/docs/source/community_resources/documentation.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _documentation: - -Contributing Documentation -============================= - -The Community is encouraged to contribute documentation back to the project as they work through use cases the -developers may not have considered or documented. By contributing documentation back, the community can -learn from each other and build up a much more extensive knowledge base. - -|VOLTTRON| documentation utilizes ReadTheDocs: http://volttron.readthedocs.io/en/develop/ and is built -using the `Sphinx `_ Python library with static content in -`Restructured Text `_. - -Building the Documentation ---------------------------- - -Static documentation can be found in the `docs/source` directory. Edit or create new .rst files to add new content -using the `Restructured Text `_ format. To see the results -of your changes. the documentation can be built locally through the command line using the following instructions. - -If you've already :ref:`bootstrapped ` |VOLTTRON|, do the following while activated. If not, -this will also pull down the necessary |VOLTTRON| libraries. - -.. code-block:: bash - - python bootstrap.py --documentation - cd docs - make html - -Then, open your browser to the created local files: - -.. code-block:: bash - - file:///home//git/volttron/docs/build/html/overview/index.html - - -When complete, changes can be contributed back using the same process as code :ref:`contributions ` by -creating a pull request. When the changes are accepted and merged, they will be reflected in the ReadTheDocs site. - -.. |VOLTTRON| unicode:: VOLTTRON U+2122 diff --git a/docs/source/conf.py b/docs/source/conf.py index 543936cd32..2845e0270c 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -21,7 +21,7 @@ import os from glob import glob from mock import Mock as MagicMock -from recommonmark.parser import CommonMarkParser +import yaml from volttron.platform.agent.utils import execute_command @@ -32,22 +32,20 @@ def __getattr__(cls, name): return Mock() -MOCK_MODULES = ['loadshape', 'numpy', 'sympy', 'xlrd', 'stomp', 'oadr2', - 'pyodbc', 'lxml', 'stomp.listener', - 'sympy.parsing', 'sympy.parsing.sympy_parser', 'pytest'] -sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) - +autodoc_mock_imports = ['loadshape', 'numpy', 'sympy', 'xlrd', 'stomp', 'oadr2', 'pyodbc', 'lxml', 'pytest', + 'pint', 'pandas', 'suds', 'paho', 'pymongo', 'bson', 'subprocess32', 'heaters', 'meters', + 'hvac', 'blinds', 'vehicles'] # -- Project information ----------------------------------------------------- project = 'VOLTTRON' -copyright = '2019, The VOLTTRON Community' +copyright = '2020, The VOLTTRON Community' author = 'The VOLTTRON Community' # The short X.Y version -version = '7.0' +version = '8.0' # The full version, including alpha/beta/rc tags -release = '7.0 Release Candidate' +release = '8.0' # -- General configuration --------------------------------------------------- @@ -72,6 +70,8 @@ def __getattr__(cls, name): 'sphinx.ext.autosectionlabel', # http://www.sphinx-doc.org/en/master/usage/extensions/todo.html 'sphinx.ext.todo', + 'sphinx.ext.intersphinx', + 'm2r2' ] # prefix sections with the document so that we can cross link @@ -89,10 +89,9 @@ def __getattr__(cls, name): # # source_suffix = ['.rst', '.md'] source_suffix = ['.rst', '.md'] -source_parsers = {'.md': CommonMarkParser} -# The master toctree document. -master_doc = 'index' +# The top-level toctree document. +main_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -169,7 +168,7 @@ def __getattr__(cls, name): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'VOLTTRON.tex', 'VOLTTRON Documentation', + (main_doc, 'VOLTTRON.tex', 'VOLTTRON Documentation', 'The VOLTTRON Community', 'manual'), ] @@ -179,7 +178,7 @@ def __getattr__(cls, name): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ - (master_doc, 'volttron', 'VOLTTRON Documentation', + (main_doc, 'volttron', 'VOLTTRON Documentation', [author], 1) ] @@ -190,7 +189,7 @@ def __getattr__(cls, name): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'VOLTTRON', 'VOLTTRON Documentation', + (main_doc, 'VOLTTRON', 'VOLTTRON Documentation', author, 'VOLTTRON', 'One line description of project.', 'Miscellaneous'), ] @@ -201,7 +200,9 @@ def __getattr__(cls, name): # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'https://docs.python.org/2.7': None} +intersphinx_mapping = {'https://docs.python.org/3.6': None, + 'volttron-ansible': ('https://volttron.readthedocs.io/projects/volttron-ansible/en/main/', + None)} # -- Options for todo extension ---------------------------------------------- @@ -220,13 +221,13 @@ def setup(app): :param app: """ app.connect('builder-inited', generate_apidoc) -# app.connect('build-finished', clean_apirst) + # For now clean before building so that we can use the rst generated for debugging issues + # app.connect('build-finished', clean_api_rst) -# -# script_dir = os.path.dirname(os.path.realpath(__file__)) -# apidocs_base_dir =os.path.abspath(script_dir + "/apidocs") -# -# + +script_dir = os.path.dirname(os.path.realpath(__file__)) +apidocs_base_dir = os.path.abspath(script_dir + "/volttron-api") +volttron_root = os.path.abspath(os.path.join(script_dir, "../..")) def generate_apidoc(app): @@ -237,107 +238,87 @@ def generate_apidoc(app): :return: """ - volttron_src = os.path.abspath('../volttron') - - if os.environ.get("READTHEDOCS"): - volttron_src = os.path.abspath('../../volttron') - - # Exclusions must be full paths to directories - exlusions = [ - os.path.join(volttron_src, 'lint/'), - os.path.join(volttron_src, 'drivers/') - ] - cmd = ["sphinx-apidoc", '-M', '-d 4', '-o', 'source/volttron_api', '--force', volttron_src] - - cmd.extend(exlusions) - print("The command is: {}".format(cmd)) - - execute_command(cmd) - -# print("\n##In run_apidocs##\n") -# global script_dir, apidocs_base_dir -# -# os.makedirs(apidocs_base_dir, 0755) -# file_name = os.path.join(script_dir,"../docs_exclude_list.txt" ) -# services_excludes = [] -# volttron_excludes = ['tests/**/*'] -# examples_excludes = [] -# -# if os.path.exists(file_name): -# print "file_name {} exists".format(file_name) -# with open(file_name,'r') as file: -# for line in file: -# print "line is {}".format(line) -# if line.startswith('services'): -# _add_to_excludes(services_excludes, line) -# elif line.startswith('volttron'): -# _add_to_excludes(volttron_excludes, line) -# elif line.startswith('examples'): -# _add_to_excludes(examples_excludes, line) -# print ("processed exclude list") -# print ("services {}".format(services_excludes)) -# print ("volttron excludes {}".format(volttron_excludes)) -# -# # generate api-docs for services/core -# docs_subdir=os.path.join(apidocs_base_dir, "services") -# agent_dirs = glob(script_dir+"/../../services/core/*/") -# run_apidoc(docs_subdir, agent_dirs, services_excludes) -# -# # generate api-docs for examples -# docs_subdir = os.path.join(apidocs_base_dir, "examples") -# agent_dirs = glob(script_dir + "/../../examples/*/") -# agent_dirs += glob(script_dir + "/../../examples/MarketAgents/*/") -# run_apidoc(docs_subdir, agent_dirs, examples_excludes) -# -# # generate api-docs for platform core and drivers -# sys.path.insert(0, -# os.path.abspath(script_dir + "/../../volttron")) -# print("Added to sys path***: {}".format(os.path.abspath(script_dir + "/../.."))) -# -# cmd = ["sphinx-apidoc", '--force', '-o', -# os.path.join(apidocs_base_dir, "volttron"), -# script_dir + "/../../volttron"] -# cmd.extend(volttron_excludes) -# subprocess.check_call(cmd) -# -# -# def _add_to_excludes(application_excludes, line): -# global script_dir -# volttron_root = os.path.abspath(os.path.join(script_dir, "../..")) -# application_excludes.append(os.path.join(volttron_root, line)) -# -# -# def run_apidoc(docs_dir, agent_dirs, exclude_list): -# """ -# Runs sphinx-apidoc on all subdirectories under the given directory. -# commnad runs with --force and exclude any setup.py file in the subdirectory -# :param docs_dir: The base directory into with .rst files are generated. -# :param module_services_path: directory to search for packages to document -# """ -# -# for agent_dir in agent_dirs: -# agent_dir = os.path.abspath(agent_dir) -# agent_dir = agent_dir[:-1] if agent_dir.endswith("/") else agent_dir -# sys.path.insert(0, agent_dir) -# print "Added to syspath {}".format(agent_dir) -# name = os.path.basename(agent_dir) -# cmd = ["sphinx-apidoc", '--force', '-e', '-o', -# os.path.join(apidocs_base_dir, "volttron"), -# script_dir + "/../../volttron"] -# cmd.extend(exclude_list) -# print("RuNNING COMMAND:") -# print(cmd) -# subprocess.check_call(cmd) -# -# -# def clean_apirst(app, exception): -# """ -# Deletes folder containing all auto generated .rst files at the end of -# sphinx build immaterial of the exit state of sphinx build. -# :param app: -# :param exception: -# """ -# global apidocs_base_dir -# import shutil -# print("Cleanup: Removing apidocs directory {}".format(apidocs_base_dir)) -# shutil.rmtree(apidocs_base_dir) + print("\n##In run_apidocs##\n") + clean_api_rst(app, None) + global script_dir, apidocs_base_dir + + os.makedirs(apidocs_base_dir, 0o755) + config = _read_config() + # generate api-docs for each api docs directory + for docs_subdir in config.keys(): + docs_subdir_path = os.path.join(apidocs_base_dir, docs_subdir) + agent_dirs = glob(os.path.join(volttron_root, config[docs_subdir]["path"], "*/")) + file_excludes = [] + if config[docs_subdir].get("file_excludes"): + for exclude_pattern in config[docs_subdir].get("file_excludes", []): + file_excludes.append(os.path.join(volttron_root, config[docs_subdir]["path"], exclude_pattern)) + print("after file excludes. calling apidoc") + agent_excludes = \ + config[docs_subdir].get("agent_excludes") if config[docs_subdir].get("agent_excludes", []) else [] + run_apidoc(docs_subdir_path, agent_dirs, agent_excludes, file_excludes) + print("COMPLETED RUNNING API DOC") + + +def run_apidoc(docs_dir, agent_dirs, agent_excludes, exclude_pattern): + """ + Runs sphinx-apidoc on all subdirectories under the given directory. + commnad runs with --force and exclude any setup.py file in the subdirectory + :param docs_dir: The base directory into with .rst files are generated. + :param agent_dirs: directory to search for packages to document + :param agent_excludes: agent directories to be skipped + :param exclude_pattern: file name patterns to be excluded. This passed on to sphinx-apidoc command for exclude + """ + print(f"In run apidoc params {docs_dir}, {agent_dirs}, {agent_excludes}, {exclude_pattern}") + for agent_src_dir in agent_dirs: + agent_src_dir = os.path.abspath(agent_src_dir) + agent_src_dir = agent_src_dir[:-1] if agent_src_dir.endswith("/") else agent_src_dir + name = os.path.basename(agent_src_dir) + agent_doc_dir = os.path.join(docs_dir, name) + if name not in agent_excludes: + sys.path.insert(0, agent_src_dir) + cmd = ["sphinx-apidoc", '-e', '-a', '-M', '-d 4', + '-t', os.path.join(script_dir, 'apidocs-templates'), + '--force', '-o', agent_doc_dir, agent_src_dir, + os.path.join(agent_src_dir, "setup.py"), os.path.join(agent_src_dir, "conftest.py") + ] + + cmd.extend(exclude_pattern) + subprocess.check_call(cmd) + grab_agent_readme(agent_src_dir, agent_doc_dir) + + +def _read_config(): + filename = os.path.join(script_dir, "api_doc_config.yml") + data = {} + try: + with open(filename, 'r') as yaml_file: + data = yaml.safe_load(yaml_file) + except IOError as exc: + print("Error reading from file: {}".format(filename)) + raise exc + except yaml.YAMLError as exc: + print("Yaml Error: {}".format(filename)) + raise exc + return data + + +def grab_agent_readme(agent_src_dir, agent_doc_dir): + src = os.path.join(agent_src_dir, "README.md") + dst = os.path.join(agent_doc_dir, "README.md") + os.symlink(src, dst) + with open(os.path.join(agent_doc_dir, "modules.rst"), "a") as f: + f.write(" Agent README ") + + +def clean_api_rst(app, exception): + """ + Deletes folder containing all auto generated .rst files at the end of + sphinx build immaterial of the exit state of sphinx build. + :param app: + :param exception: + """ + global apidocs_base_dir + import shutil + if os.path.exists(apidocs_base_dir): + print("Cleanup: Removing apidocs directory {}".format(apidocs_base_dir)) + shutil.rmtree(apidocs_base_dir) diff --git a/docs/source/core_services/config_store/index.rst b/docs/source/core_services/config_store/index.rst deleted file mode 100644 index 74b2d358b1..0000000000 --- a/docs/source/core_services/config_store/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _VOLTTRON-Configuration-Store: - -============================ -VOLTTRON Configuration Store -============================ - -The configuration store provides storage for agent configurations and an agent interface to facilitate dynamic agent configuration. - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/control/AgentAutostart.rst b/docs/source/core_services/control/AgentAutostart.rst deleted file mode 100644 index 6308fb081d..0000000000 --- a/docs/source/core_services/control/AgentAutostart.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _Agent-Autostart: - -Agent Autostart -=============== - -An agent can be setup to start when the platform is started with the -"enable" command. This command also allows a priority to be set (0-100, -default 50) so that agents can be started after any dependencies. This -command can also be used with the --tag or --name options. - -``vctl enable <--priority PRIORITY>`` diff --git a/docs/source/core_services/control/AgentManagement.rst b/docs/source/core_services/control/AgentManagement.rst deleted file mode 100644 index df8ef1c6b4..0000000000 --- a/docs/source/core_services/control/AgentManagement.rst +++ /dev/null @@ -1,113 +0,0 @@ -.. _Agent-Lifecycle-Management: - -Agent Lifecyle Management -~~~~~~~~~~~~~~~~~~~~~~~~~ - -The VOLTTRON platform has several commands for controlling the lifecycle -of agents. This page discusses how to use them, for details of operation -please see :ref:`PlatformConfiguration ` - -**These examples assume the VOLTTRON environment has been activated (. -env/bin/activate). If not, add "bin/" to all commands.** - -Agent Packaging -=============== - -The "vpkg" command is used for packaging and configuring agents. -It is not necessary to have the platform running to use this command. -The platform uses `Python Wheel `__ -for its packaging and follows the Wheel naming -`convention `__. - -To create an agent package, call ``vpkg ``. - -For instance: ``vpkg package examples/ListenerAgent`` - -The ``package`` command uses the setup.py in the agent directory to -create the package. The name and version number portion of the Wheel -filename come from this. The resulting wheels are created at -"~/.volttron/packaged". - -For example: -``~/.volttron/packaged/listeneragent-3.0-py2-none-any.whl``. - -Agent Configuration -=================== - -Agent packages are configured with the -``vpkg configure `` command. It is -suggested that this file use json formatting but the agent can be -written to interpret any format it requires. The configuration of a -particular agent is opaque to the VOLTTRON platform. The location of the -agent config file is passed as an environmental variable "AGENT\_CONFIG" -which the provided utilities read in and pass to the agent. - -An example config file passing in some parameters: - -:: - - { - - "agentid": "listener1", - "message": "hello" - } - -Agent Installation and Removal -============================== - -| Agents are installed into the platform using: - -``vctl install ``. -| When agents are installed onto a platform, it creates a uuid for that -instance of an agent. This allows multiple instances of the same agent -package to be installed on the platform. - -Agents can also be installed with a :ref:`tag ` by using: - -``vctl install =`` - -This allows the user to refer to the agent with "--tag " instead of the -uuid when issuing commands. This tag can also distinguish instances of -an agent from each other. - -A stopped agent can be removed with: - -- ``vctl remove `` -- ``vctl remove --tag `` -- ``vctl remove --name `` - -Removal by tag and name potentially allows multiple agents to be removed -at once and should be used with caution. A "-f" option is required to -delete more than one agent at a time. - -Agent Control -============= - -Starting and Stopping an Agent ------------------------------- - -Agent that are installed in the platform can be launched with the -"start" command. By default this operates off the agent's UUID but can -be used with "--tag" or "--name" to launch agents by those attributes. -This can allow multiple agents to be started at once. For instance: -``vctl start --name myagent-0.1`` would start all instances of -that agent regardless of their uuid, tag, or configuration information. -After an agent is started, it will show up in -:ref:`AgentStatus ` as "running" with a process id. - -Similarly, ``volttron-ctl stop `` can also operate off the tag and -name of agent(s). After an agent is stopped, it will show an exit code -of 0 in :ref:`AgentStatus ` - -Running an agent ----------------- - -For testing purposes, an agent package not installed in the platform can -be run by using: ``vctl run ``. - -Agent Status -============ -olttron- -| ``vctl list`` lists the agents installed on the platform and their priority -| The ``vctl status`` shows the list of installed agents and whether they are running or have exited. -| See :ref:`AgentStatus ` for more details. diff --git a/docs/source/core_services/control/AgentStatus.rst b/docs/source/core_services/control/AgentStatus.rst deleted file mode 100644 index f744741f5a..0000000000 --- a/docs/source/core_services/control/AgentStatus.rst +++ /dev/null @@ -1,56 +0,0 @@ -.. _Agent-Status: - -Agent List Display -~~~~~~~~~~~~~~~~~~ - -:: - - AGENT IDENTITY TAG PRI - - d listeneragent-3.0 listeneragent-3.0_1 30 - 2 testeragent-0.1 testeragent-0.1_1 - -``vctl list`` shows the agents which have been installed on the -platform along with their uuid, associated `tag `__, and -`priority `__. - -- uuid is the first column of the display and is displayed as the - shorted unique portion. Using this portion, agents can be started, - stopped, removed, etc. -- AGENT is the "name" of this agent based on the name of the wheel file - which was installed. Agents can be controlled with this using "--name - ". Note, if multiple instances of a wheel are installed they will all - have the same name and can be controlled as a group. -- `TAG `__ is a user-provided tag which makes it simpler to - track and refer to agents. Agents can be controlled by using "--tag". -- PRI is the priority for agents which have been "enabled" using the - ``vctl enable`` command. When enabled, agents will be - automatically started in priority order along with the platform. - -Agent Status Display -==================== - -:: - - AGENT TAG STATUS - - d listeneragent-3.0 listener running [3813] - 2 testeragent-0.1 0 - -``vctl status`` shows a list of all agents installed on the -platform and their current status. - -- uuid is the first column of the display and is displayed as the - shorted unique portion. Using this portion, agents can be started, - stopped, removed, etc. -- AGENT is the "name" of this agent based on the name of the wheel file - which was installed. Agents can be controlled with this using "--name - ". Note, if multiple instances of a wheel are installed they will all - have the same name and can be controlled as a group. -- `TAG `__ is a user provided tag which makes it simpler to - track and refer to agents. Using "--tag " agents can be controlled - using this -- STATUS is the current condition of the agent. If the agent is - currently executing, it has "running" and the process id of the - agent. If the agent is not running, the exit code is shown. - diff --git a/docs/source/core_services/control/AgentTag.rst b/docs/source/core_services/control/AgentTag.rst deleted file mode 100644 index 8f3894b42e..0000000000 --- a/docs/source/core_services/control/AgentTag.rst +++ /dev/null @@ -1,24 +0,0 @@ -.. _AgentTag: - -Tagging Agents -============== - -Agents can be tagged as they are installed with: - -``vctl install =`` - -Agents can be tagged after installation with: - -``vctl tag `` - -Agents can be "tagged" to provide a meaningful user defined way to -reference the agent instead of the uuid or the name. This allows users -to differentiate between instances of agents which use the same codebase -but are configured differently. For instance, the AFDDAgent can be -configured to work against a single HVAC unit and can have any number of -instances running on one platform. A tagging scheme for this could be by -unit: afdd-rtu1, afdd-rtu2, etc. - -Commands which operate off an agent's UUID can optionally operate off -the tag by using "--tag ". This can use wildcards to catch multiple -agents at once. diff --git a/docs/source/core_services/control/AuthenticationCommands.rst b/docs/source/core_services/control/AuthenticationCommands.rst deleted file mode 100644 index 9a672de567..0000000000 --- a/docs/source/core_services/control/AuthenticationCommands.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. _AuthenticationCommands: - -Authentication Commands -======================= - -All authentication sub-commands can be viewed by entering following command. - -.. code-block:: console - - vctl auth --help - -.. code-block:: console - - optional arguments: - -h, --help show this help message and exit - -c FILE, --config FILE - read configuration from FILE - --debug show tracbacks for errors rather than a brief message - -t SECS, --timeout SECS - timeout in seconds for remote calls (default: 30) - --vip-address ZMQADDR - ZeroMQ URL to bind for VIP connections - --keystore-file FILE use keystore from FILE - --known-hosts-file FILE - get known-host server keys from FILE - - subcommands: - add add new authentication record - add-group associate a group name with a set of roles - add-known-host add server public key to known-hosts file - add-role associate a role name with a set of capabilities - keypair generate CurveMQ keys for encrypting VIP connections - list list authentication records - list-groups show list of group names and their sets of roles - list-known-hosts list entries from known-hosts file - list-roles show list of role names and their sets of capabilities - publickey show public key for each agent - remove removes one or more authentication records by indices - remove-group disassociate a group name from a set of roles - remove-known-host remove entry from known-hosts file - remove-role disassociate a role name from a set of capabilities - serverkey show the serverkey for the instance - update updates one authentication record by index - update-group update group to include (or remove) given roles - update-role update role to include (or remove) given capabilities - -Authentication record ---------------------- - -An authentication record consist of following parameters - -.. code-block:: console - - domain []: - address []: Either a single agent identity or an array of agents identities - user_id []: Arbitrary string to identify the agent - capabilities (delimit multiple entries with comma) []: Array of strings referring to authorized capabilities defined by exported RPC methods - roles (delimit multiple entries with comma) []: - groups (delimit multiple entries with comma) []: - mechanism [CURVE]: - credentials []: Public key string for the agent - comments []: - enabled [True]: - -For more details on how to create authentication record, please see section :ref:`Agent Authentication` - - - - - - diff --git a/docs/source/core_services/control/PlatformCommands.rst b/docs/source/core_services/control/PlatformCommands.rst deleted file mode 100644 index 6adefd115f..0000000000 --- a/docs/source/core_services/control/PlatformCommands.rst +++ /dev/null @@ -1,257 +0,0 @@ -.. _PlatformCommands: - -Platform Commands -================= - -VOLTTRON files for -a platform instance are stored under a single directory known as the -VOLTTRON home. This home directory is set via the VOLTTRON\_HOME -environment variable and defaults to ~/.volttron. Multiple instances of -the platform may exist under the same account on a system by setting the -VOLTTRON\_HOME environment variable appropriately before executing -VOLTTRON commands. - -Configuration files use a modified INI format where section names are -command names for which the settings in the section apply. Settings -before the first section are considered global and will be used by all -commands for which the settings are valid. Settings keys are long -options (with or without the opening --) and are followed by a colon (:) -or equal (=) and then the value. Boolean options need not include the -separator or value, but may specify a value of 1, yes, or true for true -or 0, no, or false for false. - -A default configuration file, $VOLTTRON\_HOME/config, may be created to -override default options. If it exists, it will be automatically parsed -before all other command-line options. To skip parsing the default -configuration file, either move the file out of the way or set the -SKIP\_VOLTTRON\_CONFIG environment variable. - -All commands and sub-commands have help available with "-h" or "--help". -Additional configuration files may be specified with "-c" or "--config". -To specify a log file, use "-l" or "--log". - -.. code-block:: bash - - env/bin/volttron -c config.ini -l volttron.log - -Full options: - -.. code-block:: console - - VOLTTRON platform service - - optional arguments: - -c FILE, --config FILE - read configuration from FILE - -l FILE, --log FILE send log output to FILE instead of stderr - -L FILE, --log-config FILE - read logging configuration from FILE - --log-level LOGGER:LEVEL - override default logger logging level - --monitor monitor and log connections (implies -v) - -q, --quiet decrease logger verboseness; may be used multiple - times - -v, --verbose increase logger verboseness; may be used multiple - times - --verboseness LEVEL set logger verboseness - -h, --help show this help message and exit - --version show program's version number and exit - - agent options: - --autostart automatically start enabled agents and services - --publish-address ZMQADDR - ZeroMQ URL used for pre-3.x agent publishing - (deprecated) - --subscribe-address ZMQADDR - ZeroMQ URL used for pre-3.x agent subscriptions - (deprecated) - --vip-address ZMQADDR - ZeroMQ URL to bind for VIP connections - --vip-local-address ZMQADDR - ZeroMQ URL to bind for local agent VIP connections - --bind-web-address BINDWEBADDR - Bind a web server to the specified ip:port passed - --volttron-central-address VOLTTRON_CENTRAL_ADDRESS - The web address of a volttron central install - instance. - --volttron-central-serverkey VOLTTRON_CENTRAL_SERVERKEY - The serverkey of volttron central. - --instance-name INSTANCE_NAME - The name of the instance that will be reported to - VOLTTRON central. - - Boolean options, which take no argument, may be inversed by prefixing the - option with no- (e.g. --autostart may be inversed using --no-autostart). - - -volttron-ctl Commands ---------------------- -volttron-ctl is used to issue commands to the platform from the command line. Through -volttron-ctl it is possible to install and removed agents, start and stop agents, -manage the configuration store, get the platform status, and shutdown the platform. - -In more recent versions of VOLTTRON, the commands 'vctl', 'vpkg', and 'vcfg' -have been added to be used as a stand-in for 'volttron-ctl', 'volttron-pkg', and -'volttron-cfg' in the CLI. The VOLTTRON documentation will often use this convention. - -.. warning:: - volttron-ctl creates a special temporary agent ito communicate with the - platform with a specific VIP IDENTITY, thus multiple instances of volttron-ctl - cannot run at the same time. Attempting to do so will result in a conflicting - identity error. - -.. code-block:: console - - usage: vctl command [OPTIONS] ... - - Manage and control VOLTTRON agents. - - optional arguments: - -c FILE, --config FILE - read configuration from FILE - --debug show tracebacks for errors rather than a brief message - -t SECS, --timeout SECS - timeout in seconds for remote calls (default: 60) - --msgdebug MSGDEBUG route all messages to an agent while debugging - --vip-address ZMQADDR - ZeroMQ URL to bind for VIP connections - -l FILE, --log FILE send log output to FILE instead of stderr - -L FILE, --log-config FILE - read logging configuration from FILE - -q, --quiet decrease logger verboseness; may be used multiple - times - -v, --verbose increase logger verboseness; may be used multiple - times - --verboseness LEVEL set logger verboseness - -h, --help show this help message and exit - - - - commands: - - install install agent from wheel - tag set, show, or remove agent tag - remove remove agent - list list installed agent - status show status of agents - clear clear status of defunct agents - enable enable agent to start automatically - disable prevent agent from start automatically - start start installed agent - stop stop agent - restart restart agent - run start any agent by path - auth manage authorization entries and encryption keys - config manage the platform configuration store - shutdown stop all agents - send send agent and start on a remote platform - stats manage router message statistics tracking - -vctl auth subcommands -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: console - - subcommands: - - add add new authentication record - add-known-host add server public key to known-hosts file - keypair generate CurveMQ keys for encrypting VIP connections - list list authentication records - publickey show public key for each agent - remove removes one or more authentication records by indices - serverkey show the serverkey for the instance - update updates one authentication record by index - -vctl config subcommands -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: console - - subcommands: - - store store a configuration - delete delete a configuration - list list stores or configurations in a store - get get the contents of a configuration - - - -vpkg Commands ---------------------- - -.. code-block:: console - - usage: volttron-pkg [-h] [-l FILE] [-L FILE] [-q] [-v] [--verboseness LEVEL] - {package,repackage,configure} ... - - optional arguments: - -h, --help show this help message and exit - - subcommands: - valid subcommands - - {package,repackage,configure} - additional help - package Create agent package (whl) from a directory or - installed agent name. - repackage Creates agent package from a currently installed - agent. - configure add a configuration file to an agent package - -vpkg commands (with Volttron Restricted package installed and -enabled): - -.. code-block:: console - - usage: volttron-pkg [-h] [-l FILE] [-L FILE] [-q] [-v] [--verboseness LEVEL] - {package,repackage,configure,create_ca,create_cert,sign,verify} - ... - - VOLTTRON packaging and signing utility - - optional arguments: - -h, --help show this help message and exit - -l FILE, --log FILE send log output to FILE instead of stderr - -L FILE, --log-config FILE - read logging configuration from FILE - -q, --quiet decrease logger verboseness; may be used multiple - times - -v, --verbose increase logger verboseness; may be used multiple - times - --verboseness LEVEL set logger verboseness - - subcommands: - valid subcommands - - {package,repackage,configure,create_ca,create_cert,sign,verify} - additional help - package Create agent package (whl) from a directory or - installed agent name. - repackage Creates agent package from a currently installed - agent. - configure add a configuration file to an agent package - sign sign a package - verify verify an agent package - -volttron-cfg Commands ---------------------- -volttron-cfg (vcfg) is a tool aimed at making it easier to get up and running with -Volttron and a handful of agents. Running the tool without any arguments -will start a *wizard* with a walk through for setting up instance configuration -options and available agents.If only individual agents need to be configured -they can be listed at the command line. - -.. code-block:: console - - usage: vcfg [-h] [--list-agents | --agent AGENT [AGENT ...]] - - optional arguments: - -h, --help show this help message and exit - --list-agents list configurable agents - listener - platform_historian - vc - vcp - --agent AGENT [AGENT ...] - configure listed agents diff --git a/docs/source/core_services/control/PlatformConfigFile.rst b/docs/source/core_services/control/PlatformConfigFile.rst deleted file mode 100644 index 8045a93ee2..0000000000 --- a/docs/source/core_services/control/PlatformConfigFile.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. _PlatformConfigFile: - -VOLTTRON Config File -==================== - -The VOLTTRON platform config file can contain any of the command line -arguments for starting the platform... - -.. code-block:: console - - -c FILE, --config FILE - read configuration from FILE - -l FILE, --log FILE send log output to FILE instead of stderr - -L FILE, --log-config FILE - read logging configuration from FILE - -q, --quiet decrease logger verboseness; may be used multiple - times - -v, --verbose increase logger verboseness; may be used multiple - times - --verboseness LEVEL set logger verboseness - --help show this help message and exit - --version show program's version number and exit - -agent options: - -.. code-block:: console - - --autostart automatically start enabled agents and services - --publish-address ZMQADDR - ZeroMQ URL for used for agent publishing - --subscribe-address ZMQADDR - ZeroMQ URL for used for agent subscriptions - -control options: - -.. code-block:: console - - --control-socket FILE - path to socket used for control messages - --allow-root allow root to connect to control socket - --allow-users LIST users allowed to connect to control socket - --allow-groups LIST user groups allowed to connect to control socket - -| Boolean options, which take no argument, may be inversed by prefixing the -| option with no- (e.g. --autostart may be inversed using --no-autostart). diff --git a/docs/source/core_services/control/PlatformConfiguration.rst b/docs/source/core_services/control/PlatformConfiguration.rst deleted file mode 100644 index 9889803eaa..0000000000 --- a/docs/source/core_services/control/PlatformConfiguration.rst +++ /dev/null @@ -1,22 +0,0 @@ -volttron-.. _PlatformConfiguration: - -VOLTTRON Environment -==================== - -By default, the VOLTTRON projects bases its files out of VOLTTRON\_HOME -which defaults to "~/.volttron". - -- ``$VOLTTRON_HOME/agents`` contains the agents installed on the - platform -- ``$VOLTTRON_HOME/certificates`` contains the certificates for use - with the Licensed VOLTTRON code. -- ``$VOLTTRON_HOME/run`` contains files create by the platform during - execution. The main ones are the 0MQ files created for publish and - subcribe. -- ``$VOLTTRON_HOME/ssh`` keys used by agent mobility in the Licensed - VOLTTRON code -- ``$VOLTTRON_HOME/config`` Default location to place a config file to - override any platform settings. -- ``$VOLTTRON_HOME/packaged`` is where agent packages created with - \`volttron-pkg package are created - diff --git a/docs/source/core_services/control/VOLTTRON-Config.rst b/docs/source/core_services/control/VOLTTRON-Config.rst deleted file mode 100644 index 310169d98d..0000000000 --- a/docs/source/core_services/control/VOLTTRON-Config.rst +++ /dev/null @@ -1,107 +0,0 @@ -.. _VOLTTRON-Config: - -VOLTTRON Config -=============== - -The new volttron-cfg (vcfg) commands allows for the easy configuration of a -VOLTTRON platform. This includes setting up the platform configuration, -historian, VOLTTRON Central UI, and platform agent. - -example vcfg output: - -.. note:: - - - In this example, represents the user's home directory, and represents the machine's localhost. - - If an option was not specified during bootstrapping i.e. "--web", "--rabbitmq", or "--driver", and an option is - selected during the vcfg wizard that requires that option, the necessary dependencies will be installed automatically. - -.. code-block:: console - - Your VOLTTRON_HOME currently set to: /home//.volttron - - Is this the volttron you are attempting to setup? [Y]: - What type of message bus (rmq/zmq)? [zmq]: rmq - Name of this volttron instance: [volttron1]: - RabbitMQ server home: [/home//rabbitmq_server/rabbitmq_server-3.7.7]: - Fully qualified domain name of the system: []: - Would you like to create a new self signed root CAcertificate for this instance: [Y]: - - Please enter the following details for root CA certificate - Country: [US]: - State: WA - Location: Richland - Organization: PNNL - Organization Unit: VOLTTRON - Do you want to use default values for RabbitMQ home, ports, and virtual host: [Y]: - A rabbitmq conf file /home//rabbitmq_server/rabbitmq_server-3.7.7/etc/rabbitmq/rabbitmq.conf already exists. - In order for setup to proceed it must be removed. - - Remove /home//rabbitmq_server/rabbitmq_server-3.7.7/etc/rabbitmq/rabbitmq.conf? y - 2020-04-13 13:29:36,347 rmq_setup.py INFO: Starting RabbitMQ server - 2020-04-13 13:29:46,528 rmq_setup.py INFO: Rmq server at /home//rabbitmq_server/rabbitmq_server-3.7.7 is running at - 2020-04-13 13:29:46,554 volttron.utils.rmq_mgmt DEBUG: Creating new VIRTUAL HOST: volttron - 2020-04-13 13:29:46,582 volttron.utils.rmq_mgmt DEBUG: Create READ, WRITE and CONFIGURE permissions for the user: volttron1-admin - Create new exchange: volttron, {'durable': True, 'type': 'topic', 'arguments': {'alternate-exchange': 'undeliverable'}} - Create new exchange: undeliverable, {'durable': True, 'type': 'fanout'} - 2020-04-13 13:29:46,600 rmq_setup.py INFO: - Checking for CA certificate - - 2020-04-13 13:29:46,601 rmq_setup.py INFO: - Creating root ca for volttron instance: /home//.volttron/certificates/certs/volttron1-root-ca.crt - 2020-04-13 13:29:46,601 rmq_setup.py INFO: Creating root ca with the following info: {'C': 'US', 'ST': 'WA', 'L': 'Richland', 'O': 'PNNL', 'OU': 'VOLTTRON', 'CN': 'volttron1-root-ca'} - Created CA cert - 2020-04-13 13:29:49,668 rmq_setup.py INFO: **Stopped rmq server - 2020-04-13 13:30:00,556 rmq_setup.py INFO: Rmq server at /home//rabbitmq_server/rabbitmq_server-3.7.7 is running at - 2020-04-13 13:30:00,557 rmq_setup.py INFO: - - ####################### - - Setup complete for volttron home /home//.volttron with instance name=volttron1 - Notes: - - On production environments, restrict write access to /home//.volttron/certificates/certs/volttron1-root-ca.crt to only admin user. For example: sudo chown root /home//.volttron/certificates/certs/volttron1-root-ca.crt and /home//.volttron/certificates/certs/volttron1-trusted-cas.crt - - A new admin user was created with user name: volttron1-admin and password=default_passwd. - You could change this user's password by logging into https://:15671/ Please update /home//.volttron/rabbitmq_config.yml if you change password - - ####################### - - The rmq message bus has a backward compatibility - layer with current zmq instances. What is the - zmq bus's vip address? [tcp://127.0.0.1]: - What is the port for the vip address? [22916]: - Is this instance web enabled? [N]: y - Web address set to: https:// - What is the port for this instance? [8443]: - Is this an instance of volttron central? [N]: y - Configuring /home//volttron/services/core/VolttronCentral. - Installing volttron central. - ['volttron', '-vv', '-l', '/home//.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - VC admin and password are set up using the admin web interface. - After starting VOLTTRON, please go to https://:8443/admin/login.html to complete the setup. - Will this instance be controlled by volttron central? [Y]: - Configuring /home//volttron/services/core/VolttronCentralPlatform. - What is the name of this instance? [volttron1]: - Volttron central address set to https://:8443 - ['volttron', '-vv', '-l', '/home//.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - Would you like to install a platform historian? [N]: y - Configuring /home//volttron/services/core/SQLHistorian. - ['volttron', '-vv', '-l', '/home//.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - Would you like to install a master driver? [N]: y - Configuring /home//volttron/services/core/MasterDriverAgent. - ['volttron', '-vv', '-l', '/home//.volttron/volttron.cfg.log'] - Would you like to install a fake device on the master driver? [N]: y - Should the agent autostart? [N]: y - Would you like to install a listener agent? [N]: y - Configuring examples/ListenerAgent. - ['volttron', '-vv', '-l', '/home//.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - Finished configuration! - - You can now start the volttron instance. - - If you need to change the instance configuration you can edit - the config file is at /home//.volttron/config - - diff --git a/docs/source/core_services/control/index.rst b/docs/source/core_services/control/index.rst deleted file mode 100644 index 3d9b8b5223..0000000000 --- a/docs/source/core_services/control/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _Control: - -=========================== -Base Platform Functionality -=========================== - -The base platform functionality focuses on the agent lifecycle, management of the platform itself, and security. - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/drivers/BACnet-Auto-Configuration.rst b/docs/source/core_services/drivers/BACnet-Auto-Configuration.rst deleted file mode 100644 index 5f0b75cf4e..0000000000 --- a/docs/source/core_services/drivers/BACnet-Auto-Configuration.rst +++ /dev/null @@ -1,279 +0,0 @@ -.. _BACnet-Auto-Configuration: - -=================================================== -Automatically Generating BACnet Configuration Files -=================================================== - -Included with the platform are two scripts for finding and configuring BACnet devices. -These scripts are located in ``scripts/bacnet``. ``bacnet_scan.py`` will scan -the network for devices. ``grab_bacnet_config.py`` creates a CSV file for -the BACnet driver that can be used as a starting point for creating -your own register configuration. - -Both scripts are configured with the file ``BACpypes.ini``. - -Configuring the Utilities -------------------------- - -While running both scripts create a temporary virtual BACnet device -using the ``bacpypes`` library. The virtual -device must be configured properly in order to work. This -configuration is stored in ``scripts/bacnet/BACpypes.ini`` and will be -read automatically when the utility is run. - -The only value that (usually) needs to be changed is the **address** field. -**This is the address bound to the port on the machine you are running the script from, NOT -A TARGET DEVICE!** This value should be set to the IP address of the -network interface used to communicate with the remote device. If there -is more than one network interface you must use the address of the interface -connected to the network that can reach the device. - -In Linux you can usually get the addresses bound to all interfaces by running -``ifconfig`` from the command line. - -If a different outgoing port other than the default 47808 must be used, -it can be specified as part of the address in the form - - ``
:`` - -In some cases, the netmask of the network will be needed for proper configuration. -This can be done following this format - - ``
/:`` - -where ```` is the netmask length. The most common value is 24. See http://www.computerhope.com/jargon/n/netmask.htm - -In some cases, you may also need to specify a different device ID by -changing the value of **objectIdentifier** so the virtual BACnet device does -not conflict with any devices on the network. **objectIdentifier** -defaults to 599. - -Sample BACpypes.ini -******************* - -:: - - [BACpypes] - objectName: Betelgeuse - address: 10.0.2.15/24 - objectIdentifier: 599 - maxApduLengthAccepted: 1024 - segmentationSupported: segmentedBoth - vendorIdentifier: 15 - -Scanning for BACnet Devices ---------------------------- - -If the addresses for BACnet devices are unknown they can be discovered -using the ``bacnet_scan.py`` utility. - -To run the utility simply execute the following command: - - ``python bacnet_scan.py`` - -and expect output similar to this: - -:: - - Device Address =
- Device Id = 699 - maxAPDULengthAccepted = 1024 - segmentationSupported = segmentedBoth - vendorID = 15 - - Device Address = - Device Id = 540011 - maxAPDULengthAccepted = 480 - segmentationSupported = segmentedBoth - vendorID = 5 - -Reading Output -************** - -The address where the device can be reached is listed on the **Device Address** line. -The BACnet device ID is listed on the **Device Id** line. -The remaining lines are informational and not needed to configure the BACnet driver. - -For the first example, the IP address ``192.168.1.42`` can be used to reach -the device. The second device is behind a BACnet router and can be -reached at ``1002:11``. See RouterAddressing Remote Station addressing. - -BACNet Scan Options -******************* - - - ``--address ADDRESS`` Send the WhoIs request only to a specific address. Useful as a way to ping devices on a network that blocks broadcast traffic. - - ``--range LOW HIGH`` Specify the device ID range for the results. Useful for filtering. - - ``--timeout SECONDS`` Specify how long to wait for responses to the original broadcast. This defaults to 5 which should be sufficient for most networks. - - ``--csv-out CSV_OUT`` Write the discovered devices to a CSV file. This can be used as inout for ``grab_multiple_configs.py``. See `Scraping Multiple Devices`_. - -Automatically Generating a BACnet Registry Configuration File -------------------------------------------------------------- - -A CSV registry configuration file for the BACnet driver can be generated with the -``grab_bacnet_config.py`` script. **This configuration will need to be edited -before it can be used.** - -The utility is invoked with the command: - - ``python grab_bacnet_config.py `` - -This will query the device with the matching device ID for configuration -information and print the resulting CSV file to the console. - -In order to save the configuration to a file use the ``--out-file`` option to specify the -output file name. - -Optionally the ``--address`` option can be used to specify the address of the target. In some cases, this is needed to help -establish a route to the device. - -Output and Assumptions -********************** - -Attempts at determining if a point is writable proved too unreliable. -Therefore all points are considered to be read-only in the output. - -The only property for which a point is setup for an object is -**presentValue**. - -By default, the **Volttron Point Name** is set to the value of the **name** -property of the BACnet object on the device. In most cases this name is vague. -No attempt is made at choosing a better name. A -duplicate of "Volttron Point Name" column called "Reference Point Name" is created to so that -once "Volttron Point Name" is changed a reference remains to the actual -BACnet device object name. - -Meta data from the objects on the device is used to attempt to put -useful info in the **Units** **Unit Details**, and **Notes** columns. -Information such as the range of valid values, defaults, the resolution -or sensor input, and enumeration or state names are scraped from the -device. - -With a few exceptions "Units" is pulled from the object's "units" -property and given the name used by the bacpypes library to describe it. -If a value in the **Units** column takes the form - - ``UNKNOWN UNIT ENUM VALUE: `` - -then the device is using a nonstandard value for the units on that -object. - -Scraping Multiple Devices -------------------------- - -The ``grab_multiple_configs.py`` script will use the CSV output of bacnet_scan.py to automatically run -``grab_bacnet_config.py`` on every device listed in the CSV file. - -The output is put in two directories. ``devices/`` contains basic driver configurations for the scrapped devices. -``registry_configs/`` contains the registry file generated by grab_bacnet_config.py. - -``grab_multiple_configs.py`` makes no assumptions about device names or topics, however the output is appropriate for the -``install_master_driver_configs.py`` script. - -Grab Multiple Configs Options -***************************** - - - ``--out-directory OUT_DIRECTORY`` Specify the output directory. - - ``--use-proxy`` Use ``proxy_grab_bacnet_config.py`` to gather configuration data. - - -BACnet Proxy Alternative Scripts --------------------------------- - -Both ``grab_bacnet_config.py`` and ``bacnet_scan.py`` have alternative versions called -``proxy_grab_bacnet_config.py`` and ``proxy_bacnet_scan.py`` repectively. These versions require that the -VOLTTRON platform is running and BACnet Proxy agent is running. Both of these agents use the same command line -arguments as their independent counterparts. - -.. warning:: - - These versions of the BACnet scripts are intended as a proof of concept and have not been optimized for performance. - ``proxy_grab_bacnet_config.py`` takes about 10 times longer to grab a configuration than ``grab_bacnet_config.py`` - - - -Problems and Debugging ----------------------- - -Both ``grab_bacnet_config.py`` and ``bacnet_scan.py`` creates a virtual device that open up a port for communication with devices. -If BACnet Proxy is running on the VOLTTRON platform it will cause both of these scripts to fail at startup. -Stopping the BACnet Proxy will resolve the problem. - -Typically the utility should run quickly and finish in 30 seconds or -less. In our testing, we have never seen a successful scrape take more -than 15 seconds on a very slow device with many points. Many devices -will scrape in less that 3 seconds. - -If the utility has not finished after about 60 seconds it -is probably having trouble communicating with the device and should be -stopped. Rerunning with debug output can help diagnose the problem. - -To output debug messages to the console add the ``--debug`` switch to -the **end** of the command line arguments. - - ``python grab_bacnet_config.py --out-file test.csv --debug`` - -On a successful run you will see output similar to this: - -:: - - DEBUG:main:initialization - DEBUG:main: - args: Namespace(address='10.0.2.20', buggers=False, debug=[], ini=, max_range_report=1e+20, out_file=) - DEBUG:main.SynchronousApplication:init (, '10.0.2.15') - DEBUG:main:starting build - DEBUG:main:pduSource =
- DEBUG:main:iAmDeviceIdentifier = ('device', 500) - DEBUG:main:maxAPDULengthAccepted = 1024 - DEBUG:main:segmentationSupported = segmentedBoth - DEBUG:main:vendorID = 5 - DEBUG:main:device_name = MS-NCE2560-0 - DEBUG:main:description = - DEBUG:main:objectCount = 32 - DEBUG:main:object name = Building/FCB.Local Application.Room Real Temp 2 - DEBUG:main: object type = analogInput - DEBUG:main: object index = 3000274 - DEBUG:main: object units = degreesFahrenheit - DEBUG:main: object units details = -50.00 to 250.00 - DEBUG:main: object notes = Resolution: 0.1 - DEBUG:main:object name = Building/FCB.Local Application.Room Real Temp 1 - DEBUG:main: object type = analogInput - DEBUG:main: object index = 3000275 - DEBUG:main: object units = degreesFahrenheit - DEBUG:main: object units details = -50.00 to 250.00 - DEBUG:main: object notes = Resolution: 0.1 - DEBUG:main:object name = Building/FCB.Local Application.OSA - DEBUG:main: object type = analogInput - DEBUG:main: object index = 3000276 - DEBUG:main: object units = degreesFahrenheit - DEBUG:main: object units details = -50.00 to 250.00 - DEBUG:main: object notes = Resolution: 0.1 - ... - -and will finish something like this: - -:: - - ... - DEBUG:main:object name = Building/FCB.Local Application.MOTOR1-C - DEBUG:main: object type = binaryOutput - DEBUG:main: object index = 3000263 - DEBUG:main: object units = Enum - DEBUG:main: object units details = 0-1 (default 0) - DEBUG:main: object notes = BinaryPV: 0=inactive, 1=active - DEBUG:main:finally - -Typically if the BACnet device is unreachable for any reason (wrong IP, -network down/unreachable, wrong interface specified, device failure, -etc) the scraper will stall at this message: - -:: - - DEBUG:main:starting build - -If you have not specified a valid interface in BACpypes.ini you will see -the following error with a stack trace: - -:: - - ERROR:main:an error has occurred: [Errno 99] Cannot assign requested address - - diff --git a/docs/source/core_services/drivers/BACnet-Proxy-Agent.rst b/docs/source/core_services/drivers/BACnet-Proxy-Agent.rst deleted file mode 100644 index c62b384eea..0000000000 --- a/docs/source/core_services/drivers/BACnet-Proxy-Agent.rst +++ /dev/null @@ -1,220 +0,0 @@ -.. _BACnet-Proxy-Agent: - -================== -BACnet Proxy Agent -================== - -Introduction ------------- - -Communication with BACnet device on a network happens via a single -virtual BACnet device. In VOLTTRON driver framework, we use a separate -agent specifically for communicating with BACnet devices and managing -the virtual BACnet device. - -Requirements ------------- -The BACnet Proxy agent requires the BACPypes package. This package can -be installed in an activated environment with: - -:: - - pip install bacpypes - -Configuration -------------- - -The agent configuration sets up the virtual BACnet device. - -.. code-block:: json - - { - "device_address": "10.0.2.15", - "max_apdu_length": 1024, - "object_id": 599, - "object_name": "Volttron BACnet driver", - "vendor_id": 15, - "segmentation_supported": "segmentedBoth" - } - -BACnet device settings -********************** - -- **device_address** - Address bound to the network port over which - BACnet communication will happen on the computer running VOLTTRON. - This is **NOT** the address of any target device. See `Device Addressing`_. -- **object_id** - ID of the Device object of the virtual BACnet - device. Defaults to 599. Only needs to be changed if there is - a conflicting BACnet device ID on your network. - -These settings determine the capabilities of the virtual BACnet device. -BACnet communication happens at the lowest common denominator between -two devices. For instance, if the BACnet proxy supports segmentation and -the target device does not communication will happen without -segmentation support and will be subject to those limitations. -Consequently, there is little reason to change the default settings -outside of the **max_apdu_length** (the default is not the largest -possible value). - -- **max_apdu_length** - (From bacpypes documentation) BACnet works on - lots of different types of networks, from high-speed Ethernet to - “slower” and “cheaper” ARCNET or MS/TP (a serial bus protocol used - for a field bus defined by BACnet). For devices to exchange messages - they have to know the maximum size message the device can handle. - (End BACpypes docs) - - This setting determines the largest APDU accepted by the BACnet - virtual device. Valid options are 50, 128, 206, 480, 1024, and 1476. - Defaults to 1024.(Optional) - - -- **object_name** - Name of the object. Defaults to "Volttron BACnet - driver". (Optional) -- **vendor_id** - Vendor ID of the virtual BACnet device. Defaults to - 15. (Optional) -- **segmentation_supported** - (From bacpypes documentation) A vast - majority of BACnet communications traffic fits into one message, but - there can be times when larger messages are convenient and more - efficient. Segmentation allows larger messages to be broken up into - segments and spliced back together. It is not unusual for “low power” - field equipment to not support segmentation. (End BACpypes docs) - - Possible setting are "segmentedBoth" (default), "segmentedTransmit", - "segmentedReceive", or "noSegmentation" (Optional) - -Device Addressing ------------------ - -In some cases, it will be needed to specify the subnet mask of the -virtual device or a different port number to listen on. The full format -of the BACnet device address is - - ``
/:`` - -where ```` is the port to use and ```` is the netmask length. -The most common value is 24. See http://www.computerhope.com/jargon/n/netmask.htm - -For instance, if you need to specify a subnet mask of 255.255.255.0 -and the IP address bound to the network port is 192.168.1.2 you -would use the address - -:: - - 192.168.1.2/24 - -If your BACnet network is on a different port (47809) besides the -default (47808) you would use the address - -:: - - 192.168.1.2:47809 - -If you need to do both - -:: - - 192.168.1.2/24:47809 - -.. _bacnet-proxy-multiple-networks: - -Communicating With Multiple BACnet Networks -------------------------------------------- - -If two BACnet devices are connected to different ports they are -considered to be on different BACnet networks. In order to communicate -with both devices, you will need to run one BACnet Proxy Agent per -network. - -Each proxy will need to be bound to different ports appropriate for -each BACnet network and will need a different VIP identity specified. -When configuring drivers you will need to specify which proxy to use by -:ref:`specifying the VIP identity `. - -TODO: Add link to docs showing how to specify the VIP IDENTITY when installing an agent. - -For example, a proxy connected to the default BACnet network - -.. code-block:: json - - { - "device_address": "192.168.1.2/24" - } - -and another on port 47809 - -.. code-block:: json - - { - "device_address": "192.168.1.2/24:47809" - } - -a device on the first network - -.. code-block:: json - - { - "driver_config": {"device_address": "1002:12", - "proxy_address": "platform.bacnet_proxy_47808", - "timeout": 10}, - "driver_type": "bacnet", - "registry_config":"config://registry_configs/bacnet.csv", - "interval": 60, - "timezone": "UTC", - "heart_beat_point": "Heartbeat" - } - -and a device on the second network - -.. code-block:: json - - { - "driver_config": {"device_address": "12000:5", - "proxy_address": "platform.bacnet_proxy_47809", - "timeout": 10}, - "driver_type": "bacnet", - "registry_config":"config://registry_configs/bacnet.csv", - "interval": 60, - "timezone": "UTC", - "heart_beat_point": "Heartbeat" - } - -Notice that both configs use the same registry configuration -(config://registry_configs/bacnet.csv). This is perfectly fine as long as the -registry configuration is appropriate for both devices. -For scraping large numbers of points from a single BACnet device, -there is an optional timeout parameter provided, to prevent the master driver -timing out while the BACnet Proxy Agent is collecting points. - - -BACnet Change of Value Services -------------------------------- - -|BACnet Change of Value Communications| - -Change of Value Services added in version 0.5 of the BACnet Proxy and version -3.2 of the Master Driver. - -There are a variety of scenarios in which a user may desire data from some -BACnet device point values to be published independently of the regular -scrape interval. Bacpypes provides a "ChangeOfValueServices" (hereby -referred to as 'COV') module, which enables a device to push updates to the -platform. - -The BACnet COV requires that points on the device be properly configured for -COV. A point on the BACnet device can be configured with the 'covIncrement' -property, which determines the threshold for a COV notification (note: this -property must be configured by the device operator - VOLTTRON does not -provide the ability to set or modify this property). - -Based on configuration options for BACnet drivers, the driver will instruct the -BACnet Proxy to establish a COV subscription with the device. The subscription -will last for an amount of time specified in the driver configuration, and will -auto-renew the subscription. If the proxy loses communication with the device or -the device driver is stopped the subscription will be removed when the lifetime -expires. While the subscription exists, the device will send (confirmed) -notifications to which will be published, with the topic based on the -driver's configured publish topics. - -https://bacpypes.readthedocs.io/en/latest/modules/service/cov.html - -.. |BACnet Change of Value Communications| image:: files/bacnet_cov.png diff --git a/docs/source/core_services/drivers/BACnet-Router-Addressing.rst b/docs/source/core_services/drivers/BACnet-Router-Addressing.rst deleted file mode 100644 index aee348d75a..0000000000 --- a/docs/source/core_services/drivers/BACnet-Router-Addressing.rst +++ /dev/null @@ -1,38 +0,0 @@ -.. _BACnet-Router-Addressing: - -======================== -BACnet Router Addressing -======================== - -The underlying library that Volttron uses for BACnet supports IP to -MS/TP routers. Devices behind the router use a Remote Station address in -the form - -:: - - :
- -where **** is the configured network ID of the router and **
** -is the address of the device behind the router. - -For example to access the device at **
** 12 for a router configured -for **** 1002 can be accessed with this address: - -:: - - 1002:12 - -**** must be number from 0 to 65534 and **
** must be a number -from 0 to 255. - -This type of address can be used anywhere an address is required in -configuration of the Volttron BACnet driver. - -Caveats -------- - -VOLTTRON uses a UDP broadcast mechanism to establish the route to the device. -If the route cannot be established it will fall back to a UDP broadcast for -all communication with the device. -If the IP network where the router is connected blocks UDP -broadcast traffic then these addresses will not work. diff --git a/docs/source/core_services/drivers/Using-Third-Party-Drivers.rst b/docs/source/core_services/drivers/Using-Third-Party-Drivers.rst deleted file mode 100644 index 304176fc01..0000000000 --- a/docs/source/core_services/drivers/Using-Third-Party-Drivers.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. _Using-Third-Party-Drivers: - -========================= -Using Third Party Drivers -========================= - -In some cases you will need to use a driver provided by a third-party to interact with a device. -While the interface file can be copied into ``services/core/MasterDriverAgent/master_driver/interfaces`` -this does not work well with third-party code that is under source control. - -The recommended method is to create a symbolic link to the interface file in -``services/core/MasterDriverAgent/master_driver/interfaces``. This will work in both -a development environment and in production. When packing the agent for installation -a copy of the linked file will be put in the resulting wheel file. - -:: - - #A copy of the interface file lives in ~/my_driver/my_driver.py - #Create the link - ln -s ~/my_driver/my_driver.py services/core/MasterDriverAgent/master_driver/interfaces/my_driver.py - - #remove the link - rm services/core/MasterDriverAgent/master_driver/interfaces/my_driver.py diff --git a/docs/source/core_services/drivers/driver_configuration/_Driver-Configuration.rst b/docs/source/core_services/drivers/driver_configuration/_Driver-Configuration.rst deleted file mode 100644 index c588015419..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/_Driver-Configuration.rst +++ /dev/null @@ -1,306 +0,0 @@ -.. _Driver-Configuration: - -==================== -Driver Configuration -==================== -The Master Driver Agent manages all device communication. To communicate with devices you must setup and deploy the Master Driver Agent. - -Configuration for each device consists of 3 parts: - -* Master Driver Agent configuration file - lists all driver configuration files to load -* Driver configuration file - contains the general driver configuration and device settings -* Device Register configuration file - contains the settings for each individual data point on the device - -For each device, you must create a driver configuration file, device register configuration file, and an entry in the Master Driver Agent configuration file. - -Once configured, the Master Driver Agent is :ref:`configured and deployed -` in a manner similar to any other agent. - -.. _MasterDriverConfig: - -Requirements ------------- - -VOLTTRON drivers operated by the master driver may have additional requirements for installation. -Required libraries: - -:: - - BACnet driver - bacpypes - Modbus driver - pymodbus - Modbus_TK driver - modbus-tk - DNP3 and IEEE 2030.5 drivers - pydnp3 - -Master Driver Agent Configuration ---------------------------------- -The Master Driver Agent configuration consists of general settings for all devices. The default values of the master driver should be sufficient for most users. -The user may optionally change the interval between device scrapes with the driver_scrape_interval. - -The following example sets the driver_scrape_interval to 0.05 seconds or 20 devices per second: - -.. code-block:: json - - { - "driver_scrape_interval": 0.05, - "publish_breadth_first_all": false, - "publish_depth_first": false, - "publish_breadth_first": false, - "publish_depth_first_all": true, - "group_offset_interval": 0.0 - } - -* **driver_scrape_interval** - Sets the interval between devices scrapes. Defaults to 0.02 or 50 devices per second. Useful for when the platform scrapes too many devices at once resulting in failed scrapes. -* **group_offset_interval** - Sets the interval between when groups of devices are scraped. Has no effect if all devices are in the same group. - -In order to improve the scalability of the platform unneeded device state publishes for all devices can be turned off. -All of the following setting are optional and default to `True`. - -* **publish_depth_first_all** - Enable "depth first" publish of all points to a single topic for all devices. -* **publish_breadth_first_all** - Enable "breadth first" publish of all points to a single topic for all devices. -* **publish_depth_first** - Enable "depth first" device state publishes for each register on the device for all devices. -* **publish_breadth_first** - Enable "breadth first" device state publishes for each register on the device for all devices. - -An example master driver configuration file can be found in the VOLTTRON repository in ``services/core/MasterDriverAgent/master-driver.agent``. - -.. _driver-configuration-file: - -Driver Configuration File -------------------------- - -.. note:: - - The terms `register` and `point` are used interchangeably in the documentation and - in the configuration setting names. They have the same meaning. - -Each device configuration has the following form: - -.. code-block:: json - - { - "driver_config": {"device_address": "10.1.1.5", - "device_id": 500}, - "driver_type": "bacnet", - "registry_config":"config://registry_configs/vav.csv", - "interval": 60, - "heart_beat_point": "heartbeat", - "group": 0 - } - -The following settings are required for all device configurations: - - - **driver_config** - Driver specific setting go here. See below for driver specific settings. - - **driver_type** - Type of driver to use for this device: bacnet, modbus, fake, etc. - - **registry_config** - Reference to a configuration file in the configuration store for registers - on the device. See the `Registry Configuration File`_ - and `Adding Device Configurations to the Configuration Store`_ sections below. - -These settings are optional: - - - **interval** - Period which to scrape the device and publish the results in seconds. Defaults to 60 seconds. - - **heart_beat_point** - A Point which to toggle to indicate a heartbeat to the device. A point with this Volttron Point Name must exist in the registry. If this setting is missing the driver will not send a heart beat signal to the device. Heart beats are triggered by the Actuator Agent which must be running to use this feature. - - **group** - Group this device belongs to. Defaults to 0 - -These settings are used to create the topic that this device will be referenced by following the VOLTTRON convention of {campus}/{building}/{unit}. This will also be the topic published on, when the device is periodically scraped for it's current state. - -The topic used to reference the device is derived from the name of the device configuration in the store. See the `Adding Device Configurations to the Configuration Store`_ section. - -Device Grouping -............... - -Devices may be placed into groups to separate them logically when they are scraped. This is done by setting the `group` in the device configuration. `group` is a number greater than or equal to 0. -Only number of devices in the same group and the `group_offset_interval` are considered when determining when to scrape a device. - -This is useful in two cases. First, if you need to ensure that certain devices are scraped in close proximity to each other you can put them in their own group. -If this causes devices to be scraped too quickly the groups can be separated out time wise using the `group_offset_interval` setting. -Second, you may scrape devices on different networks in parallel for performance. For instance BACnet devices behind a single MSTP router need to be scraped slowly and serially, but devices behind different routers may be scraped in parallel. Grouping devices by router will do this automatically. - -The `group_offset_interval` is applied by multiplying it by the `group` number. If you intent to use `group_offset_interval` only use consecutive `group` values that start with 0. - - -Registry Configuration File ---------------------------- -Registry configuration files setup each individual point on a device. Typically this file will be in CSV format, but the exact format is driver specific. See the section for a particular driver for the registry configuration format. - -The following is a simple example of a MODBUS registry configuration file: - -.. csv-table:: Catalyst 371 - :header: Reference Point Name,Volttron Point Name,Units,Units Details,Modbus Register,Writable,Point Address,Default Value,Notes - - CO2Sensor,ReturnAirCO2,PPM,0.00-2000.00,>f,FALSE,1001,,CO2 Reading 0.00-2000.0 ppm - CO2Stpt,ReturnAirCO2Stpt,PPM,1000.00 (default),>f,TRUE,1011,1000,Setpoint to enable demand control ventilation - HeatCall2,HeatCall2,On / Off,on/off,BOOL,FALSE,1114,,Status indicator of heating stage 2 need - -.. _config-store: - -======================================================= -Adding Device Configurations to the Configuration Store -======================================================= - -Configurations are added to the Configuration Store using the command line `volttron-ctl config store platform.driver `. - -* **name** - The name used to refer to the file from the store. -* **file name** - A file containing the contents of the configuration. -* **file type** - `--raw`, `--json`, or `--csv`. Indicates the type of the file. Defaults to `--json`. - -The main configuration must have the name `config` - -Device configuration but **not** registry configurations must have a name prefixed with `devices/`. Scripts that automate the process will prefix registry configurations with `registry_configs/`, but that is not a requirement for registry files. - -The name of the device's configuration in the store is used to create the topic used to reference the device. For instance, a configuration named ``devices/PNNL/ISB1/vav1`` will publish scrape results to ``devices/PNNL/ISB1/vav1`` and is accessible with the Actuator Agent via ``PNNL/ISB1/vav1``. - -The name of a registry configuration must match the name used to refer to it in the driver configuration. The reference is not case sensitive. - -If the Master Driver Agent is running any changes to the configuration store will immediately affect the running devices according to the changes. - -Consider the following three configuration files: - -A master driver configuration called `master-driver.agent`: - -.. code-block:: json - - { - "driver_scrape_interval": 0.05 - } - -A MODBUS device configuration file called `modbus1.config`: - -.. code-block:: json - - { - "driver_config": {"device_address": "10.1.1.2", - "port": 502, - "slave_id": 5}, - "driver_type": "modbus", - "registry_config":"config://registry_configs/hvac.csv", - "interval": 60, - "timezone": "UTC", - "heart_beat_point": "heartbeat" - } - -A MODBUS registry configuration file called `catalyst371.csv`: - -.. csv-table:: catalyst371.csv - :header: Reference Point Name,Volttron Point Name,Units,Units Details,Modbus Register,Writable,Point Address,Default Value,Notes - - CO2Sensor,ReturnAirCO2,PPM,0.00-2000.00,>f,FALSE,1001,,CO2 Reading 0.00-2000.0 ppm - CO2Stpt,ReturnAirCO2Stpt,PPM,1000.00 (default),>f,TRUE,1011,1000,Setpoint to enable demand control ventilation - HeatCall2,HeatCall2,On / Off,on/off,BOOL,FALSE,1114,,Status indicator of heating stage 2 need - -To store the master driver configuration run the command - -``volttron-ctl config store platform.driver config master-driver.agent`` - -To store the registry configuration run the command (note the --csv option) - -``volttron-ctl config store platform.driver registry_configs/hvac.csv catalyst371.csv --csv`` - -Note the name ``registry_configs/hvac.csv`` matches the configuration reference in the file ``modbus1.config``. - -To store the driver configuration run the command - -``volttron-ctl config store platform.driver devices/my_campus/my_building/hvac1 modbus1.config`` - - -Converting Old Style Configuration ----------------------------------- - -The new Master Driver no longer supports the old style of device configuration. The old ``device_list`` setting is ignored. - -To simplify updating to the new format ``scripts/update_master_driver_config.py`` is provide to automatically update to the new configuration format. - -With the platform running run: - -``python scripts/update_master_driver_config.py `` - -**old_configuration** is the main configuration file in the old format. The script automatically modifies the driver files to create references to CSV files and adds the CSV files with the appropriate name. - -**output** is the target output directory. - -If the ``--keep-old`` switch is used the old configurations in the output directory (if any) will not be deleted before new configurations are created. Matching names will still be overwritten. - -The output from ``scripts/update_master_driver_config.py`` can be automatically added to the configuration store -for the Master Driver agent with ``scripts/install_master_driver_configs.py``. - -Creating and naming configuration files in the form needed by ``scripts/install_master_driver_configs.py`` -can speed up the process of changing and updating a large number of configurations. See the ``--help`` -message for ``scripts/install_master_driver_configs.py`` for more details. - -Device State Publishes ----------------------- - -By default, the value of each register on a device is published 4 different ways when the device state is published. -Consider the following settings in a driver configuration stored under the name ``devices/pnnl/isb1/vav1``: - -.. code-block:: json - - { - "driver_config": {"device_address": "10.1.1.5", - "device_id": 500}, - - "driver_type": "bacnet", - "registry_config":"config://registry_configs/vav.csv", - } - -In the ``vav.csv`` file is a register with the name ``temperature``. For these examples -the current value of the register on the device happens to be 75.2 and the meta data -is - -.. code-block:: python - - {"units": "F"} - -When the driver publishes the device state the following 2 things will be published for this register: - - A "depth first" publish to the topic ``devices/pnnl/isb1/vav1/temperature`` - with the following message: - - .. code-block:: python - - [75.2, {"units": "F"}] - - A "breadth first" publish to the topic ``devices/temperature/vav1/isb1/pnnl`` - with the following message: - - .. code-block:: python - - [75.2, {"units": "F"}] - - These publishes can be turned off by setting `publish_depth_first` and `publish_breadth_first` to `false` respectively. - -Also these two publishes happen once for all registers: - - A "depth first" publish to the topic ``devices/pnnl/isb1/vav1/all`` - with the following message: - - .. code-block:: python - - [{"temperature": 75.2, ...}, {"temperature":{"units": "F"}, ...}] - - A "breadth first" publish to the topic ``devices/all/vav1/isb1/pnnl`` - with the following message: - - .. code-block:: python - - [{"temperature": 75.2, ...}, {"temperature":{"units": "F"}, ...}] - - These publishes can be turned off by setting `publish_depth_first_all` and `publish_breadth_first_all` to `false` respectively. - -Device Scalability Settings ---------------------------- - -In order to improve the scalability of the platform unneeded device state publishes for a device can be turned off. -All of the following setting are optional and will override the value set in the main master driver configuration. - - - **publish_depth_first_all** - Enable "depth first" publish of all points to a single topic. - - **publish_breadth_first_all** - Enable "breadth first" publish of all points to a single topic. - - **publish_depth_first** - Enable "depth first" device state publishes for each register on the device. - - **publish_breadth_first** - Enable "breadth first" device state publishes for each register on the device. - -It is common practice to set **publish_breadth_first_all**, **publish_depth_first**, and -**publish_breadth_first** to `False` unless they are specifically needed by an agent running on -the platform. - - -.. note:: - - All Historian Agents require **publish_depth_first_all** to be set to `True` in order to capture data. diff --git a/docs/source/core_services/drivers/driver_configuration/bacnet-driver.rst b/docs/source/core_services/drivers/driver_configuration/bacnet-driver.rst deleted file mode 100644 index 426c0e6da5..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/bacnet-driver.rst +++ /dev/null @@ -1,100 +0,0 @@ -.. _BACnet-Driver: - -BACnet Driver Configuration ---------------------------- -Communicating with BACnet devices requires that the BACnet Proxy Agent is configured and running. All device communication happens through this agent. - -Requirements ------------- -The BACnet driver requires the Pint package. This package can be installed in an -activated environment with: - -:: - - pip install bacpypes - -driver_config -************* - -There are nine arguments for the "driver_config" section of the device configuration file: - - - **device_address** - Address of the device. If the target device is behind an IP to MS/TP router then Remote Station addressing will probably be needed for the driver to find the device. - - **device_id** - BACnet ID of the device. Used to establish a route to the device at startup. - - **min_priority** - (Optional) Minimum priority value allowed for this device whether specifying the priority manually or via the registry config. Violating this parameter either in the configuration or when writing to the point will result in an error. Defaults to 8. - - **max_per_request** - (Optional) Configure driver to manually segment read requests. The driver will only grab up to the number of objects specified in this setting at most per request. This setting is primarily for scraping many points off of low resource devices that do not support segmentation. Defaults to 10000. - - **proxy_address** - (Optional) VIP address of the BACnet proxy. Defaults to "platform.bacnet_proxy". See :ref:`bacnet-proxy-multiple-networks` for details. Unless your BACnet network has special needs you should not change this value. - - **ping_retry_interval** - (Optional) The driver will ping the device to establish a route at startup. If the BACnet proxy is not available the driver will retry the ping at this interval until it succeeds. Defaults to 5. - - **use_read_multiple** - (Optional) During a scrape the driver will tell the proxy to use a ReadPropertyMultipleRequest to get data from the device. Otherwise the proxy will use multiple ReadPropertyRequest calls. If the BACnet proxy is reporting a device is rejecting requests try changing this to false for that device. Be aware that setting this to false will cause scrapes for that device to take much longer. Only change if needed. Defaults to true. - - **cov_lifetime** - (Optional) When a device establishes a change of value subscription for a point, this argument will be used to determine the lifetime and renewal period for the subscription, in seconds. Defaults to 180. (Added to Master Driver version 3.2) - -Here is an example device configuration file: - -.. code-block:: json - - { - "driver_config": {"device_address": "10.1.1.3", - "device_id": 500, - "min_priority": 10, - "max_per_request": 24 - }, - "driver_type": "bacnet", - "registry_config":"config://registry_configs/vav.csv", - "interval": 5, - "timezone": "UTC", - "heart_beat_point": "heartbeat" - } - -A sample BACnet configuration file can be found `here `_ or -in the VOLTTRON repository in ``examples/configurations/drivers/bacnet1.config`` - -.. _BACnet-Registry-Configuration-File: - -BACnet Registry Configuration File -********************************** - -The registry configuration file is a `CSV `_ file. Each row configures a point on the device. - -Most of the configuration file can be generated with the ``grab_bacnet_config.py`` utility in ``scripts/bacnet``. See :doc:`BACnet-Auto-Configuration`. - -Currently, the driver provides no method to access array type properties even if the members of the array are of a supported type. - -The following columns are required for each row: - - - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this point. For instance, if the Volttron Point Name is HeatCall1 (and using the example device configuration above) then an agent would use "pnnl/isb2/hvac1/HeatCall1" to refer to the point when using the RPC interface of the actuator agent. - - **Units** - Used for meta data when creating point information on the historian. - - **BACnet Object Type** - A string representing what kind of BACnet standard object the point belongs to. Examples include: - - * analogInput - * analogOutput - * analogValue - * binaryInput - * binaryOutput - * binaryValue - * multiStateValue - - - **Property** - A string representing the name of the property belonging to the object. Usually, this will be "presentValue". - - **Writable** - Either "TRUE" or "FALSE". Determines if the point can be written to. Only points labeled TRUE can be written to through the ActuatorAgent. Points labeled "TRUE" incorrectly will cause an error to be returned when an agent attempts to write to the point. - - **Index** - Object ID of the BACnet object. - -The following columns are optional: - - - **Write Priority** - BACnet priority for writing to this point. Valid values are 1-16. Missing this column or leaving the column blank will use the default priority of 16. - - **COV Flag** - Either "True" or False". Determines if a BACnet Change of Value subscription should be established for this point. Missing this column or leaving the column blank will result in no change of value subscriptions being established. (Added to Master Driver version 3.2) - -Any additional columns will be ignored. It is common practice to include a **Point Name** or **Reference Point Name** to include the device documentation's name for the point and **Notes** and **Unit Details**" for additional information about a point. - -.. csv-table:: BACnet - :header: Point Name,Volttron Point Name,Units,Unit Details,BACnet Object Type,Property,Writable,Index,Notes - - Building/FCB.Local Application.PH-T,PreheatTemperature,degreesFahrenheit,-50.00 to 250.00,analogInput,presentValue,FALSE,3000119,Resolution: 0.1 - Building/FCB.Local Application.RA-T,ReturnAirTemperature,degreesFahrenheit,-50.00 to 250.00,analogInput,presentValue,FALSE,3000120,Resolution: 0.1 - Building/FCB.Local Application.RA-H,ReturnAirHumidity,percentRelativeHumidity,0.00 to 100.00,analogInput,presentValue,FALSE,3000124,Resolution: 0.1 - Building/FCB.Local Application.CLG-O,CoolingValveOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000107,Resolution: 0.1 - Building/FCB.Local Application.MAD-O,MixedAirDamperOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000110,Resolution: 0.1 - Building/FCB.Local Application.PH-O,PreheatValveOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000111,Resolution: 0.1 - Building/FCB.Local Application.RH-O,ReheatValveOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000112,Resolution: 0.1 - Building/FCB.Local Application.SF-O,SupplyFanSpeedOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000113,Resolution: 0.1 - - -A sample BACnet registry file can be found `here `_ or -in the VOLTTRON repository in ``examples/configurations/drivers/bacnet.csv`` diff --git a/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst b/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst deleted file mode 100644 index 7cd84fb189..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst +++ /dev/null @@ -1,349 +0,0 @@ -.. _ecobee-web-driver: - -************* -Ecobee Driver -************* - -The Ecobee driver is an implementation of a :ref:`VOLTTRON driver framework ` Interface. -In this case, the Master Driver issues commands to the Ecobee driver to collect data from and send control signals to -`Ecobee's remote web API `_ - -.. note:: - - Reading the driver framework and driver configuration documentation prior to following this guide will help the user - to understand drivers, driver communication, and driver configuration files. - -This guide covers: - -* Creating an Ecobee application via the web interface -* Creating an Ecobee driver configuration file, including finding the user's Ecobee API key and Ecobee thermostat serial - number -* Creating an Ecobee registry configuration file -* Installing the Master Driver and loading Ecobee driver and registry configurations -* Starting the driver and viewing Ecobee data publishes - - -.. _Ecobee-Application: - -Ecobee Application -################## - -Connecting the Ecobee driver to the Ecobee API requires configuring your account with an Ecobee application. - -#. Log into the `Ecobee site `_ - -#. Click on the "hamburger" icon on the right to open the account menu, then click "Developer" - - .. image:: files/ecobee_developer_menu.png - -#. On the bottom-left corner of the screen that appears, click "Create New" - - .. image:: files/ecobee_create_app.png - -#. Fill out the name, summary, and description forms as desired. Click "Authorization Method" and from the drop-down - that appears, select "ecobee PIN" (this will enable an extra layer of authentication to protect your account) - -#. Record the API key for the Application from the Developer menu - - .. figure:: files/ecobee_api_key.png - - From Ecobee `authenication docs `_ - - -Configuration File -################## - -The Ecobee driver uses two configuration files, a driver configuration which sets the parameters of the behavior of the -driver, and registry configuration which instructs the driver on how to interact with each point. - -This is an example driver configuration: - -.. code-block:: JSON - - { - "driver_config": { - "API_KEY": "abc123", - "DEVICE_ID": 8675309 - }, - "driver_type": "ecobee", - "registry_config":"config://campus/building/ecobee.csv", - "interval": 180, - "timezone": "UTC" - } - -The driver configuration works as follows: - -+-----------------+----------------------------------------------------------------------------------------------------+ -| config field | description | -+=================+====================================================================================================+ -| driver_config | this section specifies values used by the driver agent during operation | -+-----------------+----------------------------------------------------------------------------------------------------+ -| API_KEY | This is the User's API key. This must be obtained by the user from the Ecobee web UI and provided | -| | in this part of the configuration. Notes on how to do this will be provided below. | -+-----------------+----------------------------------------------------------------------------------------------------+ -| DEVICE_ID | This is the device number of the Ecobee thermostat the driver is responsible for operating. This | -| | must be obtained by the user from the Ecobee web UI. Notes on how to do this will be provided | -| | below. | -+-----------------+----------------------------------------------------------------------------------------------------+ -| driver_type | This value should match the name of the python file which contains the interface class | -| | implementation for the Ecobee driver and should not change. | -+-----------------+----------------------------------------------------------------------------------------------------+ -| registry_config | This should a user specified path of the form "config://. It is recommended to use the | -| | device topic string following "devices" with the file extension | -| | ("config:///`_ | -+-----------------+----------------------------------------------------------------------------------------------------+ - -.. note:: - - Values for API_KEY and DEVICE_ID must be obtained by the user. DEVICE_ID should be added as an integer - representation of the thermostat's serial number. - - **Getting API Key** - - Ecobee API keys require configuring an application using the Ecobee web UI. For more information on configuring an - application and obtaining the API key, please refer to the `Ecobee Application `_ heading in - this documentation. - - **Finding Device Identifier** - - To find your Ecobee thermostat's device identifier: - - 1. Log into the `Ecobee customer portal `_ - 2. From the Home screen click "About My Ecobee" - 3. The thermostat identifier is the serial number listed on the About screen - - -Registry Configuration ----------------------- - -This file specifies how data is read from Ecobee API response data as well as how points are set via the Master Driver -and actuator. - -It is likely that more points may be added to obtain additional data, but barring implementation changes by Ecobee it is -unlikely that the values in this configuration will need to change substantially, as most thermostats provide the -same range of data in a similar format. - -This is an example registry configuration: - -+-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ -| Point Name | Volttron Point Name | Units | Type | Writable | Readable | Default Value | Notes | -+===================+=====================+=========+=========+==========+==========+===============+=======+ -| fanMinOnTime | fanMinOnTime | seconds | setting | True | True | | | -+-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ -| hvacMode | hvacMode | seconds | setting | True | True | | | -+-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ -| humidity | humidity | % | setting | False | True | | | -+-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ -| coolHoldTemp | coolHoldTemp | degF | hold | True | False | | | -+-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ -| heatHoldTemp | heatHoldTemp | degF | hold | True | False | | | -+-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ -| actualTemperature | actualTemperature | degF | hold | False | True | | | -+-------------------+---------------------+-------------------+----------+----------+---------------+-------+ - -.. note:: - - An example registry configuration containing all points from the development device is available in the - `examples/configurations/drivers/ecobee.csv` file in the VOLTTRON repository. - - -This configuration works as follows: - -+---------------------+------------------------------------------------------------------------------------------------+ -| config field | description | -+=====================+================================================================================================+ -| Point Name | Name of a point as it appears in Ecobee response data (example below) | -+---------------------+------------------------------------------------------------------------------------------------+ -| Volttron Point Name | Name of a point as a user would like it to be displayed in data publishes to the message bus | -+---------------------+------------------------------------------------------------------------------------------------+ -| Units | Unit of measurement specified by remote API | -+---------------------+------------------------------------------------------------------------------------------------+ -| Type | The Ecobee driver registry configuration supports "setting" and "hold" register types, based | -| | on how the data is represented in Ecobee response data (example below) | -+---------------------+------------------------------------------------------------------------------------------------+ -| Writable | Whether or not the point is able to be written to. This may be determined by what Ecobee | -| | allows, and by the operation of Ecobee's API (to set an Ecobee cool/heat hold, cool/HoldTemp | -| | is used, but to read other data points are used and therefore are not writable; this is a | -| | quirk of Ecobee's API) | -+---------------------+------------------------------------------------------------------------------------------------+ -| Readable | Whether or not the point is able to be read as specified. This may be determined by what | -| | Ecobee allows, and by the operation of Ecobee's API (to set an Ecobee cool/heat hold, | -| | cool/HoldTemp is used, however the requested hold values are represented as desiredCool/Heat | -| | in Ecobee's response data; this is a quirk of Ecobee's API) | -+---------------------+------------------------------------------------------------------------------------------------+ -| Default Value | Used to send device defaults to the Ecobee API, this is optional. | -+---------------------+------------------------------------------------------------------------------------------------+ -| Notes | Any user specified notes, this is optional | -+---------------------+------------------------------------------------------------------------------------------------+ - -For additional explanation on the quirks of Ecobee's readable/writable points, visit: -https://www.ecobee.com/home/developer/api/documentation/v1/functions/SetHold.shtml - - -Installation -############ - -The following instructions make up the minimal steps required to set up an instance of the Ecobee driver on the VOLTTRON -platform and connect it to the Ecobee remote API: - -#. Create a directory using the path $VOLTTRON_ROOT/configs and create two files, `ecobee.csv` and `ecobee.config`. - Copy the registry config to the `ecobee.csv` file and the driver config to the `ecobee.config file`. Modify the - `API_KEY` and `DEVICE_ID` fields from the driver config with your own API key and device serial number. - -#. If the platform has not been started: - - .. code-block:: Bash - - ./start-volttron - -#. Be sure that the environment has been activated - you should see (volttron) next to @ in your terminal - window. To activate an environment, use the following command. - - .. code-block:: Bash - - source env/bin/activate - -#. Install a Master Driver if one is not yet installed - - .. code-block:: Bash - - python scripts/install-agent.py --agent-source services/core/MasterDriverAgent --config \ - examples/configurations/drivers/master-driver.agent --tag platform.driver - -#. Load the driver configuration into the configuration store ("vctl config list platform.driver" can be used to show - installed configurations) - - .. code-block:: Bash - - vctl config store platform.driver devices/campus/building/ecobee $VOLTTRON_ROOT/configs/ecobee.config - -#. Load the driver's registry configuration into the configuration store - - .. code-block:: Bash - - vctl config store platform.driver campus/building/ecobee.csv $VOLTTRON_ROOT/configs/ecobee.csv --csv - -#. Start the master driver - - .. code-block:: Bash - - vctl start platform.driver - -At this point, the master driver will start, configure the driver agent, and data should start to publish on the publish -interval. - -.. note:: - - If starting the driver for the first time, or if the authorization which is managed by the driver is out of date, - the driver will perform some additional setup internally to authenticate the driver with the Ecobee API. This stage - will require the user enter a pin provided in the `volttron.log` file to the Ecobee web UI. The Ecobee driver has - a wait period of 60 seconds to allow users to enter the pin code into the Ecobee UI. Instructions for pin - verification follow. - - -PIN Verification steps: ------------------------ - -#. Obtain the pin from the VOLTTRON logs. The pin is a 4 character long string in the logs flanked by 2 rows of - asterisks - - .. image:: files/ecobee_pin.png - -#. Log into the `Ecobee UI `_ . After logging in, the - customer dashboard will be brought up, which features a series of panels (where the serial number was found for - device configuration) and a "hamburger" menu. - - .. image:: files/ecobee_console.png - -#. Add the application: Click the "hamburger" icon which will display a list of items in a panel that becomes - visible on the right. Click "My Apps", then "Add application". A text form will appear, enter the pin provided in - VOLTTRON logs here, then click "validate" and "add application. - - .. image:: files/ecobee_verify_pin.png - -This will complete the pin verification step. - - -Ecobee Driver Usage -################### - -At the configured interval, the master driver will publish a JSON object -with data obtained from Ecobee based on the provided configuration files. - -To view the publishes in the `volttron.log` file, install and start a ListenerAgent: - -.. code-block:: Bash - - python scripts/install-agent.py -s examples/ListenerAgent - -The following is an example publish: - -.. code-block:: Bash - - 'Status': [''], - 'Vacations': [{'coolHoldTemp': 780, - 'coolRelativeTemp': 0, - 'drRampUpTemp': 0, - 'drRampUpTime': 3600, - 'dutyCyclePercentage': 255, - 'endDate': '2020-03-29', - 'endTime': '08:00:00', - 'fan': 'auto', - 'fanMinOnTime': 0, - 'heatHoldTemp': 660, - 'heatRelativeTemp': 0, - 'holdClimateRef': '', - 'isCoolOff': False, - 'isHeatOff': False, - 'isOccupied': False, - 'isOptional': True, - 'isTemperatureAbsolute': True, - 'isTemperatureRelative': False, - 'linkRef': '', - 'name': 'Skiing', - 'occupiedSensorActive': False, - 'running': False, - 'startDate': '2020-03-15', - 'startTime': '20:00:00', - 'type': 'vacation', - 'unoccupiedSensorActive': False, - 'vent': 'off', - 'ventilatorMinOnTime': 5}], - 'actualTemperature': 720, - 'desiredCool': 734, - 'desiredHeat': 707, - 'fanMinOnTime': 0, - 'humidity': '36', - 'hvacMode': 'off'}, - {'Programs': {'type': 'custom', 'tz': 'UTC', 'units': None}, - 'Status': {'type': 'list', 'tz': 'UTC', 'units': None}, - 'Vacations': {'type': 'custom', 'tz': 'UTC', 'units': None}, - 'actualTemperature': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, - 'coolHoldTemp': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, - 'desiredCool': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, - 'desiredHeat': {'type': 'integer',S 'tz': 'UTC', 'units': 'degF'}, - 'fanMinOnTime': {'type': 'integer', 'tz': 'UTC', 'units': 'seconds'}, - 'heatHoldTemp': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, - 'humidity': {'type': 'integer', 'tz': 'UTC', 'units': '%'}, - 'hvacMode': {'type': 'bool', 'tz': 'UTC', 'units': 'seconds'}}] - -Individual points can be obtained via JSON RPC on the VOLTTRON Platform. -In an agent: - -.. code-block:: Python - - self.vip.rpc.call("platform.driver", "get_point", , ) - -Versioning ----------- - -The Ecobee driver has been tested using the May 2019 API release as well as device firmware version 4.5.73.24 diff --git a/docs/source/core_services/drivers/driver_configuration/fake-driver.rst b/docs/source/core_services/drivers/driver_configuration/fake-driver.rst deleted file mode 100644 index 2c3a6ba907..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/fake-driver.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. _Fake-Driver: - -Fake Device Driver Configuration --------------------------------- -This driver does not connect to any actual device and instead produces random and or pre-configured values. - -driver_config -************* - -There are no arguments for the "driver_config" section of the device configuration file. The driver_config entry must still be present and should be left blank - -Here is an example device configuration file: - -.. code-block:: json - - { - "driver_config": {}, - "driver_type": "bacnet", - "registry_config":"config://registry_configs/vav.csv", - "interval": 5, - "timezone": "UTC", - "heart_beat_point": "heartbeat" - } - -A sample fake device configuration file can be found in the VOLTTRON repository in ``examples/configurations/drivers/fake.config`` - -Fake Device Registry Configuration File -*************************************** - -The registry configuration file is a `CSV `_ file. Each row configures a point on the device. - -The following columns are required for each row: - - - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this point. For instance, if the Volttron Point Name is HeatCall1 (and using the example device configuration above) then an agent would use *pnnl/isb2/hvac1/HeatCall1* to refer to the point when using the RPC interface of the actuator agent. - - **Units** - Used for meta data when creating point information on the historian. - - **Writable** - Either "TRUE" or "FALSE". Determines if the point can be written to. Only points labeled TRUE can be written to through the ActuatorAgent. Points labeled "TRUE" incorrectly will cause an error to be returned when an agent attempts to write to the point. - - -The following columns are optional: - - - **Starting Value** - Initial value for the point. If the point is reverted it will change back to this value. By default, points will start with a random value (1-100). - - **Type** - Value type for the point. Defaults to "string". Valid types are: - - * string - * integer - * float - * boolean - -Any additional columns will be ignored. It is common practice to include a **Point Name** or **Reference Point Name** to include the device documentation's name for the point and **Notes** and **Unit Details** for additional information about a point. Please note that there is nothing in the driver that will enforce anything specified in the **Unit Details** column. - -.. csv-table:: BACnet - :header: Volttron Point Name,Units,Units Details,Writable,Starting Value,Type,Notes - - Heartbeat,On/Off,On/Off,TRUE,0,boolean,Point for heartbeat toggle - OutsideAirTemperature1,F,-100 to 300,FALSE,50,float,CO2 Reading 0.00-2000.0 ppm - SampleWritableFloat1,PPM,10.00 (default),TRUE,10,float,Setpoint to enable demand control ventilation - SampleLong1,Enumeration,1 through 13,FALSE,50,int,Status indicator of service switch - SampleWritableShort1,%,0.00 to 100.00 (20 default),TRUE,20,int,Minimum damper position during the standard mode - SampleBool1,On / Off,on/off,FALSE,TRUE,boolean,Status indicator of cooling stage 1 - SampleWritableBool1,On / Off,on/off,TRUE,TRUE,boolean,Status indicator - - -A sample fake registry configuration file can be found `here `_ or -in the VOLTTRON repository in ``examples/configurations/drivers/fake.csv`` diff --git a/docs/source/core_services/drivers/driver_configuration/index.rst b/docs/source/core_services/drivers/driver_configuration/index.rst deleted file mode 100644 index 3a7cda0110..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _VOLTTRON-Drivers: - -================ -VOLTTRON Drivers -================ - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/drivers/driver_configuration/modbus-driver.rst b/docs/source/core_services/drivers/driver_configuration/modbus-driver.rst deleted file mode 100644 index 4d9932b290..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/modbus-driver.rst +++ /dev/null @@ -1,92 +0,0 @@ -.. _MODBUS-config: - -Modbus Driver Configuration ---------------------------- -VOLTTRON's modbus driver supports the Modbus over TCP/IP protocol only. For Modbus RTU support, -see VOLTTRON's modbus-tk driver. - -Requirements ------------- -The Modbus driver requires the pymodbus package. This package can be installed in an -activated environment with: - -:: - - pip install pymodbus - -driver_config -************* - -There are three arguments for the **driver_config** section of the device configuration file: - - - **device_address** - IP Address of the device. - - **port** - Port the device is listening on. Defaults to 502 which is the standard port for MODBUS devices. - - **slave_id** - Slave ID of the device. Defaults to 0. Use 0 for no slave. - -Here is an example device configuration file: - -.. code-block:: json - - { - "driver_config": {"device_address": "10.1.1.2", - "port": 502, - "slave_id": 5}, - "driver_type": "modbus", - "registry_config":"config://registry_configs/hvac.csv", - "interval": 60, - "timezone": "UTC", - "heart_beat_point": "heartbeat" - } - -A sample MODBUS configuration file can be found in the VOLTTRON repository in ``examples/configurations/drivers/modbus.config`` - - -.. _MODBUS-Driver: - -Modbus Registry Configuration File -********************************** - -The registry configuration file is a `CSV `_ file. Each row configures a point on the device. - -The following columns are required for each row: - - - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this point. For instance, if the Volttron Point Name is HeatCall1 (and using the example device configuration above) then an agent would use ``pnnl/isb2/hvac1/HeatCall1`` to refer to the point when using the RPC interface of the actuator agent. - - **Units** - Used for meta data when creating point information on the historian. - - **Modbus Register** - A string representing how to interpret the data register and how to read it from the device. The string takes two forms: - - + "BOOL" for coils and discrete inputs. - + A format string for the Python struct module. See http://docs.python.org/2/library/struct.html for full documentation. The supplied format string must only represent one value. See the documentation of your device to determine how to interpret the registers. Some Examples: - - * ">f" - A big endian 32-bit floating point number. - * "l" - A big endian 32-bit integer. - - - **Writable** - Either "TRUE" or "FALSE". Determines if the point can be written to. Only points labeled TRUE can be written to through the ActuatorAgent. - - **Point Address** - Modbus address of the point. Cannot include any offset value, it must be the exact value of the address. - - **Mixed Endian** - (Optional) Either "TRUE" or "FALSE". For mixed endian values. This will reverse the order of the MODBUS registers that make up this point before parsing the value or writing it out to the device. Has no effect on bit values. - -The following column is optional: - - - **Default Value** - The default value for the point. When the point is reverted by an agent it will change back to this value. If this value is missing it will revert to the last known value not set by an agent. - -Any additional columns will be ignored. It is common practice to include a **Point Name** or **Reference Point Name** to include the device documentation's name for the point and **Notes** and **Unit Details** for additional information about a point. - -The following is an example of a MODBUS registry confugration file: - -.. csv-table:: Catalyst 371 - :header: Reference Point Name,Volttron Point Name,Units,Units Details,Modbus Register,Writable,Point Address,Default Value,Notes - - CO2Sensor,ReturnAirCO2,PPM,0.00-2000.00,>f,FALSE,1001,,CO2 Reading 0.00-2000.0 ppm - CO2Stpt,ReturnAirCO2Stpt,PPM,1000.00 (default),>f,TRUE,1011,1000,Setpoint to enable demand control ventilation - Cool1Spd,CoolSupplyFanSpeed1,%,0.00 to 100.00 (75 default),>f,TRUE,1005,75,Fan speed on cool 1 call - Cool2Spd,CoolSupplyFanSpeed2,%,0.00 to 100.00 (90 default),>f,TRUE,1007,90,Fan speed on Cool2 Call - Damper,DamperSignal,%,0.00 - 100.00,>f,FALSE,1023,,Output to the economizer damper - DaTemp,DischargeAirTemperature,F,(-)39.99 to 248.00,>f,FALSE,1009,,Discharge air reading - ESMEconMin,ESMDamperMinPosition,%,0.00 to 100.00 (5 default),>f,TRUE,1013,5,Minimum damper position during the energy savings mode - FanPower,SupplyFanPower, kW,0.00 to 100.00,>f,FALSE,1015,,Fan power from drive - FanSpeed,SupplyFanSpeed,%,0.00 to 100.00,>f,FALSE,1003,,Fan speed from drive - HeatCall1,HeatCall1,On / Off,on/off,BOOL,FALSE,1113,,Status indicator of heating stage 1 need - HeartBeat,heartbeat,On / Off,on/off,BOOL,FALSE,1114,,Status indicator of heating stage 2 need - -A sample MODBUS registry file can be found `here `_ or -in the VOLTTRON repository in ``examples/configurations/drivers/catalyst371.csv`` diff --git a/docs/source/core_services/drivers/driver_configuration/rainforest-driver.rst b/docs/source/core_services/drivers/driver_configuration/rainforest-driver.rst deleted file mode 100644 index 8d8cce3a73..0000000000 --- a/docs/source/core_services/drivers/driver_configuration/rainforest-driver.rst +++ /dev/null @@ -1,49 +0,0 @@ -.. _Rainforest-Driver: - -Rainforest Emu2 Driver Configuration ------------------------------------- - -The Emu2 is a device for connecting to and reading data from smart power meters. -We have an experimental driver to talk to this device. It requires cloning the -Rainforest Automation library which can be found -`here `_. - -.. note:: - - The Emu Serial Api library has its own dependencies which should be installed - with pip while the VOLTTRON environment is activated. - - :: - - pip install -r requirements.txt - -The Emu2 device interface is configured as follows. Set `emu_library_path` -to the location of the cloned library. `tty` should be set to the name of the -Emu2's character special file. One way to find this is to run `dmesg` before -and after plugging in the Emu2, and checking the new output. - -.. code-block:: json - - { - "driver_config": { - "tty": "ttyACM0", - "emu_library_path": "/home/volttron/Emu-Serial-Api" - }, - "driver_type": "rainforestemu2", - "interval": 30, - "registry_config": "config://emu2.json", - "timezone": "UTC" - } - -The registry config file referred to in the first configuration must be an array -of strings. This tells the interface which data points should be retrieved from -the device every interval. If the NetworkInfo point is omitted it will be -included automatically. - -.. code-block:: json - - [ - "NetworkInfo", - "InstantaneousDemand", - "PriceCluster" - ] diff --git a/docs/source/core_services/drivers/index.rst b/docs/source/core_services/drivers/index.rst deleted file mode 100644 index 8425b97d03..0000000000 --- a/docs/source/core_services/drivers/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _VOLTTRON-Driver-Framework: - -========================= -VOLTTRON Driver Framework -========================= - -All Voltton drivers are implemented through the Master Driver Agent and are technically sub-agents -running in the same process as the Master Driver Agent. Each of these driver sub-agents is responsible -for creating an interface to a single device. Creating that interface is facilitated by an instance of -an interface class. There are a variety of interface classes included. The most commonly used interfaces -are BACnet and Modbus. - -.. toctree:: - :glob: - :maxdepth: 2 - - * - - driver_configuration/index diff --git a/docs/source/core_services/drivers/master_driver_override.rst b/docs/source/core_services/drivers/master_driver_override.rst deleted file mode 100644 index 04f16975d4..0000000000 --- a/docs/source/core_services/drivers/master_driver_override.rst +++ /dev/null @@ -1,106 +0,0 @@ -.. _Master_Driver_Override: - -====================== -Master Driver Override -====================== - -By default, every user is allowed write access to the devices by the master driver. The override -feature will allow the user (for example, building administrator) to override this default -behavior and enable the user to lock the write access on the devices for a specified duration of -time or indefinitely. - -Set Override On ---------------- - -The Master Driver's "set_override_on" RPC method can be used to set the override condition for -all drivers with topic matching the provided pattern. This can be specific devices, groups of -devices, or even all configured devices. The pattern matching is based on bash style filename -matching semantics. - -Parameters: - - pattern: - Override pattern to be applied. For example, - If pattern is campus/building1/* - Override condition is applied for all the - devices under campus/building1/. - If pattern is campus/building1/ahu1 - Override condition is applied for only - campus/building1/ahu1. The pattern matching is based on bash style filename - matching semantics. - duration: - Time duration for the override in seconds. If duration <= 0.0, it implies as - indefinite duration. - failsafe_revert: - Flag to indicate if all the devices falling under the override condition has to be set - to its default state/value immediately. - staggered_revert: - If this flag is set, reverting of devices will be staggered. - -Example "set_override_on" RPC call: - -:: - - self.vip.rpc.call(PLATFORM_DRIVER, "set_override_on", , ) - -Set Override Off ----------------- - -The override condition can also be toggled off based on a provided pattern using the Master -Driver's "set_override_off" RPC call. - -Parameters: - - pattern: - Override pattern to be applied. For example, - If pattern is campus/building1/* - Override condition is applied for all the - devices under campus/building1/. - If pattern is campus/building1/ahu1 - Override condition is applied for only - campus/building1/ahu1. The pattern matching is based on bash style filename - matching semantics. - -Example "set_override_off" RPC call: - -:: - - self.vip.rpc.call(PLATFORM_DRIVER, "set_override_off", ) - -Get Override Devices --------------------- - -A list of all overridden devices can be obtained with the Master Driver's "get_override_devices" -RPC call. - -This method call has no additional parameters - -Example "get_override_devices" RPC call: - -:: - - self.vip.rpc.call(PLATFORM_DRIVER, "get_override_devices") - -Get Override Patterns ---------------------- - -A list of all patterns which have been requested for override can be obtained with the Master -Driver's "get_override_patterns" RPC call. - -This method call has no additional parameters - -Example "get_override_patterns" RPC call: - -:: - - self.vip.rpc.call(PLATFORM_DRIVER, "get_override_patterns") - -Clear Overrides ---------------- - -All overrides set by RPC calls described above can be toggled off at using a single -"clear_overrides" RPC call. - -This method call has no additional parameters - -Example "clear_overrides" RPC call: - -:: - - self.vip.rpc.call(PLATFORM_DRIVER, "clear_overrides") diff --git a/docs/source/core_services/historians/Crate-Historian.rst b/docs/source/core_services/historians/Crate-Historian.rst deleted file mode 100644 index 8ab0fa439b..0000000000 --- a/docs/source/core_services/historians/Crate-Historian.rst +++ /dev/null @@ -1,81 +0,0 @@ -.. _Crate-Historian: - -=============== -Crate Historian -=============== - -Crate is an open source SQL database designed on top of a No-SQL design. It -allows automatic data replication and self-healing clusters for high -availability, automatic sharding, and fast joins, aggregations and sub-selects. - -Find out more about crate from ``_. - - -Prerequisites -~~~~~~~~~~~~~ - -1. Crate Database ------------------ - -For Arch Linux, Debian, RedHat Enterprise Linux and Ubuntu distributions there -is a dead simple installer to get crate up and running on your system. - - :: - - sudo bash -c "$(curl -L https://try.crate.io)" - -This command will download and install all of the requirements for running -crate, create a crate user and install a crate service. After the installation -the service will be available for viewing at http://localhost:4200 by default. - -.. note:: There is no authentication support within crate. - - -2. Crate Driver ---------------- - -There is a python library for crate that must be installed in the volttron -python environment in order to access crate. From an activated environment, -in the root of the volttron folder, execute the following command: - - :: - - python bootstrap.py --crate - - -or - - :: - - python bootstrap.py --crate - - -or - - :: - - pip install crate - - -Configuration -~~~~~~~~~~~~~ - -Because there is no authorization to access a crate database the configuration -for the CrateHistorian is very easy. - -.. code-block:: python - - { - "connection": { - "type": "crate", - # Optional table prefix defaults to historian - "schema": "testing", - "params": { - "host": "localhost:4200" - } - } - } - -Finally package, install and start the CrateHistorian agent. - -.. seealso:: :ref:`Agent Development Walkthrough ` diff --git a/docs/source/core_services/historians/DataMover-Historian.rst b/docs/source/core_services/historians/DataMover-Historian.rst deleted file mode 100644 index f418b5aa9c..0000000000 --- a/docs/source/core_services/historians/DataMover-Historian.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. _DataMover: - -DataMover Historian -=================== - -DataMover sends data from its platform to a remote platform in cases where -there is not sufficient resources to store locally. It shares this -functionality with the :ref:`Forward Historian `, -However DataMover does not have a goal of data appearing "live" on the -remote platform. This allows DataMover to be more efficient by both batching -data and by sending an RPC call to a remote historian instead of publishing -data on the remote message bus. This allows allows DataMover to be more -robust by ensuring that the receiving historian is running. If the target is -unreachable, DataMover will cache data until it is available. - -Configuration -------------- - -The default configuration file is -*services/core/DataMover/config* . Change **destination-vip** to -point towards the foreign Volttron instance. - -:: - - { - "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket", - "destination-serverkey": null, - "required_target_agents": [], - "custom_topic_list": [], - "services_topic_list": [ - "devices", "analysis", "record", "datalogger", "actuators" - ], - "topic_replace_list": [ - #{"from": "FromString", "to": "ToString"} - ] - } - - -The **services_topic_list** allows you to specify which of the main data topics -to forward. If there is no entry, the historian defaults to sending all. - -**topic_replace_list** allows you to replace portions of topics if needed. This -could be used to correct or standardize topics or to replace building/device -names with an anonymized version. The receiving platform will only see the -replaced values. - - - -Adding the configuration option below will limit the backup cache -to *n* gigabytes. This will keep your hard drive from filling up if -the agent is disconnected from its target for a long time. - -:: - - "backup_storage_limit_gb": n - -See Also -~~~~~~~~ - -:ref:`Historians ` diff --git a/docs/source/core_services/historians/Forward-Historian.rst b/docs/source/core_services/historians/Forward-Historian.rst deleted file mode 100644 index a9a42bd7b4..0000000000 --- a/docs/source/core_services/historians/Forward-Historian.rst +++ /dev/null @@ -1,61 +0,0 @@ -.. _Forward-Historian: - -Forward Historian -================= - -The primary use case for the ForwardHistorian is to send data to another -instance of VOLTTRON as if the data were live. This allows agents running on a -more secure and/or more powerful machine to run analysis on data being -collected on a potentially less secure/powerful board. - -Given this use case, it is not optimized for batching large amounts of data -when liveness is not needed. For this use case, please see the -:ref:`DataMover Historian `. - -The forward historian can be found in the services/core directory. - -Configuration -------------- - -The default configuration file is -*services/core/ForwardHistorian/config* . Change **destination-vip** to -point towards the foreign Volttron instance. - -:: - - { - "agentid": "forwarder", - "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket" - } - -In order to send to a remote platform, you will need its VIP address and server -key. The server key can be found by running - -:: - - vctl auth serverkey - -Put the result into the following example -(Note the example uses a local IP address) - -:: - - { - "agentid": "forwarder", - "destination-vip": "tcp://127.0.0.1:22916", - "destination-serverkey": "" - } - - -Adding the configuration option below will limit the backup cache -to *n* gigabytes. This will keep your hard drive from filling up if -the agent is disconnected from its target for a long time. - -:: - - "backup_storage_limit_gb": n - -See Also -~~~~~~~~ - -:ref:`Historians ` diff --git a/docs/source/core_services/historians/Historian-Topic-Syntax.rst b/docs/source/core_services/historians/Historian-Topic-Syntax.rst deleted file mode 100644 index 1f4467ae66..0000000000 --- a/docs/source/core_services/historians/Historian-Topic-Syntax.rst +++ /dev/null @@ -1,110 +0,0 @@ -.. _Historian-Topic-Syntax: - -Historian Topic Syntax -====================== - -Each historian will subscribe to the following message bus topics -(datalogger/*, anaylsis/*, record/\* and devices/\*). For each of these -topics there is a different message syntax that must be adhered to in -order for the correct interpretation of the data being specified. - -record/\* ---------- -The record topic is the most flexible of all of the topics. This topic allows -any serializable message to be published to any topic under the root topic -'/record'. - -**Note: this topic is not recommended to plot, as the structure of the -messages are not necessarily numeric.** - -:: - - # Example messages that can be published - - # Dictionary data - {'foo': 'world'} - - # Numerical data - 52 - - # Time data (note not a datatime object) - '2015-12-02T11:06:32.252626' - -devices/\* ----------- - -The devices topic is meant to be data structured from a scraping of a -ModBus or BacNet device. Currently drivers for both of these -protocols write data to the message bus in the proper format. VOLTTRON -drivers also publish an aggregation of points in an "all" topic. **Only the -"all" topic messages are read and published to a historian.** -Both the all topic and point topic have the same header information, b -ut the message body for each is slightly different. -For a complete working example of these messages please see - -- :py:mod:`examples.ExampleSubscriber.subscriber.subscriber_agent` - -Format of header and message for device topics (i.e. messages published to -topics with pattern "devices/\*/all"): - -:: - - # Header contains the data associated with the message. - { - # python code to get this is - # from datetime import datetime - # from volttron.platform.messaging import headers as header_mod - # from volttron.platform.agent import utils - # now = utils.format_timestamp( datetime.utcnow()) - # { - # headers_mod.DATE: now, - # headers_mod.TIMESTAMP: now - # } - "Date": "2015-11-17 21:24:10.189393+00:00", - "TimeStamp": "2015-11-17 21:24:10.189393+00:00" - } - - # Message Format: - - # WITH METADATA - # Messages contains a two element list. The first element contains a - # dictionary of all points under a specific parent. While the second - # element contains a dictionary of meta data for each of the specified - # points. For example devices/pnnl/building/OutsideAirTemperature and - # devices/pnnl/building/MixedAirTemperature ALL message would be created as: - [ - {"OutsideAirTemperature ": 52.5, "MixedAirTemperature ": 58.5}, - { - "OutsideAirTemperature ": {'units': 'F', 'tz': 'UTC', 'type': 'float'}, - "MixedAirTemperature ": {'units': 'F', 'tz': 'UTC', 'type': 'float'} - } - ] - - #WITHOUT METADATA - # Message contains a dictionary of all points under a specific parent - {"OutsideAirTemperature ": 52.5, "MixedAirTemperature ": 58.5} - -analysis/\* ------------ - -Data sent to analysis/* topics is result of analysis done by applications. -The format of data sent to analysis/* topics is similar to data sent to -devices/\*/all topics. - -datalogger/\* -------------- -Messages published to datalogger will be assumed to be time point data that -is composed of units and specific types with the assumption that they have -the ability to be graphed easily. - -:: - - {'MixedAirTemperature': {'Readings': ['2015-12-02T00:00:00', - mixed_reading], - 'Units': 'F', - 'tz': 'UTC', - 'data_type': 'float'}} - -If no datetime value is specified as a part of the reading, current time is -used. Message can be published without any header. In the above message -'Readings' and 'Units' are mandatory diff --git a/docs/source/core_services/historians/Influxdb-Historian.rst b/docs/source/core_services/historians/Influxdb-Historian.rst deleted file mode 100644 index 4161f5d1f2..0000000000 --- a/docs/source/core_services/historians/Influxdb-Historian.rst +++ /dev/null @@ -1,199 +0,0 @@ -.. _Influxdb-Historian: - -################## -Influxdb Historian -################## - -InfluxDB is an open source time series database with a fast, scalable engine and high availability. -It's often used to build DevOps Monitoring (Infrastructure Monitoring, Application Monitoring, -Cloud Monitoring), IoT Monitoring, and Real-Time Analytics solutions. - -More information about InfluxDB is available from ``_. - - -Prerequisites -############# - -InfluxDB Installation -===================== - -To install InfluxDB on an Ubuntu or Debian operating system, run the script: - - :: - - services/core/InfluxdbHistorian/scripts/install-influx.sh - -For installation on other operating systems, -see ``_. - -Authentication in InfluxDB -========================== - -By default, the InfluxDB *Authentication* option is disabled, and no user authentication is -required to access any InfluxDB database. You can enable authentication by updating the -InfluxDB configuration file. For detailed information on enabling authentication, see: -``_. - -If *Authentication* is enabled, authorization privileges are enforced. There must be at least -one defined admin user with access to administrative queries as outlined in the linked document -above. Additionally, you must pre-create the ``user`` and ``database`` that are specified in the -configuration file (the default configuration file for InfluxDB -is ``services/core/InfluxdbHistorian/config``). -If your ``user`` is a non-admin user, they must be granted a full set of privileges on the -desired ``database``. - -InfluxDB Driver -=============== - -In order to connect to an InfluxDb client, the Python library for InfluxDB must be installed -in VOLTTRON's virtual environment. From the command line, after enabling the virtual environment, -install the InfluxDB library as follows: - - :: - - python bootstrap.py --influxdb - - -or - - :: - - python bootstrap.py --databases - - -or - - :: - - pip install influxdb - -Configuration -############# - -The default configuration file for VOLTTRON's InfluxDBHistorian agent should be in the format: - -.. code-block:: python - - { - "connection": { - "params": { - "host": "localhost", - "port": 8086, # Don't change this unless default bind port - # in influxdb config is changed - "database": "historian", - "user": "historian", # user is optional if authentication is turned off - "passwd": "historian" # passwd is optional if authentication is turned off - } - }, - "aggregations": { - "use_calendar_time_periods": true - } - } - - -The InfluxDBHistorian agent can be packaged, installed and started according to the standard -VOLTTRON agent creation procedure. A sample VOLTTRON configuration file has been -provided: ``services/core/InfluxdbHistorian/config``. - -.. seealso:: :ref:`Agent Development Walkthrough ` - -Connection -========== - -The ``host``, ``database``, ``user`` and ``passwd`` values in the VOLTTRON configuration file -can be modified. ``user`` and ``passwd`` are optional if InfluxDB *Authentication* is disabled. - -.. note:: Be sure to initialize or pre-create the ``database`` and ``user`` that you defined in - the configuration file, and if ``user`` is a non-admin user, be make sure to grant - privileges for the user on the specified ``database``. - For more information, see `Authentication in InfluxDB`_. - -Aggregations -============ - -In order to use aggregations, the VOLTTRON configuration file must also specify a value, -either ``true`` or ``false``, for ``use_calendar_time_periods``, indicating whether the -aggregation period should align to calendar time periods. If this value is omitted from the -configuration file, aggregations cannot be used. - -For more information on historian aggregations, -see: :ref:`Aggregate Historian Agent Specification `. - -Supported Influxdb aggregation functions: - - Aggregations: COUNT(), DISTINCT(), INTEGRAL(), MEAN(), MEDIAN(), MODE(), SPREAD(), STDDEV(), SUM() - - Selectors: FIRST(), LAST(), MAX(), MIN() - - Transformations: CEILING(),CUMULATIVE_SUM(), DERIVATIVE(), DIFFERENCE(), ELAPSED(), NON_NEGATIVE_DERIVATIVE(), NON_NEGATIVE_DIFFERENCE() - -More information how to use those functions: ``_ - -.. note:: Historian aggregations in InfluxDB are different from aggregations employed - by other historian agents in VOLTTRON. InfluxDB doesn't have a separate agent for aggregations. - Instead, aggregation is supported through the ``query_historian`` function. Other agents can - execute an aggregation query directly in InfluxDB by calling the *RPC.export* method ``query``. - For an example, see :ref:`Aggregate Historian Agent Specification ` - -Database Schema -############### - -Each InfluxDB database has a ``meta`` table as well as other tables for different measurements, -e.g. one table for "power_kw", one table for "energy", one table for "voltage", etc. -(An InfluxDB ``measurement`` is similar to a relational table, so for easier understanding, InfluxDB -measurements will be referred to below as tables.) - -Measurement Table -================= - -Example: If a topic name is *"CampusA/Building1/Device1/Power_KW"*, the ``power_kw`` table might look as follows: - -+-------------------------------+-----------+---------+----------+-------+------+ -|time |building |campus |device |source |value | -+-------------------------------+-----------+---------+----------+-------+------+ -|2017-12-28T20:41:00.004260096Z |building1 |campusa |device1 |scrape |123.4 | -+-------------------------------+-----------+---------+----------+-------+------+ -|2017-12-30T01:05:00.004435616Z |building1 |campusa |device1 |scrape |567.8 | -+-------------------------------+-----------+---------+----------+-------+------+ -|2018-01-15T18:08:00.126345Z |building1 |campusa |device1 |scrape |10 | -+-------------------------------+-----------+---------+----------+-------+------+ - -``building``, ``campus``, ``device``, and ``source`` are InfluxDB *tags*. ``value`` is an InfluxDB *field*. - -.. note:: The topic is converted to all lowercase before being stored in the table. - In other words, a set of *tag* names, as well as a table name, are created by - splitting ``topic_id`` into substrings (see `meta table`_ below). - - -So in this example, where the typical format of a topic name is ``///``, -``campus``, ``building`` and ``device`` are each stored as tags in the database. - -A topic name might not confirm to that convention: - - #. The topic name might contain additional substrings, e.g. - *CampusA/Building1/LAB/Device/OutsideAirTemperature*. In this case, - ``campus`` will be *campusa/building*, ``building`` will be *lab*, and ``device`` will be *device*. - - #. The topic name might contain fewer substrings, e.g. *LAB/Device/OutsideAirTemperature*. - In this case, the ``campus`` tag will be empty, ``building`` will be *lab*, - and ``device`` will be *device*. - -Meta Table -========== - -The meta table will be structured as in the following example: - -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ -|time |last_updated |meta_dict |topic |topic_id | -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ -|1970-01-01T00:00:00Z |2017-12-28T20:47:00.003051+00:00 |{u'units': u'kw', u'tz': u'US/Pacific', u'type': u'float'} |CampusA/Building1/Device1/Power_KW |campusa/building1/device1/power_kw | -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ -|1970-01-01T00:00:00Z |2017-12-28T20:47:00.003051+00:00 |{u'units': u'kwh', u'tz': u'US/Pacific', u'type': u'float'} |CampusA/Building1/Device1/Energy_KWH |campusa/building1/device1/energy_kwh | -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ - -In the InfluxDB, ``last_updated``, ``meta_dict`` and ``topic`` are *fields* and ``topic_id`` is a *tag*. - -Since InfluxDB is a time series database, the ``time`` column is required, and a dummy value (``time=0``, -which is 1970-01-01T00:00:00Z based on epoch unix time) is assigned to all topics for easier -metadata updating. Hence, if the contents of ``meta_dict`` change for a specific topic, both ``last_updated`` -and ``meta_dict`` values for that topic will be replaced in the table. diff --git a/docs/source/core_services/historians/Mongo-Historian.rst b/docs/source/core_services/historians/Mongo-Historian.rst deleted file mode 100644 index cc96faa329..0000000000 --- a/docs/source/core_services/historians/Mongo-Historian.rst +++ /dev/null @@ -1,93 +0,0 @@ -.. _Mongo-Historian: - -=============== -Mongo Historian -=============== - -Prerequisites -~~~~~~~~~~~~~ - -1. Mongodb ----------- - -Setup mongodb based on using one of the three below scripts. - -1. Install as root on Redhat or Cent OS - - :: - - sudo scripts/historian-scripts/root_install_mongo_rhel.sh - - The above script will prompt user for os version, db user name, password and database name - Once installed you can start and stop the service using the command: - - **sudo service mongod [start|stop|service]** - -2. Install as root on Ubuntu - - :: - - sudo scripts/historian-scripts/root_install_mongo_ubuntu.sh - - The above script will prompt user for os version, db user name, password and database name - Once installed you can start and stop the service using the command: - - **sudo service mongod [start|stop|service]** - -3. Install as non root user on any Linux machine - - :: - - scripts/historian-scripts/install_mongodb.sh - - Usage: - install_mongodb.sh [-h] [-d download_url] [-i install_dir] [-c config_file] [-s] - Optional arguments: - -s setup admin user and test collection after install and startup - - -d download url. defaults to https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.2.4.tgz - - -i install_dir. defaults to current_dir/mongo_install - - -c config file to be used for mongodb startup. Defaults to - default_mongodb.conf in the same directory as this script.Any datapath - mentioned in the config file should already exist and should have write - access to the current user - - -h print this help message - -2. Mongodb connector --------------------- -This historian requires a mongodb connector installed in your activated -volttron environment to talk to mongodb. Please execute the following -from an activated shell in order to install it. - - :: - - python bootstrap.py --mongo - - -or - - :: - - python bootstrap.py --databases - - -or - - :: - - pip install pymongo - - -3. Configuration Options ------------------------- -The historian configuration file can specify - -:: - - "history_limit_days": - -which will remove entries from the data and rollup collections older than n -days. Timestamps passed to the manage_db_size method are truncated to the day. diff --git a/docs/source/core_services/historians/Platform-Historian.rst b/docs/source/core_services/historians/Platform-Historian.rst deleted file mode 100644 index ff5003e978..0000000000 --- a/docs/source/core_services/historians/Platform-Historian.rst +++ /dev/null @@ -1,30 +0,0 @@ -.. _Platform-Historian: - -Platform Historian -================== - -A platform historian is a `"friendly named" `__ -historian on a VOLTTRON instance. It always has the identity (see -`vip `__) of platform.historian. A platform -historian is made available to a volttron central agent for monitoring -of the VOLTTRON instances health and plotting topics from the platform -historian. In order for one of the (historians)[Historians] to be turned -into a platform historian the identity keyword must be added to it's -configuration with the value of platform.historian. The following -configuration file shows a sqlite based platform historian -configuration. - -:: - - { - "agentid": "sqlhistorian-sqlite", - "identity": "platform.historian", - "connection": { - "type": "sqlite", - "params": { - "database": "~/.volttron/data/platform.historian.sqlite" - } - } - } - - diff --git a/docs/source/core_services/historians/SQL-Historian.rst b/docs/source/core_services/historians/SQL-Historian.rst deleted file mode 100644 index f4566d8478..0000000000 --- a/docs/source/core_services/historians/SQL-Historian.rst +++ /dev/null @@ -1,218 +0,0 @@ -.. _SQL-Historian: - -SQL Historian -============= - -An SQL Historian is available as a core service. The sql historian has -been programmed to allow for inconsistent network connectivity -(automatic re-connection to tcp based databases). All additions to the -historian are batched and wrapped within a transaction with commit and -rollback functions properly implemented. This allows the maximum -throughput of data with the most protection. The following example -configurations show the different options available for configuring the -SQL Historian Agent. - -MySQL Specifics -~~~~~~~~~~~~~~~ - -MySQL requires a third party driver (mysql-connector) to be installed in -order for it to work. Please execute the following from an activated -shell in order to install it. - -:: - - pip install --allow-external mysql-connector-python mysql-connector-python - -or - -:: - - python bootstrap.py --mysql - -or - -:: - - python bootstrap.py --databases - -| In addition, the mysql database must be created and permissions - granted for select, insert and update before the agent is started. In - order to support timestamp with microseconds you need at least MySql - 5.6.4. Please see this `MySql - documentation `__ - for more details -| The following is a minimal configuration file for using a MySQL based - historian. Other options are available and are documented - http://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html. - **Not all parameters have been tested, use at your own risk**. - -:: - - { - "agentid": "sqlhistorian-mysql", - "connection": { - "type": "mysql", - "params": { - "host": "localhost", - "port": 3306, - "database": "volttron", - "user": "user", - "passwd": "pass" - } - } - } - -Sqlite3 Specifics -~~~~~~~~~~~~~~~~~ - -An Sqlite Historian provides a convenient solution for under powered -systems. The database is parameter is a location on the file system. By -default it is relative to the agents installation directory, however it -will respect a rooted or relative path to the database. - -:: - - { - "agentid": "sqlhistorian-sqlite", - "connection": { - "type": "sqlite", - "params": { - "database": "data/historian.sqlite" - } - } - } - - -PostgreSQL and Redshift -~~~~~~~~~~~~~~~~~~~~~~~ - -Installation notes ------------------- - -1. The PostgreSQL database driver supports recent PostgreSQL versions. - It was tested on 10.x, but should work with 9.x and 11.x. - -2. The user must have SELECT, INSERT, and UPDATE privileges on historian - tables. - -3. The tables in the database are created as part of the execution of - the SQLHistorianAgent, but this will fail if the database user does not - have CREATE privileges. - -4. Care must be exercised when using multiple historians with the same - database. This configuration may be used only if there is no overlap in - the topics handled by each instance. Otherwise, duplicate topic IDs - may be created, producing strange results. - -5. Redshift databases do not support unique constraints. Therefore, it is - possible that tables may contain some duplicate data. The Redshift driver - handles this by using distinct queries. It does not remove duplicates - from the tables. - -Dependencies ------------- - -The PostgreSQL and Redshift database drivers require the **psycopg2** Python package. - - From an activated shell execute: - - :: - - pip install psycopg2-binary - -Configuration -------------- - -The following are minimal configuration files for using a psycopg2-based -historian. Other options are available and are documented -http://initd.org/psycopg/docs/module.html -**Not all parameters have been tested, use at your own risk**. - -Local PostgreSQL Database -+++++++++++++++++++++++++ - -The following snippet demonstrates how to configure the -SQLHistorianAgent to use a PostgreSQL database on the local system -that is configured to use Unix domain sockets. The user executing -volttron must have appropriate privileges. - -:: - - { - "connection": { - "type": "postgresql", - "params": { - "dbname": "volttron" - } - } - } - -Remote PostgreSQL Database -++++++++++++++++++++++++++ - -The following snippet demonstrates how to configure the -SQLHistorianAgent to use a remote PostgreSQL database. - -:: - - { - "connection": { - "type": "postgresql", - "params": { - "dbname": "volttron", - "host": "historian.example.com", - "port": 5432, - "user": "volttron", - "password": "secret" - } - } - } - -TimescaleDB Support -+++++++++++++++++++ - -Both of the above PostgreSQL connection types can make -use of TimescaleDB's high performance Hypertable backend -for the primary timeseries table. The agent assumes you -have completed the TimescaleDB installation and setup -the database by following the instructions here: -https://docs.timescale.com/latest/getting-started/setup -To use, simply add 'timescale_dialect: true' to the -connection params in the Agent Config as below - -:: - - { - "connection": { - "type": "postgresql", - "params": { - "dbname": "volttron", - "host": "historian.example.com", - "port": 5432, - "user": "volttron", - "password": "secret", - "timescale_dialect": true - } - } - } - -Redshift Database -+++++++++++++++++ - -The following snippet demonstrates how to configure the -SQLHistorianAgent to use a Redshift database. - -:: - - { - "connection": { - "type": "redshift", - "params": { - "dbname": "volttron", - "host": "historian.example.com", - "port": 5432, - "user": "volttron", - "password": "secret" - } - } - } diff --git a/docs/source/core_services/historians/index.rst b/docs/source/core_services/historians/index.rst deleted file mode 100644 index 85d18d95da..0000000000 --- a/docs/source/core_services/historians/index.rst +++ /dev/null @@ -1,155 +0,0 @@ -.. _Historian Index: - -============================ -VOLTTRON Historian Framework -============================ - -Historian Agents are the way by which device, actuator, datalogger, and -analysis are captured and stored in some sort of data store. Historians exist for the following storage options: - -- A general :ref:`SQL Historian ` implemented for MySQL, SQLite, PostgreSQL, and Amazon Redshift -- :ref:`MongoDB Historian ` -- :ref:`Crate Historian ` -- :ref:`Forward Historian ` for sending data to another VOLTTRON instance -- :ref:`OpenEIS Historian ` -- :ref:`MQTT Historian ` Forwards data to an MQTT broker -- :ref:`InfluxDB Historian ` - -Other implementations of historians can be created by following the -:ref:`developing historian agents ` section of -the wiki. - -Historians are all built upon the BaseHistorian which provides general -functionality the specific implementations is built upon. - -In most cases the default settings are fine for all deployments. - -All historians support the following settings: - -.. code-block:: python - - { - # Maximum amount of time to wait before retrying a failed publish in seconds. - # Will try more frequently if new data arrives before this timelime expires. - # Defaults to 300 - "retry_period": 300.0, - - # Maximum number of records to submit to the historian at a time. - # Defaults to 1000 - "submit_size_limit": 1000, - - # In the case where a historian needs to catch up after a disconnect - # the maximum amount of time to spend writing to the database before - # checking for and caching new data. - # Defaults to 30 - "max_time_publishing": 30.0, - - # Limit how far back the historian will keep data in days. - # Partial days supported via floating point numbers. - # A historian must implement this feature for it to be enforced. - "history_limit_days": 366, - - # Limit the size of the historian data store in gigabytes. - # A historian must implement this feature for it to be enforced. - "storage_limit_gb": 2.5 - - # Size limit of the backup cache in Gigabytes. - # Defaults to no limit. - "backup_storage_limit_gb": 8.0, - - # Do not actually gather any data. Historian is query only. - "readonly": false, - - # capture_device_data - # Defaults to true. Capture data published on the `devices/` topic. - "capture_device_data": true, - - # capture_analysis_data - # Defaults to true. Capture data published on the `analysis/` topic. - "capture_analysis_data": true, - - # capture_log_data - # Defaults to true. Capture data published on the `datalogger/` topic. - "capture_log_data": true, - - # capture_record_data - # Defaults to true. Capture data published on the `record/` topic. - "capture_record_data": true, - - # Replace a one topic with another before saving to the database. - # Deprecated in favor of retrieving the list of - # replacements from the VCP on the current instance. - "topic_replace_list": [ - #{"from": "FromString", "to": "ToString"} - ], - - # For historian developers. Adds benchmarking information to gathered data. - # Defaults to false and should be left that way. - "gather_timing_data": false - - # Allow for the custom topics or for limiting topics picked up by a historian instance. - # the key for each entry in custom topics is the data handler. The topic and data must - # conform to the syntax the handler expects (e.g., the capture_device_data handler expects - # data the driver framework). Handlers that expect specific data format are - # capture_device_data, capture_log_data, and capture_analysis_data. All other handlers will be - # treated as record data. The list associated with the handler is a list of custom - # topics to be associated with that handler. - # - # To restrict collection to only the custom topics, set the following config variables to False - # capture_device_data - # capture_analysis_data - # capture_log_data - # capture_record_data - "custom_topics": { - "capture_device_data": ["devices/campus/building/device/all"], - "capture_analysis_data": ["analysis/application_data/example"], - "capture_record_data": ["example"] - }, - # To restrict the points processed by a historian for a device or set of devices (i.e., this configuration - # parameter only filters data on topics with base 'devices). If the 'device' is in the - # topic (e.g.,'devices/campus/building/device/all') then only points in the list will be passed to the - # historians capture_data method, and processed by the historian for storage in its database (or forwarded to a - # remote platform (in the case of the ForwardHistorian). The key in the device_data_filter dictionary can - # be made more restrictive (e.g., "device/subdevice") to limit unnecessary searches through topics that may not - # contain the point(s) of interest. - "device_data_filter": - { - "device": ["point_name1", "point_name2"] - } - } - -By default the base historian will listen to 4 separate root topics -`datalogger/*`, `record/*`, `analysis/*`, and `devices/*`. - -Each root -topic has a :ref:`specific message syntax ` that -it is expecting for incoming data. - -Messages published to `datalogger` -will be assumed to be timepoint data that is composed of units and -specific types with the assumption that they have the ability to be -graphed easily. - -Messages published to `devices` are data that comes -directly from drivers. - -Messages published to `analysis` are analysis data published by agents -in the form of key value pairs. - -Finally, messages that are published to `record` -will be handled as string data and can be customized to the user -specific situation. - -Please consult the :ref:`Historian Topic -Syntax ` page for a specific syntax. - -This base historian will cache all received messages to a local -database before publishing it to the historian. This allows recovery from -unexpected happenings before the successful writing of data to the historian. - - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/index.rst b/docs/source/core_services/index.rst deleted file mode 100644 index e45d074a83..0000000000 --- a/docs/source/core_services/index.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. _core-services: - -============= -Core Services -============= - -Platform services provide underlying functionality used by applications to perform their tasks. - -.. toctree:: - :glob: - :maxdepth: 2 - - service_agents/index - control/index - historians/index - drivers/index - messagebus/index - security/index - restricted/index - config_store/index - multiplatform/index - messagebus_refactor/index - openadr/index - - * - - diff --git a/docs/source/core_services/messagebus/RPC.rst b/docs/source/core_services/messagebus/RPC.rst deleted file mode 100644 index a34b74e80f..0000000000 --- a/docs/source/core_services/messagebus/RPC.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. _Remote-Procedure-Calls: - -Remote Procedure Calls -====================== - -Remote procedure calls (RPC) is a new feature added with VOLTTRON 3.0. -The new VOLTTRON Interconnect Protocol `VIP `__ introduced the -ability to create new point-to-point protocols, called subsystems, -enabling the implementation of `JSON-RPC -2.0 `__. This provides a simple -method for agent authors to write methods and expose or export them to -other agents, making request-reply or notify communications patterns as -simple as writing and calling methods. - -Exporting Methods ------------------ - -The *export()* method, defined on the RPC subsystem class, is used to -mark a method as remotely accessible. This *export()* method has a dual -use. The class method can be used as a decorator to statically mark -methods when the agent class is defined. The instance method dynamically -exports methods, and can be used with methods not defined on the agent -class. Each take an optional export name argument, which defaults to the -method name. Here are the two export method signatures: - -Instance method: - -.. code:: python - - RPC.export(method, name=None) - -Class method: - -:: - - RPC.export(name=None) - -And here is an example agent definition using both methods: - -.. code:: python - - from volttron.platform.vip import Agent, Core, RPC - - def add(a, b): - '''Add two numbers and return the result''' - return a + b - - - class ExampleAgent(Agent): - @RPC.export - def say_hello(self, name): - '''Build and return a hello string''' - return 'Hello, %s!' % (name,) - - @RPC.export('say_bye') - def bye(self, name): - '''Build and return a goodbye string''' - return 'Goodbye, %s.' % (name,) - - @Core.receiver('setup') - def onsetup(self, sender, **kwargs): - self.vip.rpc.export('add') - -Calling exported methods ------------------------- - -The RPC subsystem provides three methods for calling exported RPC -methods. - -.. code:: python - - RPC.call(peer, method, *args, **kwargs) - -Call the remote *method* exported by *peer* with the given arguments. -Returns a gevent *AsyncResult* object. - -.. code:: python - - RPC.batch(peer, requests) - -Batch call remote methods exported by *peer*. *requests* must be an -iterable of 4-tuples *(notify, method, args, kwargs)*, where *notify* is -a boolean indicating whether this is a notification or standard call, -*method* is the method name, *args* is a list and *kwargs* is a -dictionary. Returns a list of *AsyncResult* objects for any standard -calls. Returns *None* if all requests were notifications. - -.. code:: python - - RPC.notify(peer, method, *args, **kwargs) - -Send a one-way notification message to *peer* by calling *method* -without returning a result. - -Here are some examples: - -.. code:: python - - self.vip.rpc.call(peer, 'say_hello', 'Bob').get() - results = self.vip.rpc.batch(peer, [(False, 'say_bye', 'Alice', {}), (True, 'later', [], {})]) - self.vip.rpc.notify(peer, 'ready') - -Inspection ----------- - -A list of methods is available by calling the *inspect* method. -Additional information can be returned for any method by appending -'.inspect' to the method name. Here are a couple examples: - -.. code:: python - - self.vip.rpc.call(peer, 'inspect') # Returns a list of exported methods - self.vip.rpc.call(peer, 'say_hello.inspect') # Return metadata on say_hello method - -Implementation --------------- - -See the -`RPC module `_ for implementation details. - -Also see :ref:`Multi-Platform RPC Communication ` and :ref:`RPC in RabbitMQ ` for additional resources. diff --git a/docs/source/core_services/messagebus/VIP/VIP-Authorization.rst b/docs/source/core_services/messagebus/VIP/VIP-Authorization.rst deleted file mode 100644 index d3829123cf..0000000000 --- a/docs/source/core_services/messagebus/VIP/VIP-Authorization.rst +++ /dev/null @@ -1,209 +0,0 @@ -.. _VIP-Authorization: - -================= -VIP Authorization -================= - -VIP :ref:`authentication ` and authorization go hand in -hand. When an agent authenticates to a VOLTTRON platform that agent proves its -identity to the platform. Once authenticated, an agent is allowed to -connect to the :ref:`message bus<_VOLTTRON-Message-Bus>`. VIP -authorization is about giving a platform owner the ability to limit the -capabilities of authenticated agents. - -There are two parts to authorization: - -#. Required capabilities (specified in agent's code) -#. Authorization entries (specified via ``volttron-ctl auth`` commands) - -The following example will walk through how to specify required capabilities -and grant those capabilities in authorization entries. - -Single Capability ------------------ -For this example suppose there is a temperature agent that can read and set the -temperature of a particular room. The agent author anticipates that building -managers will want to limit which agents can set the temperature. - -In the temperature agent, a required capability is specified by -using the ``RPC.allow`` decorator: - -.. code:: Python - - @RPC.export - def get_temperature(): - ... - - @RPC.allow('CAP_SET_TEMP') - @RPC.export - def set_temperature(temp): - ... - -In the code above, any agent can call the ``get_temperature`` method, but only -agents with the ``CAP_SET_TEMP`` capability can call ``set_temperature``. -(Note: capabilities are arbitrary strings. This example follows the general -style used for Linux capabilities, but it is up to the agent author.) - -Now that a required capability has been specified, suppose a VOLLTRON platform -owner wants to allow a specific agent, say AliceAgent, to set the temperature. - -The platform owner runs ``vctl auth add`` to add new authorization -entries or ``vctl auth update`` to update an existing entry. -If AliceAgent is installed on the platform, then it already has an -authorization entry. Running ``vctl auth list`` shows the existing -entries: - -.. code:: JSON - - ... - INDEX: 3 - { - "domain": null, - "user_id": "AliceAgent", - "roles": [], - "enabled": true, - "mechanism": "CURVE", - "capabilities": [], - "groups": [], - "address": null, - "credentials": "JydrFRRv-kdSejL6Ldxy978pOf8HkWC9fRHUWKmJfxc", - "comments": null - } - ... - -Currently AliceAgent cannot set the temperature because it does -not have the ``CAP_SET_TEMP`` capability. To grant this capability -the platform owner runs ``vctl auth update 3``: - -.. code:: Bash - - (For any field type "clear" to clear the value.) - domain []: - address []: - user_id [AliceAgent]: - capabilities (delimit multiple entries with comma) []: CAP_SET_TEMP - roles (delimit multiple entries with comma) []: - groups (delimit multiple entries with comma) []: - mechanism [CURVE]: - credentials [JydrFRRv-kdSejL6Ldxy978pOf8HkWC9fRHUWKmJfxc]: - comments []: - enabled [True]: - updated entry at index 3 - - -Now AliceAgent can call ``set_temperature`` via RPC. -If other agents try to call that method they will get the following -exception:: - - error: method "set_temperature" requires capabilities set(['CAP_SET_TEMP']), - but capability list [] was provided - -Multiple Capabilities ---------------------- - -Expanding on the temperature-agent example, the ``set_temperature`` method can -require agents to have multiple capabilities: - -.. code:: Python - - @RPC.allow(['CAP_SET_TEMP', 'CAP_FOO_BAR']) - @RPC.export - def set_temperature(): - ... - -This requires an agent to have both the ``CAP_SET_TEMP`` and the -``CAP_FOO_BAR`` capabilities. Multiple capabilities can also -be specified by using multiple ``RPC.allow`` decorators: - -.. code:: Python - - @RPC.allow('CAP_SET_TEMP') - @RPC.allow('CAN_FOO_BAR') - @RPC.export - def temperature(): - ... - -Capability with parameter restriction -------------------------------------- - -Capabilities can also be used to restrict access to a rpc method only with certain parameter values. For example, if AgentA -exposes a method bar which accepts parameter x - - -AgentA's capability enabled exported RPC method: - -:: - - @RPC.export - @RPC.allow('can_call_bar') - def bar(self, x): - return 'If you can see this, then you have the required capabilities' - -You can restrict access to AgentA's bar method to AgentB with x=1. To add this auth entry use the vctl auth add command -as show below - -:: - - vctl auth add --capabilities '{"test1_cap2":{"x":1}}' --user_id AgentB --credential vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0 - -auth.json file entry for the above command would be - -:: - - { - "domain": null, - "user_id": "AgentB", - "roles": [], - "enabled": true, - "mechanism": "CURVE", - "capabilities": { - "test1_cap2": { - "x": 1 - } - }, - "groups": [], - "address": null, - "credentials": "vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0", - "comments": null - } - - - -Parameter values can also be regular expressions - -:: - - (volttron)volttron@volttron1:~/git/myvolttron$ vctl auth add - domain []: - address []: - user_id []: - capabilities (delimit multiple entries with comma) []: {'test1_cap2':{'x':'/.*'}} - roles (delimit multiple entries with comma) []: - groups (delimit multiple entries with comma) []: - mechanism [CURVE]: - credentials []: vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0 - comments []: - enabled [True]: - added entry domain=None, address=None, mechanism='CURVE', credentials=u'vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0', user_id='b22e041d-ec21-4f78-b32e-ab7138c22373' - - -auth.json file entry for the above command would be: - -:: - - { - "domain": null, - "user_id": "90f8ef35-4407-49d8-8863-4220e95974c7", - "roles": [], - "enabled": true, - "mechanism": "CURVE", - "capabilities": { - "test1_cap2": { - "x": "/.*" - } - }, - "groups": [], - "address": null, - "credentials": "vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0", - "comments": null - } diff --git a/docs/source/core_services/messagebus/VIP/VIP-Enhancements.rst b/docs/source/core_services/messagebus/VIP/VIP-Enhancements.rst deleted file mode 100644 index cd3a918ca3..0000000000 --- a/docs/source/core_services/messagebus/VIP/VIP-Enhancements.rst +++ /dev/null @@ -1,111 +0,0 @@ -.. _VIP-Enhancements: - -VIP Enhancements -================ - -Outline a vision of how VOLTTRON Message Bus should work - -When creating VIP for VOLTTRON 3.0 we wanted to address two security -concerns and one user request: - -- Security Concern 1: Agents can spoof each other on the VOLTTRON - message bus and fake messages. -- Security Concern 2: Agents can subscribe to topics that they are not - authorized to subscribe to. -- User Request 1: Several users requested means to transfer large - amounts of data between agents without using the message bus. - -VOLTTRON Interconnect Protocol (VIP) was created to address these issues -but unfortunately, it broke the easy to use pub-sub messaging model of -VOLTTRON. Additionally to use the security features of VOLTTRON in 3.0 -code has become an ordeal especially when multiple platforms are -concerned. Finally, VIP has introduced the requirement for knowledge of -specific other platforms to agents written by users in order to be able -to communicate. The rest of this memo focuses on defining the way -VOLTTRON message bus will work going forward indefinitely and should be -used as the guiding principles for any future work on VIP and VOLTTRON. -  - -VOLTTRON Message Bus Guiding Principles: - -#. | All communications between two or more different VOLTTRON platforms - MUST go through the VIP Router. Said another way, a user agent - (application) should have NO capability to reach out to an agent on a - different VOLTTRON platform directly. - | All communications between two or more VOLTTRON platforms must be - in the form of topics on the message bus. Agents MUST not use a - distinct platform address or name to communicate via a direct - connection between two platforms. - -#. VOLTTRON will use two TCP ports. One port is used to extend VIP - across platforms. A second port is used for the VOLTTRON discovery - protocol (more on this to come on a different document). VIP will - establish bi-directional communication via a single TCP port. - -#. In order to solve the bootstrapping problem that CurveMQ has punted - on, we will modify VIP to operate similar (behaviorally) to SSH. - -A. On a single VOLTTRON platform, the platform’s public key will be made -available via an API so that all agents will be able to communicate with -the platform. Additionally, the behavior of the platform will be changed -so that agents on the same platform will automatically be added to -auth.json file. No more need for user to add the agents manually to the -file. The desired behavior is similar to how SSH handles known\_hosts. -Note that this behavior still addresses the security request 1 & 2. - -B. When connecting VOLTTRON platforms, VOLTTRON Discovery Protocol (VDP) -will be used to discover the other platforms public key to establish the -router to router connection. Note that since we BANNED agent to agent -communication between two platforms, we have prevented an O(N^2) -communication pattern and key bootstrapping problem. - -#. Authorization determines what agents are allowed to access what - topics. Authorization MUST be managed by the VOLTTRON Central - platform on a per organization basis. It is not recommended to have - different authorization profiles on different VOLTTRON instances - belonging to the same organization. - -#. VOLTTRON message bus uses topics such as and will adopt an - information model agreed upon by the VOLTTRON community going - forward. Our initial information model is based on the OpenEIS schema - going forward. A different document will describe the information - model we have adopted going forward. All agents are free to create - their own topics but the VOLTTRON team (going forward) will support - the common VOLTTRON information model and all agents developed by - PNNL will be converted to use the new information model. - -#. Two connected VOLTTRON systems will exchange a list of available - topics via the message router. This will allow each VIP router to - know what topics are available at what VOLTTRON platform. - -#. Even though each VOLTTRON platform will have knowledge of what topics - are available around itself, no actual messages will be forwarded - between VOLTTRON platforms until an agent on a specific platform - subscribes to a topic. When an agent subscribes to a topic that has a - publisher on a different VOLTTRON platform, the VIP router will send - a request to its peer routers so that the messages sent to that topic - will be forwarded. There will be cases (such as clean energy - transactive project) where the publisher to a topic may be multiple - hops away. In this case, the subscribe request will be sent towards - the publisher through other VIP routers. In order to find the most - efficient path, we may need to keep track of the total number of hops - (in terms of number of VIP routers). - -#. The model described in steps 5/6/7 applies to data collection. For - control applications, VOLTTRON team only allows control actions to be - originated from the VOLTTRON instance that is directly connected to - that controlled device. This decision is made to increase the - robustness of the control agent and to encourage truly distributed - applications to be developed. - -#. Direct agent to agent communication will be supported by creation of - an ephemeral topic under the topic hierarchy. Our measurements have - shown repeatedly that the overhead of using the ZeroMQ message - pub/sub is minimal and has zero impact on communications throughput. - -In summary, by making small changes to the way VIP operates, I believe -that we can significantly increase the usability of the platform and -also correct the mixing of two communication platforms into VIP. -VOLTTRON message bus will return to being a pub/sub messaging system -going forward. Direct agent to agent communication will be supported -through the message bus. diff --git a/docs/source/core_services/messagebus/VIP/VIP-Known-Identities.rst b/docs/source/core_services/messagebus/VIP/VIP-Known-Identities.rst deleted file mode 100644 index 77c237e561..0000000000 --- a/docs/source/core_services/messagebus/VIP/VIP-Known-Identities.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _VIP-Kwown-Identities: - -VIP Known Identities -==================== - -It is critical for systems to have known locations for receiving -resources and services from in a networked environment. The following -table details the vip identities that are reserved for VOLTTRON specific -usage. - -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| VIP Identity | Sphere of Influence | Notes | -+======================+=======================+====================================================================================================================================================================================================================================================================+ -| platform | Platform | | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| platform.agent | Platform | The PlatformAgent is responsible for this identity. It is used to allow the VolttronCentralAgent to control and individual platform. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| volttron.central | Multi-Network | The default identity for a VolttronCentralAgent. The PlatformAgent by default will use this as it's manager, but can be overridden in the configuration file of individual agents. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| platform.historian | platform | An individual platform may have many historians available to it, however the only one that will be available through Volttron Central by default will be called this. Note that this does not require a specific type of historian, just that it's VIP Identity. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| control | platform | Control is used to control the individual platform. From the command line when issuing any volttron-ctl operations or when using Volttron Central. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| pubsub | platform | Pub/Sub subsystem router | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| platform.actuator | actuator | Agent which coordinates sending control commands to devices. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| config.store | platform | The configuration subsystem service agent on the platform. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| platform.driver | devices | The default identity for the Master Driver Agent. | -+----------------------+-----------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ - diff --git a/docs/source/core_services/messagebus/VIP/index.rst b/docs/source/core_services/messagebus/VIP/index.rst deleted file mode 100644 index 813f89125f..0000000000 --- a/docs/source/core_services/messagebus/VIP/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -============================== -VOLTTRON Interconnect Protocol -============================== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/messagebus/index.rst b/docs/source/core_services/messagebus/index.rst deleted file mode 100644 index 2e0e578946..0000000000 --- a/docs/source/core_services/messagebus/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _messagebus index: - -==================== -VOLTTRON Message Bus -==================== - -.. toctree:: - :glob: - :maxdepth: 2 - - VIP/index - - * diff --git a/docs/source/core_services/messagebus/topics.rst b/docs/source/core_services/messagebus/topics.rst deleted file mode 100644 index 3ddf38cc35..0000000000 --- a/docs/source/core_services/messagebus/topics.rst +++ /dev/null @@ -1,60 +0,0 @@ -.. _Topics: - -Messaging and Topics -==================== - -Introduction ------------- - -Agents in |VOLTTRON| communicate with each other using a -publish/subscribe mechanism built on the Zero MQ Python library. This -allows for great flexibility as topics can be created dynamically and -the messages sent can be any format as long as the sender and receiver -understand it. An agent with data to share publishes to a topic, then -any agents interested in that data subscribe to that topic. - -While this flexibility is powerful, it also could also lead to confusion -if some standard is not followed. The current conventions for -communicating in the VOLTTRON are: - -- Topics and subtopics follow the format: topic/subtopic/subtopic -- Subscribers can subscribe to any and all levels. Subscriptions to - "topic" will include messages for the base topic and all subtopics. - Subscriptions to "topic/subtopic1" will only receive messages for - that subtopic and any children subtopics. Subscriptions to empty - string ("") will receive ALL messages. This is not recommended. - - - All agents should subscribe to the "platform" topic. This is the - topic the VOLTTRON will use to send messages to agents, such as - "shutdown". - -Agents should set the "From" header. This will allow agents to filter on -the "To" message sent back. This is especially useful for requests to -the ArchiverAgent so agents do not receive replies not meant for their -request. - -Topics ------- - -In VOLTTRON -~~~~~~~~~~~ - -- platform - Base topic used by the platform to inform agents of - platform events -- platform/shutdown - General shutdown command. All agents should exit - upon receiving this. Message content will be a reason for the - shutdown -- platform/shutdown\_agent - This topic will provide a specific agent - id. Agents should subscribe to this topic and exit if the id in the - message matches their id. - -- devices - Base topic for data being published by drivers -- datalogger - Base topic for agents wishing to record time series data -- record - Base topic for agents to record data in an arbitrary format. - -Controller Agent Topics -~~~~~~~~~~~~~~~~~~~~~~~ - -See the documentation for the [[ActuatorAgent]]. - -.. |VOLTTRON| unicode:: VOLTTRON U+2122 diff --git a/docs/source/core_services/messagebus_refactor/RabbitMQ-Overview.rst b/docs/source/core_services/messagebus_refactor/RabbitMQ-Overview.rst deleted file mode 100644 index f5e7291586..0000000000 --- a/docs/source/core_services/messagebus_refactor/RabbitMQ-Overview.rst +++ /dev/null @@ -1,225 +0,0 @@ - .. _RabbitMQ-Overview: - -================= -RabbitMQ Overview -================= - -.. NOTE:: Some of the RabbitMQ summary/overview documentation and supporting images added here are taken from `RabbitMQ official documentation `_. - -RabbitMQ is the most popular messaging library with over 35,000 production deployments. -It is highly scalable, easy to deploy, runs on many operating systems and cloud -environments. It supports many kinds of distributed deployment methodologies such as -clusters, federation and shovels. - - -RabbitMQ uses Advanced Message Queueing Protocol (AMQP) and works on the basic -producer consumer model. A consumer is a program that consumes/receives messages and -producer is a program that sends the messages. Following are some important -definitions that we need to know before we proceed. - -* Queue - Queues can be considered like a post box that stores messages until consumed by the consumer. Each consumer must create a queue to receives messages that it is interested in receiving. We can set properties to the queue during it's declaration. The queue properties are - - * Name - Name of the queue - * Durable - Flag to indicate if the queue should survive broker restart. - * Exclusive - Used only for one connection and it will be removed when connection is closed. - * Auto-queue - Flag to indicate if auto-delete is needed. The queue is deleted when last consumer un-subscribes from it. - * Arguments - Optional, can be used to set message TTL (Time To Live), queue limit etc. - -* Bindings - Consumers bind the queue to an exchange with binding keys or routing patterns. Producers send messages and associate them with a routing key. Messages are routed to one or many queues based on a pattern matching between a message routing key and binding key. - -* Exchanges - Exchanges are entities that are responsible for routing messages to the queues based on the routing pattern/binding key used. They look at the routing key in the message when deciding how to route messages to queues. There are different types of exchanges and one must choose the type of exchange depending on the application design requirements - - #. Fanout - It blindly broadcasts the message it receives to all the queues it knows. - - #. Direct - Here, the message is routed to a queue if the routing key of the message exactly matches the binding key of the queue. - - #. Topic - Here, the message is routed to a queue based on pattern matching of the routing key with the binding key. The binding key and the routing key pattern must be a list of words delimited by dots, for example, "car.subaru.outback" or "car.subaru.*", "car.#". A message sent with a particular routing key will be delivered to all the queues that are bound with a matching binding key with some special rules as - - '*' (star) - can match exactly one word in that position. - '#' (hash) - can match zero or more words - - #. Headers - If we need more complex matching then we can add a header to the message with all the attributes set to the values that need to be matched. The message is considered matching if the values of the attributes in the header is equal to that of the binding. Header exchange ignore the routing key. - - We can set some properties to the exchange during it's declaration. - - * Name - Name of the exchange - * Durable - Flag to indicate if the exchange should survive broker restart. - * Auto-delete - Flag indicates if auto-delete is needed. If set to true, the exchange is deleted when the last queue is unbound from it. - * Arguments - Optional, used by plugins and broker-specific features - -Lets use an example to understand how they all fit together. Consider an example where there -are four consumers (Consumer 1 - 4) interested in receiving messages matching the pattern -"green", "red" or "yellow". In this example, we are using a direct exchange that will route -the messages to the queues only when there is an exact match of the routing key of the message -with the binding key of the queues. Each of the consumers declare a queue and bind the queue -to the exchange with a binding key of interest. Lastly, we have a producer that is continuously -sending messages to exchange with routing key "green". The exchange will check for an exact -match and route the messages to only Consumer 1 and Consumer 3. - -.. image:: files/rabbitmq_exchange.png - - -For more information about queues, bindings, exchanges, please refer to -`RabbitMQ tutorial `_. - - -Distributed RabbitMQ Brokers -============================ -RabbitMQ allows multiple distributed RabbitMQ brokers to be connected in three different ways - -with clustering, with federation and using shovel. We take advantage of these built-in plugins -for multi-platform VOLTTRON communication. For more information about the differences between clustering, -federation, and shovel, please refer to RabbitMQ documentation -`Distributed RabbitMQ brokers `_. - -Clustering ----------- -Clustering connects multiple brokers residing in multiple machines to form a single logical broker. -It is used in applications where tight coupling is necessary i.e, where each node shares the data -and knows the state of all other nodes in the cluster. A new node can connect to the cluster through -a peer discovery mechanism if configured to do so in the RabbitMQ config file. For all the nodes to -be connected together in a cluster, it is necessary for them to share the same Erlang cookie and be -reachable through it's DNS hostname. A client can connect to any one of the nodes in the cluster and -perform any operation (to send/receive messages from other nodes etc.), the nodes will route the operation -internally. In case of a node failure, clients should be able to reconnect to a different node, -recover their topology and continue operation. - -Please note, this feature is not integrated into VOLTTRON. But we hope to support it in the future. -For more detailed information about clustering, please refer to RabbitMQ documentation -`Clustering plugin `_. - -.. _Federation: - -Federation ----------- -Federation plugin is used in applications that does not require as much of tight coupling as clustering. -Federation has several useful features. - -* Loose coupling - The federation plugin can transmit messages between brokers (or clusters) in different administrative domains: - - * they may have different users and virtual hosts; - * they may run on different versions of RabbitMQ and Erlang. - -* WAN friendliness - They can tolerate network intermittent connectivity. - -* Specificity - Not everything needs to be federated ( made available to other brokers ). There can be local-only components. - -* Scalability - Federation does not require O(n2) connections for *n* brokers, so it scales better. - -The federation plugin allows you to make exchanges and queues federated. A federated exchange or queue can -receive messages from one or more upstreams (remote exchanges and queues on other brokers). A federated -exchange can route messages published upstream to a local queue. A federated queue lets a local consumer -receive messages from an upstream queue. - -Before we move forward, let's define upstream and downstream servers. - -* Upstream server - The node that is publishing some message of interest -* Downstream server - The node connected to a different broker that wants to receive messages from the upstream server - -A federation link needs to be established from downstream server to the upstream server. The data flows in -single direction from upstream server to downstream server. For bi-directional data flow, we would need to -create federation links on both the nodes. - -We can receive messages from upstream server to downstream server by either making an exchange or a queue -*federated*. - -For more detailed information about federation, please refer to RabbitMQ documentation -`Federation plugin `_. - -Federated Exchange ------------------- -When we make an exchange on the downstream server *federated*, the messages published to the upstream -exchanges are copied to the federated exchange, as though they were published directly to it. - -.. image:: files/federation.png - -Above figure explains message transfer using federated exchange. The box on the right acts as the downstream server -and the box on the left acts as the upstream server. A federation/upstream link is established between -the downstream server and the upstream server by using federation management plugin. An exchange on the -downstream server is made *federated* using federation policy configuration. The federated exchange only -receives the messages for which it has subscribed for. An upstream queue is created on the upstream -server with a binding key same as subscription made on the federated exchange. For example, if an upstream -server is publishing messages with binding key "foo" and a client on the downstream server is interested -in receiving messages of the binding key "foo", then it creates a queue and binds the queue to the federated -with the same binding key. This binding is sent to the upstream and the upstream queue binds to the -upstream exchange with that key. - - -Publications to either exchange may be received by queues bound to the federated exchange, but publications -directly to the federated exchange cannot be received by queues bound to the upstream exchange. - -For more information about federated exchanges and different federation topologies, please read -`Federated Exchanges `_. - -Federated Queue ---------------- -Federated queue provides a way of balancing load of a single queue across nodes or clusters. -A federated queue lets a local consumer receive messages from an upstream queue. A typical -use would be to have the same "logical" queue distributed over many brokers. Such a logical -distributed queue is capable of having higher capacity than a single queue. A federated queue -links to other upstream queues. - -A federation or upstream link needs to be created like before and a federated queue needs -to be setup on the downstream server using federation policy configuration. The federated -queue will only retrieve messages when it has run out of messages locally, it has consumers -that need messages, and the upstream queue has "spare" messages that are not being consumed. - -For more information about federated queues, please read -`Federated Queues `_. - -.. _Shovel: - -Shovel ------- -Shovel plugin allows you to reliably and continually move messages from a source in one -broker to destination in another broker. A shovel behaves like a well-written client application, that - -* connects to it's source and destination broker -* consumes messages from the source queue -* re-publishes messages to the destination if the messages match the routing key. - -Shovel plugin uses Erlang client under the hood. In case of shovel, apart from configuring -the hostname, port and virtual host of the remote node, we will also have to provide list -of routing keys that we want to forward to remote node.The primary advantages of shovels are - -* Loose coupling - A shovel can move messages between brokers (or clusters) in different administrative domains: - - * they may have different users and virtual hosts; - * they may run on different versions of RabbitMQ and Erlang. -* WAN friendliness - They can tolerate network intermittent connectivity. - -Shovels are also useful in case if one of the nodes is behind NAT. We can setup shovel on -the node behind NAT to forward messages to the node outside NAT. -Shovels do not allow you to adapt to subscriptions like a federation link and we need to a -create a new shovel per subscription. - -For more detailed information about shovel, please refer to RabbitMQ documentation -`Shovel plugin `_. - - -Authentication in RabbitMQ -========================== - -By default RabbitMQ supports SASL PLAIN authentication with user name and password. RabbitMQ supports other SASL authentication mechanism using plugins. In VOLTTRON we use one such external plugin based on x509 certifcates(``_). This authentication is based on a techique called public key cryptography which consists of a key pair - a public key and a private key. Data that has been encrypted with a public key can only be decrypted with the corresponding private key and vice versa. The owner of key pair makes the public key available and keeps the private confidential. To send a secure data to a receiver, a sender encrypts the data with the receiver's public key. Since only the receiver has access to his own private key only the receiver can decrypted. This ensures that others, even if they can get access to the encrypted data, cannot decrypt it. This is how public key cryptography achieves confidentiality. - - -Digital certificate is a digital file that is used to prove ownership of a public key. Certificates act like identification cards for it owner/entity. Certificates are hence crucial to determine that a sender is using the right public key to encrypt the data in the first place. Digital Certificates are issued by Certification Authorities(CA). Certification Authorities fulfil the role of the Trusted Third Party by accepting Certificate applications from entities, authenticating applications, issuing Certificates and maintaining status information about the Certificates issued. Each CA has its own public private key pair and its public key certificate is called a root CA certificate. The CA attests to the identity of a Certificate applicant when it signs the Digital Certificate using its private key. In x509 based authentication, a signed certificate is presented instead of username/password for authentication and if the server recognizes the the signer of the certificate as a trusted CA, accepts and allows the connection. Each server/system can maintain its own list of trusted CAs (i.e. list of public certificates of CAs). Certificates signed by any of the trusted CA would be considered trusted. Certificates can also be signed by intermediate CAs that are in turn signed by a trusted. - -This section only provides a breif overview about the SSL based authentication. Please refer to the vast material available online for detailed description. Some useful links to start: - - * ``_ - * ``_ - -Management Plugin -================= -The rabbitmq-management plugin provides an HTTP-based API for management and monitoring of RabbitMQ -nodes and clusters, along with a browser-based UI and a command line tool, *rabbitmqadmin*. The management -interface allows you to - -* Create, Monitor the status and delete resources such as virtual hosts, users, exchanges, queues etc. -* Monitor queue length, message rates and connection information and more -* Manage users and add permissions (read, write and configure) to use the resources -* Manage policies and runtime parameters -* Send and receive messages (for trouble shooting) - -For more detailed information about the management plugin, please refer to RabbitMQ documentation -`Management Plugin `_. diff --git a/docs/source/core_services/messagebus_refactor/RabbitMQ-Refactor-VOLTTRON.rst b/docs/source/core_services/messagebus_refactor/RabbitMQ-Refactor-VOLTTRON.rst deleted file mode 100644 index a044775c16..0000000000 --- a/docs/source/core_services/messagebus_refactor/RabbitMQ-Refactor-VOLTTRON.rst +++ /dev/null @@ -1,393 +0,0 @@ -.. _RabbitMQ-VOLTTRON: - -======================= -RabbitMQ Based VOLTTRON -======================= -RabbitMQ VOLTTRON uses Pika library for RabbitMQ message bus implementation. To setup VOLTTRON -instance to use RabbitMQ message bus, we need to first configure VOLTTRON to use RabbitMQ message -library. The contents of the RabbitMQ configuration file looks like below. - -Path: $VOLTTRON_HOME/rabbitmq_config.yml - -.. code-block:: yaml - - #host parameter is mandatory parameter. fully qualified domain name - host: mymachine.pnl.gov - - # mandatory. certificate data used to create root ca certificate. Each volttron - # instance must have unique common-name for root ca certificate - certificate-data: - country: 'US' - state: 'Washington' - location: 'Richland' - organization: 'PNNL' - organization-unit: 'VOLTTRON Team' - # volttron1 has to be replaced with actual instance name of the VOLTTRON - common-name: 'volttron1_root_ca' - # - # optional parameters for single instance setup - # - virtual-host: 'volttron' # defaults to volttron - - # use the below four port variables if using custom rabbitmq ports - # defaults to 5672 - amqp-port: '5672' - - # defaults to 5671 - amqp-port-ssl: '5671' - - # defaults to 15672 - mgmt-port: '15672' - - # defaults to 15671 - mgmt-port-ssl: '15671' - - # defaults to true - ssl: 'true' - - # defaults to ~/rabbitmq_server/rabbbitmq_server-3.7.7 - rmq-home: "~/rabbitmq_server/rabbitmq_server-3.7.7" - -Each VOLTTRON instance resides within a RabbitMQ virtual host. The name of the virtual -host needs to be unique per VOLTTRON instance if there are multiple virtual instances -within a single host/machine. The hostname needs to be able to resolve to a valid IP. -The default port of AMQP port without authentication is 5672 and with authentication -is 5671. The default management HTTP port without authentication is 15672 and -with authentication is 15671. These needs to be set appropriately if default ports are -not used. The 'ssl' flag indicates if SSL based authentication is required or not. -If set to True, information regarding SSL certificates needs to be also provided. -SSL based authentication is described in detail in `Authentication And Authorization With RabbitMQ Message Bus `_. - - -To configure the VOLTTRON instance to use RabbitMQ message bus, run the following command. - - vcfg --rabbitmq single [optional path to rabbitmq_config.yml] - -At the end of the setup process, RabbitMQ broker is setup to use the configuration provided. -A new topic exchange for the VOLTTRON instance is created within the configured virtual host. - - -On platform startup, VOLTTRON checks for the type of message bus to be used. If using RabbitMQ -message bus, the RabbitMQ platform router is instantiated. The RabbitMQ platform router, - -* Connects to RabbitMQ broker (with or without authentication) -* Creates a VIP queue and binds itself to the "VOLTTRON" exchange with binding key ".router". This binding key makes it unique across multiple VOLTTRON instances in a single machine as long as each instance has a unique instance name. -* Handles messages intended for router module such as "hello", "peerlist", "query" etc. -* Handles unrouteable messages - Messages which cannot be routed to any destination agent are captured and an error message indicating "Host Unreachable" error is sent back to the caller. -* Disconnects from the broker when the platform shuts down. - - -When any agent is installed and started, the Agent Core checks for the type of message bus used. -If it is RabbitMQ message bus then - -* It creates a RabbitMQ user for the agent. -* If SSL based authentication is enabled, client certificates for the agent is created. -* Connect to the RabbitQM broker with appropriate connection parameters -* Creates a VIP queue and binds itself to the "VOLTTRON" exchange with binding key ".". -* Sends and receives messages using Pika library methods. -* Checks for the type of subsystem in the message packet that it receives and calls the appropriate subsystem message handler. -* Disconnects from the broker when the agent stops or platform shuts down. - - -RPC In RabbitMQ VOLTTRON -======================== -The agent functionality remain unchanged irrespective of the underlying message bus used. -That means they can continue to use the same RPC interfaces without any change. - -.. image:: files/rpc.png - -Consider two agents with VIP identities "agent_a" and "agent_b" connected to VOLTTRON platform -with instance name "volttron1". Agent A and B each have a VIP queue with binding key "volttron1.agent_a" -and "volttron1.agent_b". Following is the sequence of operation when Agent A wants to make RPC -call to Agent B. - -1. Agent A make RPC call to Agent B. - agent_a.vip.rpc.call("agent_b", set_point, "point_name", 2.5) - -2. RPC subsystem wraps this call into a VIP message object and sends it to Agent B. -3. The VOLTTRON exchange routes the message to Agent B as the destination routing in the VIP message object matches with the binding key of Agent B. -4. Agent Core on Agent B receives the message, unwraps the message to find the subsystem type and calls the RPC subsystem handler. -5. RPC subsystem makes the actual RPC call "set_point()" and gets the result. It then wraps into VIP message object and sends it back to the caller. -6. The VOLTTRON exchange routes it to back to Agent A. -7. Agent Core on Agent A calls the RPC subsystem handler which in turn hands over the RPC result to Agent A application. - - -PUBSUB In RabbitMQ VOLTTRON -=========================== -The agent functionality remains unchanged irrespective of the platform using ZeroMQ based pubsub or -RabbitMQ based pubsub i,e, agents continue to use the same PubSub interfaces and use the same topic -format delimited by “/”. Since RabbitMQ expects binding key to be delimited by '.', RabbitMQ PUBSUB -internally replaces '/' with ".". Additionally, all agent topics converted to -“__pubsub__..” to differentiate from main Agent VIP queue binding. - -.. image:: files/pubsub.png - -Consider two agents with VIP identities "agent_a" and "agent_b" connected to VOLTTRON platform -with instance name "volttron1". Agent A and B each have a VIP queue with binding key "volttron1.agent_a" -and "volttron1.agent_b". Following is the sequence of operation when Agent A subscribes to a topic and Agent B -publishes to same the topic. - -1. Agent B makes subscribe call for topic "devices". - agent_b.vip.pubsub.subscribe("pubsub", prefix="devices", callback=self.onmessage) - -2. Pubsub subsystem creates binding key from the topic “__pubsub__.volttron1.devices.#” - -3. It creates a queue internally and binds the queue to the VOLTTRON exchange with the above binding key. - -4. Agent B is publishing messages with topic: "devices/hvac1". - agent_b.vip.pubsub.publish("pubsub", topic="devices/hvac1", headers={}, message="foo"). - -5. PubSub subsystem internally creates a VIP message object and publishes on the VOLTTRON exchange. - -6. RabbitMQ broker routes the message to Agent B as routing key in the message matches with the binding key of the topic subscription. - -7. The pubsub subsystem unwraps the message and calls the appropriate callback method of Agent A. - -If agent wants to subscribe to topic from remote instances, it uses -agent.vip.subscribe(“pubsub”, “devices.hvac1”, all_platforms=True”) -It is internally set to “__pubsub__.*.” - -Pubsub subsystem for ZeroMQ message bus performs O(N) comparisons where N is the number of unique -subscriptions. RabbitMQ Topic Exchange was enhanced in version 2.6.0 to reduce the overhead of -additional unique subscriptions to almost nothing in most cases. We speculate they are using a tree -structure to store the binding keys which would reduce the search time to O(1) in most cases -and O(ln) in the worst case. VOLTTRON PUBSUB with ZeroMQ could be updated to match this performance -scalability with some effort. - -Multi-Platform Communication In RabbitMQ VOLTTRON -================================================= -With ZeroMQ based VOLTTRON, multi-platform communication was accomplished in three different ways. - -1. Direct connection to remote instance - Write an agent that would connect to remote instance directly. - -2. Special agents - Use special agents such as forward historian/data puller agents that would -forward/receive messages to/from remote instances. -In RabbitMQ-VOLTTRON, we make use of shovel plugin to achieve this behavior. Please refer to -:ref:`Shovel Plugin ` to get an overview of shovels. - -3. Multi-Platform RPC and PubSub - Configure VIP address of all remote instances that an instance has -to connect to in it's $VOLTTRON_HOME/external_discovery.json and let the router module in each -instance manage the connection and take care of the message routing for us. In RabbitMQ-VOLTTRON, we -make use of federation plugin to achieve this behavior. Please refer to -:ref:`Federation Plugin ` get an overview of federation. - -Using Federation Plugin ------------------------ -We can connect multiple VOLTTRON instances using the federation plugin. Before setting up federation -links, we need to first identify upstream server and downstream server. Upstream Server is the node -that is publishing some message of interest and downStream server is the node that wants to receive -messages from the upstream server. A federation link needs to be established from a downstream VOLTTRON -instance to upstream VOLTTRON instance. To setup a federation link, we will need to add upstream server -information in a RabbitMQ federation configuration file - -Path: $VOLTTRON_HOME/rabbitmq_federation_config.yml - -.. code-block:: yaml - - # Mandatory parameters for federation setup - federation-upstream: - rabbit-4: - port: '5671' - virtual-host: volttron4 - rabbit-5: - port: '5671' - virtual-host: volttron5 - -To configure the VOLTTRON instance to setup federation, run the following command. - - vcfg --rabbitmq federation [optional path to rabbitmq_federation_config.yml] - -This will setup federation links to upstream servers and sets policy to make the VOLTTRON -exchange *federated*. Once a federation link is established to remote instance, the messages -published on the remote instance become available to local instance as if it were published on -the local instance. - -For detailed instructions to setup federation, please refer to README section <>. - -Multi-Platform RPC With Federation ----------------------------------- -For multi-platform RPC communication, federation links need to be established on both the VOLTTRON -nodes. Once the federation links are established, RPC communication becomes fairly simple. - -.. image:: files/multiplatform_rpc.png - -Consider Agent A on volttron instance "volttron1" on host "host_A" wants to make RPC call on Agent B -on VOLTTRON instance "volttron2" on host "host_B". - -1. Agent A makes RPC call. - -.. code-block:: Python - - kwargs = {"external_platform": self.destination_instance_name} - agent_a.vip.rpc.call("agent_b", set_point, "point_name", 2.5, \**kwargs) - -2. The message is transferred over federation link to VOLTTRON instance "volttron2" as both the exchanges are made *federated*. - -3. RPC subsystem of Agent B calls the actual RPC method and gets the result. It encapsulates the message result into VIP message object and sends it back to Agent A on VOLTTRON instance "volttron1". - -4. The RPC subsystem on Agent A receives the message result and gives it to Agent A application. - -Multi-Platform PubSub With Federation -------------------------------------- -For multi-platform PubSub communication, it is sufficient to have federation link from downstream server -to upstream server. In case of bi-directional data flow, links have to established in both the directions. - -.. image:: files/multiplatform_pubsub.png - -Consider Agent B on volttron instance "volttron2" on host "host_B" wants to subscribe to messages from -VOLTTRON instance "volttron2" on host "host_B". Firstly, federation link needs to be established from -"volttron2" to "volttron1". - -1. Agent B makes a subscribe call. - - agent_b.vip.subscribe.call("pubsub", prefix="devices", all_platforms=True) - -2. The PubSub subsystem converts the prefix to "__pubsub__.*.devices.#". Here, "*" indicates that agent is subscribing to "devices" topic from all the VOLTTRON platforms. - -3. A new queue is created and bound to VOLTTRON exchange with above binding key. Since the VOLTTRON exchange is a *federated exchange*, any subscribed message on the upstream server becomes available on the federated exchange and Agent B will be able to receive it. - -4. Agent A publishes message to topic "devices/pnnl/isb1/hvac1" - -5. PubSub subsystem publishes this messgae on it's VOLTTRON exchange. - -6. Due to the federation link, message is received by the Pubsub subsytem of Agent A. - -Using Shovel Plugin -------------------- -Shovels act as well written client application which moves messages from source to destination broker. -Below configuration shows how to setup a shovel to forward PubSub messages or perform -multi-platform RPC communication from local to a remote instance. It expects hostname, -port and virtual host of remote instance. - -Path: $VOLTTRON_HOME/rabbitmq_shovel_config.yml - -.. code-block:: yaml - - # Mandatory parameters for shovel setup - shovel: - rabbit-2: - port: '5671' - virtual-host: volttron - # Configuration to forward pubsub topics - pubsub: - # Identity of agent that is publishing the topic - platform.driver: - - devices - # Configuration to make remote RPC calls - rpc: - # Remote instance name - volttron2: - # List of pair of agent identities (local caller, remote callee) - - [scheduler, platform.actuator] - -To forward PubSub messages, the topic and agent identity of the publisher agent is needed. -To perform RPC, instance name of the remote instance and agent identities of the local agent -and remote agent are needed. - -To configure the VOLTTRON instance to setup shovel, run the following command. - - vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml] - -This setups up a shovel that forwards messages (either PubSub or RPC) from local exchange -to remote exchange. - -Multi-Platform PubSub With Shovel ---------------------------------- -After the shovel link is established for Pubsub, the below figure shows how the communication happens. -Please note, for bi-directional pubsub communication, shovel links need to be created on -both the nodes. The "blue" arrows show the shovel binding key. The pubsub topic configuration -in `$VOLTTRON_HOME/rabbitmq_shovel_config.yml` get internally converted to shovel binding key, -`"__pubsub__.."`. - -.. image:: files/multiplatform_shovel_pubsub.png - -Now consider a case where shovels are setup in both the directions for forwarding "devices" -topic. - -1. Agent B makes a subscribe call to receive messages with topic "devices" from all connected platforms. - - agent_b.vip.subscribe.call("pubsub", prefix="devices", all_platforms=True) - -2. The PubSub subsystem converts the prefix to "__pubsub__.*.devices.#" -"*" indicates that agent is subscribing to "devices" topic from all the VOLTTRON platforms. - -3. A new queue is created and bound to VOLTTRON exchange with above binding key. - -4. Agent A publishes message to topic "devices/pnnl/isb1/hvac1" - -5. PubSub subsystem publishes this message on it's VOLTTRON exchange. - -6. Due to a shovel link from VOLTTRON instance "volttron1" to "volttron2", the message is forwarded from volttron exchange "volttron1" to "volttron2" and picked up by Agent A on "volttron2". - -Multi-Platform RPC With Shovel ------------------------------- -After the shovel link is established for multi-platform RPC, the below figure shows how the -RPC communication happens. Please note it is mandatory to have shovel links on both directions -as it is request-response type of communication. We will need to set the agent identities for -caller and callee in the `$VOLTTRON_HOME/rabbitmq_shovel_config.yml`. The "blue" arrows show -the resulting the shovel binding key. - -.. image:: files/multiplatform_shovel_rpc.png - -Consider Agent A on volttron instance "volttron1" on host "host_A" wants to make RPC call on Agent B -on VOLTTRON instance "volttron2" on host "host_B". - -1. Agent A makes RPC call. - -.. code-block:: Python - - kwargs = {"external_platform": self.destination_instance_name} - agent_a.vip.rpc.call("agent_b", set_point, "point_name", 2.5, \**kwargs) - -2. The message is transferred over shovel link to VOLTTRON instance "volttron2". - -3. RPC subsystem of Agent B calls the actual RPC method and gets the result. It encapsulates the message result into VIP message object and sends it back to Agent A on VOLTTRON instance "volttron1". - -4. The RPC subsystem on Agent A receives the message result and gives it to Agent A application. - -RabbitMQ Management Tool Integrated Into VOLTTRON -================================================= -Some of the important native RabbitMQ control and management commands are now integrated with -"volttron-ctl" (vctl) utility. Using volttron-ctl RabbitMQ management utility, we can control and -monitor the status of RabbitMQ message bus. - -:: - - vctl rabbitmq --help - usage: vctl command [OPTIONS] ... rabbitmq [-h] [-c FILE] [--debug] - [-t SECS] - [--msgdebug MSGDEBUG] - [--vip-address ZMQADDR] - ... - subcommands: - - add-vhost add a new virtual host - add-user Add a new user. User will have admin privileges - i.e,configure, read and write - add-exchange add a new exchange - add-queue add a new queue - list-vhosts List virtual hosts - list-users List users - list-user-properties - List users - list-exchanges add a new user - list-exchange-properties - list exchanges with properties - list-queues list all queues - list-queue-properties - list queues with properties - list-bindings list all bindings with exchange - list-federation-parameters - list all federation parameters - list-shovel-parameters - list all shovel parameters - list-policies list all policies - remove-vhosts Remove virtual host/s - remove-users Remove virtual user/s - remove-exchanges Remove exchange/s - remove-queues Remove queue/s - remove-federation-parameters - Remove federation parameter - remove-shovel-parameters - Remove shovel parameter - remove-policies Remove policy diff --git a/docs/source/core_services/messagebus_refactor/files/multiplatform_ssl.png b/docs/source/core_services/messagebus_refactor/files/multiplatform_ssl.png deleted file mode 100644 index f9872cce05..0000000000 Binary files a/docs/source/core_services/messagebus_refactor/files/multiplatform_ssl.png and /dev/null differ diff --git a/docs/source/core_services/messagebus_refactor/index.rst b/docs/source/core_services/messagebus_refactor/index.rst deleted file mode 100644 index 559a351142..0000000000 --- a/docs/source/core_services/messagebus_refactor/index.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _VOLTTRON-MessageBusRefactor: - -==================== -Message Bus Refactor -==================== - -Refactoring of the existing message bus became necessary as we needed to reduce long term costs of -maintenance, enhancement and support of the message bus. It made sense to move to a more widely used, -industry accepted messaging library such as RabbitMQ that has many of the features that we need already -built in. - - 1. It has many different messaging patterns and routing topologies. - 2. It offers flexibility in deployment and supports large scale deployment - 3. It has well-developed SSL based authentication plugin. - -The goal of the message bus refactor task is to - 1. Maintain essential features of current message bus and minimize transition cost - 2. Leverage an existing and growing community dedicated to the further development of RabbitMQ - 3. Move services provided currently by VOLTTRON agents to services natively provided by RabbitMQ - 4. Decrease VOLTTRON development time spent on supporting message bus which is now a commodity technology - 5. Address concerns from community about ZeroMQ. - - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/multiplatform/Multiplatform-PubSub.rst b/docs/source/core_services/multiplatform/Multiplatform-PubSub.rst deleted file mode 100644 index d35cc8fb50..0000000000 --- a/docs/source/core_services/multiplatform/Multiplatform-PubSub.rst +++ /dev/null @@ -1,28 +0,0 @@ - .. _Multi-Platform-PubSub: - -=================================== -Multi-Platform PubSub Communication -=================================== - -Multi-Platform pubsub communication allows an agent on one platform to subscribe to receive messages from another -platform without having to setup connection to the remote platform directly. The connection will be internally managed -by the VOLTTRON platform router module. Please refer here -:ref:`Multi-Platform Communication Setup `) for more details regarding setting up of -Multi-Platform connections. - -External Platform Message Subscription -************************************** - - -To subscribe for topics from remote platform, the subscriber agent has to add an additional input parameter - -``all_platforms`` to the pubsub subscribe method. - -Here is an example, - -.. code:: python - - self.vip.pubsub.subscribe('pubsub', 'foo', self.on_match, all_platforms=True) - -There is no change in the publish method pf PubSub subsystem. If all the configurations are correct and the publisher -agent on the remote platform is publishing message to topic=``foo``, then the subscriber agent will start receiving -those messages. diff --git a/docs/source/core_services/multiplatform/index.rst b/docs/source/core_services/multiplatform/index.rst deleted file mode 100644 index 45d5303e9f..0000000000 --- a/docs/source/core_services/multiplatform/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _VOLTTRON-MultiPlatform: - -======================================= -MultiPlatform Message Bus Communication -======================================= - -The multi platform message bus communication allows the user to connect to remote VOLTTTRON platforms seamlessly. This -bypasses the need for an agent wanting to send/receive messages to/from remote platforms from having to setup the -connection to remote platform directly. Instead, the router module in each platform will maintain connections to the -remote platforms internally, that means it will connect, disconnect and monitor the status of each connection. - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/openadr/VenAgentConfig.rst b/docs/source/core_services/openadr/VenAgentConfig.rst deleted file mode 100644 index 5736d416f9..0000000000 --- a/docs/source/core_services/openadr/VenAgentConfig.rst +++ /dev/null @@ -1,140 +0,0 @@ -.. _VenAgentConfig: - -OpenADR VEN Agent: Installation and Configuration -================================================= - -The VEN agent can be configured, built and launched using the VOLTTRON agent installation -process described in -http://volttron.readthedocs.io/en/develop/devguides/agent_development/Agent-Development.html#agent-development. - -The VEN agent depends on some third-party libraries that are not in the standard -VOLTTRON installation. They should be installed in the VOLTTRON virtual environment prior to building the agent: -:: - - (volttron) $ cd $VOLTTRON_ROOT/services/core/OpenADRVenAgent - (volttron) $ pip install -r requirements.txt - -where **$VOLTTRON_ROOT** is the base directory of the cloned VOLTTRON code repository. - -The VEN agent is designed to work in tandem with a “control agent,” another -VOLTTRON agent that uses VOLTTRON RPC calls to manage events and supply report data. -A sample control agent has been provided in the **test/ControlAgentSim** subdirectory -under OpenADRVenAgent. - -The VEN agent maintains a persistent store of event and report data in -**$VOLTTRON_HOME/data/openadr.sqlite**. Some care should be taken in managing the -disk consumption of this data store. If no events or reports are active, -it is safe to take down the VEN agent and delete the file; the persistent -store will be reinitialized automatically on agent startup. - -Configuration Parameters ------------------------- - -The VEN agent’s configuration file contains JSON that includes several parameters -for configuring VTN server communications and other behavior. A sample configuration -file, **openadrven.config**, has been provided in the agent directory. - -The VEN agent supports the following configuration parameters: - -========================= ======================== ==================================================== -Parameter Example Description -========================= ======================== ==================================================== -db_path “$VOLTTRON_HOME/data/ Pathname of the agent's sqlite database. Shell - openadr.sqlite” variables will be expanded if they are present - in the pathname. -ven_id “0” The OpenADR ID of this virtual end node. Identifies - this VEN to the VTN. If automated VEN registration - is used, the ID is assigned by the VTN at that - time. If the VEN is registered manually with the - VTN (i.e., via configuration file settings), then - a common VEN ID should be entered in this config - file and in the VTN's site definition. -ven_name "ven01" Name of this virtual end node. This name is used - during automated registration only, identiying - the VEN before its VEN ID is known. -vtn_id “vtn01” OpenADR ID of the VTN with which this VEN - communicates. -vtn_address “http://openadr-vtn. URL and port number of the VTN. - ki-evi.com:8000” -send_registration “False” (“True” or ”False”) If “True”, the VEN sends - a one-time automated registration request to - the VTN to obtain the VEN ID. If automated - registration will be used, the VEN should be run - in this mode initially, then shut down and run - with this parameter set to “False” thereafter. -security_level “standard” If 'high', the VTN and VEN use a third-party - signing authority to sign and authenticate each - request. The default setting is “standard”: the - XML payloads do not contain Signature elements. -poll_interval_secs 30 (integer) How often the VEN should send an OadrPoll - request to the VTN. The poll interval cannot be - more frequent than the VEN’s 5-second process - loop frequency. -log_xml “False” (“True” or “False”) Whether to write each - inbound/outbound request’s XML data to the - agent's log. -opt_in_timeout_secs 1800 (integer) How long to wait before making a - default optIn/optOut decision. -opt_in_default_decision “optOut” (“True” or “False”) Which optIn/optOut choice - to make by default. -request_events_on_startup "False" ("True" or "False") Whether to ask the VTN for a - list of current events during VEN startup. -report_parameters (see below) A dictionary of definitions of reporting/telemetry - parameters. -========================= ======================== ==================================================== - -Reporting Configuration ------------------------ - -The VEN’s reporting configuration, specified as a dictionary in the agent configuration, -defines each telemetry element (metric) that the VEN can report to the VTN, if requested. -By default, it defines reports named “telemetry” and "telemetry_status", with a report -configuration dictionary containing the following parameters: - -======================================================= =========================== ==================================================== -"telemetry" report: parameters Example Description -======================================================= =========================== ==================================================== -report_name "TELEMETRY_USAGE" Friendly name of the report. -report_name_metadata "METADATA_TELEMETRY_USAGE" Friendly name of the report’s metadata, when sent - by the VEN’s oadrRegisterReport request. -report_specifier_id "telemetry" Uniquely identifies the report’s data set. -report_interval_secs_default "300" How often to send a reporting update to the VTN. -telemetry_parameters (baseline_power_kw): r_id "baseline_power" (baseline_power) Unique ID of the metric. -telemetry_parameters (baseline_power_kw): report_type "baseline" (baseline_power) The type of metric being reported. -telemetry_parameters (baseline_power_kw): reading_type "Direct Read" (baseline_power) How the metric was calculated. -telemetry_parameters (baseline_power_kw): units "powerReal" (baseline_power) The reading's data type. -telemetry_parameters (baseline_power_kw): method_name "get_baseline_power" (baseline_power) The VEN method to use when - extracting the data for reporting. -telemetry_parameters (baseline_power_kw): min_frequency 30 (baseline_power) The metric’s minimum sampling - frequency. -telemetry_parameters (baseline_power_kw): max_frequency 60 (baseline_power) The metric’s maximum sampling - frequency. -telemetry_parameters (current_power_kw): r_id "actual_power" (current_power) Unique ID of the metric. -telemetry_parameters (current_power_kw): report_type "reading" (current_power) The type of metric being reported. -telemetry_parameters (current_power_kw): reading_type "Direct Read" (current_power) How the metric was calculated. -telemetry_parameters (current_power_kw): units "powerReal" (baseline_power) The reading's data type. -telemetry_parameters (current_power_kw): method_name "get_current_power" (current_power) The VEN method to use when - extracting the data for reporting. -telemetry_parameters (current_power_kw): min_frequency 30 (current_power) The metric’s minimum sampling - frequency. -telemetry_parameters (current_power_kw): max_frequency 60 (current_power) The metric’s maximum sampling - frequency. -======================================================= =========================== ==================================================== - -======================================================= =========================== ==================================================== -"telemetry_status" report: parameters Example Description -======================================================= =========================== ==================================================== -report_name "TELEMETRY_STATUS" Friendly name of the report. -report_name_metadata "METADATA_TELEMETRY_STATUS" Friendly name of the report’s metadata, when sent - by the VEN’s oadrRegisterReport request. -report_specifier_id "telemetry_status" Uniquely identifies the report’s data set. -report_interval_secs_default "300" How often to send a reporting update to the VTN. -telemetry_parameters (Status): r_id "Status" Unique ID of the metric. -telemetry_parameters (Status): report_type "x-resourceStatus" The type of metric being reported. -telemetry_parameters (Status): reading_type "x-notApplicable" How the metric was calculated. -telemetry_parameters (Status): units "" The reading's data type. -telemetry_parameters (Status): method_name "" The VEN method to use when extracting the data - for reporting. -telemetry_parameters (Status): min_frequency 60 The metric’s minimum sampling frequency. -telemetry_parameters (Status): max_frequency 120 The metric’s maximum sampling frequency. -======================================================= =========================== ==================================================== diff --git a/docs/source/core_services/openadr/VenAgentGuide.rst b/docs/source/core_services/openadr/VenAgentGuide.rst deleted file mode 100644 index 42b009b0e2..0000000000 --- a/docs/source/core_services/openadr/VenAgentGuide.rst +++ /dev/null @@ -1,183 +0,0 @@ -.. _VenAgentGuide: - -OpenADR VEN Agent: Operation -============================ - -Events: - -- The VEN maintains a persistent record of DR events. -- Event updates (including creation) trigger publication of event JSON on the VOLTTRON message bus. -- Another VOLTTRON agent (a “control agent”) can get notified immediately of event updates by subscribing to event publication. It can also call get_events() to retrieve the current status of each active DR event. - -Reporting: - -- The VEN reports device status and usage telemetry to the VTN, relying on information received periodically from other VOLTTRON agents. -- The VEN config defines telemetry values (data points) that can be reported to the VTN. -- The VEN maintains a persistent record of telemetry values over time. -- Other VOLTTRON agents are expected to call report_telemetry() to supply the VEN with a regular stream of telemetry values for reporting. -- The VTN can identify which of the VEN’s supported data points needs to be actively reported at a given time, including their reporting frequency. -- Another VOLTTRON agent (a “control agent”) can get notified immediately of changes in telemetry reporting requirements by subscribing to publication of “telemetry parameters.” It can also call get_telemetry_parameters() to retrieve the current set of reporting requirements. -- The VEN persists these reporting requirements so that they survive VOLTTRON restarts. - -VOLTTRON Agent Interface ------------------------- - -The VEN implements the following VOLTTRON PubSub and RPC calls. - -PubSub: Event Update --------------------- - -When an event is created/updated, the event is published with a topic that includes 'openadr/event/{ven_id}'. - -Event JSON structure: -:: - - { - "event_id" : String, - "creation_time" : DateTime - UTC, - "start_time" : DateTime - UTC, - "end_time" : DateTime - UTC, - "priority" : Integer, # Values: 0, 1, 2, 3. Usually expected to be 1. - "signals" : String, # Values: json string describing one or more signals. - "status" : String, # Values: unresponded, far, near, active, completed, canceled. - "opt_type" : String # Values: optIn, optOut, none. - } - -If an event status is 'unresponded', the VEN is awaiting a decision on whether to optIn or optOut. -The downstream agent that subscribes to this PubSub message should communicate that choice -to the VEN by calling respond_to_event() (see below). The VEN then relays the choice to the VTN. - - -PubSub: Telemetry Parameters Update ------------------------------------ - -When the VEN telemetry reporting parameters have been updated (by the VTN), they -are published with a topic that includes 'openadr/status/{ven_id}'. - -These parameters include state information about the current report. - -Telemetry parameters structure: -:: - - { - 'telemetry': '{ - "baseline_power_kw": { - "r_id" : "baseline_power", # ID of the reporting metric - "report_type" : "baseline", # Type of reporting metric, e.g. baseline or reading - "reading_type" : "Direct Read", # (per OpenADR telemetry_usage report requirements) - "units" : "powerReal", # (per OpenADR telemetry_usage reoprt requirements) - "method_name" : "get_baseline_power", # Name of the VEN agent method that gets the metric - "min_frequency" : (Integer), # Data capture frequency in seconds (minimum) - "max_frequency" : (Integer) # Data capture frequency in seconds (maximum) - }, - "current_power_kw": { - "r_id" : "actual_power", # ID of the reporting metric - "report_type" : "reading", # Type of reporting metric, e.g. baseline or reading - "reading_type" : "Direct Read", # (per OpenADR telemetry_usage report requirements) - "units" : "powerReal", # (per OpenADR telemetry_usage report requirements) - "method_name" : "get_current_power", # Name of the VEN agent method that gets the metric - "min_frequency" : (Integer), # Data capture frequency in seconds (minimum) - "max_frequency" : (Integer) # Data capture frequency in seconds (maximum) - } - }' - 'report parameters': '{ - "status" : (String), # active, inactive, completed, or cancelled - "report_specifier_id" : "telemetry", # ID of the report definition - "report_request_id" : (String), # ID of the report request; supplied by the VTN - "request_id" : (String), # Request ID of the most recent VTN report modification - "interval_secs" : (Integer), # How often a report update is sent to the VTN - "granularity_secs" : (Integer), # How often a report update is sent to the VTN - "start_time" : (DateTime - UTC), # When the report started - "end_time" : (DateTime - UTC), # When the report is scheduled to end - "last_report" : (DateTime - UTC), # When a report update was last sent - "created_on" : (DateTime - UTC) # When this set of information was recorded in the VEN db - }', - 'manual_override' : (Boolean) # VEN manual override status, as supplied by Control Agent - 'online' : (Boolean) # VEN online status, as supplied by Control Agent - } - -Telemetry value definitions such as baseline_power_kw and current_power_kw come from the VEN agent config. - -RPC Calls ---------- - -respond_to_event() -:: - - @RPC.export - def respond_to_event(self, event_id, opt_in=True): - """ - Respond to an event, opting in or opting out. - - If an event's status=unresponded, it is awaiting this call. - When this RPC is received, the VEN sends an eventResponse to - the VTN, indicating whether optIn or optOut has been chosen. - If an event remains unresponded for a set period of time, - it times out and automatically opts in to the event. - - Since this call causes a change in the event's status, it triggers - a PubSub call for the event update, as described above. - - @param event_id: (String) ID of an event. - @param opt_type: (Boolean) Whether to opt in to the event (default True). - """ - -get_events() -:: - - @RPC.export - def get_events(self, active_only=True, started_after=None, end_time_before=None): - """ - Return a list of events. - - By default, return only event requests with status=active or status=unresponded. - - If an event's status=active, a DR event is currently in progress. - - @param active_only: (Boolean) Default True. - @param started_after: (DateTime) Default None. - @param end_time_before: (DateTime) Default None. - @return: (JSON) A list of events -- see 'PubSub: event update'. - """ - -get_telemetry_parameters() -:: - - @RPC.export - def get_telemetry_parameters(self): - """ - Return the VEN's current set of telemetry parameters. - - @return: (JSON) Current telemetry parameters -- see 'PubSub: telemetry parameters update'. - """ - -set_telemetry_status() -:: - - @RPC.export - def set_telemetry_status(self, online, manual_override): - """ - Update the VEN's reporting status. - - @param online: (Boolean) Whether the VEN's resource is online. - @param manual_override: (Boolean) Whether resource control has been overridden. - """ - -report_telemetry() -:: - - @RPC.export - def report_telemetry(self, telemetry_values): - """ - Update the VEN's report metrics. - - Examples of telemetry_values are: - { - 'baseline_power_kw': '6.2', - 'current_power_kw': '6.145', - 'start_time': '2017-12-05 16:11:42.977298+00:00', - 'end_time': '2017-12-05 16:12:12.977298+00:00' - } - - @param telemetry_values: (JSON) Current value of each report metric. - """ diff --git a/docs/source/core_services/security/Key-Stores.rst b/docs/source/core_services/security/Key-Stores.rst deleted file mode 100644 index 33e9b6a9b6..0000000000 --- a/docs/source/core_services/security/Key-Stores.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. _Key-Stores: - -========== -Key Stores -========== - -*Note: most VOLTTRON users should not need to directly interact with -agent key stores. These are notes for VOLTTRON platform developers. -This is not a stable interface and the implementation details are -subject to change.* - -Each agent has its own encryption key-pair that is used to -:ref:`authenticate` itself to the VOLTTRON -platform. A key-pair comprises a public key and a private (secret) key. -These keys are saved in a key store, which is implemented by the -:py:class:`KeyStore class`. -Each agent has its own key store. - -Key Store Locations -------------------- - -There are two main locations key stores will be saved. Installed agents' -key stores are in the the agent's data directory:: - - $VOLTTRON_HOME/agents///keystore.json - -Agents that are not installed, such as platform services and stand-alone -agents, store their key stores here:: - - $VOLTTRON_HOME/keystores//keystore.json - -Generating a Key Store ----------------------- - -Agents automatically retrieve keys from their key store unless -both the ``publickey`` and ``secretkey`` parameters are specified -when the agent is initialized. If an agent's key store does not exist -it will automatically be generated upon access. - -Users can generate a key pair by running the -``vctl auth keypair`` command. diff --git a/docs/source/core_services/security/Known-Hosts-File.rst b/docs/source/core_services/security/Known-Hosts-File.rst deleted file mode 100644 index ca6f705ab0..0000000000 --- a/docs/source/core_services/security/Known-Hosts-File.rst +++ /dev/null @@ -1,70 +0,0 @@ -.. _Known-Hosts-File: - -================ -Known Hosts File -================ - -Before an agent can connect to a VOLTTRON platform that agent must know the -platform's VIP address and public key (known as the *server key*). -It can be tedious to manually keep -track of server keys and match them with their corresponding -addresses. - -The purpose of the known-hosts file is to save a mapping of platform addresses -to server keys. This way the user only has to specify a server key one time. - -Saving a Server Key -------------------- - -Suppose a user wants to connect to a platform at ``192.168.0.42:22916``, and the -platform's public key is ``uhjbCUm3kT5QWj5Py9w0XZ7c1p6EP8pdo4Hq4dNEIiQ``. -To save this address-to-server-key association, the user can run:: - - volttron-ctl auth add-known-host --host 192.168.0.42:22916 --serverkey uhjbCUm3kT5QWj5Py9w0XZ7c1p6EP8pdo4Hq4dNEIiQ - -Now agents on this system will automatically use the correct server key when -connecting to the platform at ``192.168.0.42:22916``. - -Server Key for Local Platforms ------------------------------- - -When a platform starts it automatically adds its public key to the -known-hosts file. Thus agents connecting to the local VOLTTRON platform -(on the same system and using the same ``$VOLTTRON_HOME``) will automatically -be able to retrieve the platform's public key. - -Know-Host-File Details ----------------------- - -*Note: the following details regarding the known-hosts file are subject to -change. These notes are primarily for developers, but the may be helpful -if troubleshooting an issue.* **The known-hosts file should not be edited -directly.** - -File Location -~~~~~~~~~~~~~ - -The known-hosts-file is stored at ``$VOLTTRON_HOME/known_hosts``. - -File Contents -~~~~~~~~~~~~~ - -Here are the contents of an example known-hosts file: - -.. code:: JSON - - { - "@": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", - "127.0.0.1:22916": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", - "127.0.0.2:22916": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", - "127.0.0.1:12345": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", - "192.168.0.42:22916": "uhjbCUm3kT5QWj5Py9w0XZ7c1p6EP8pdo4Hq4dNEIiQ" - } - -The first four entries are for the local platform. (They were automatically -added when the platform started.) The first entry with the ``@`` key is for -IPC connections, and the entries with the ``127.0.0.*`` keys are for -local TCP connections. Note that a single VOLTTRON platform can bind to -multiple TCP addresses, and each address will be automatically added -to the known-hosts file. The last entry is for a remote VOLTTRON platform. -(It was added in the `Saving a Server Key`_ section.) diff --git a/docs/source/core_services/security/Protecting-Pub-Sub-Topics.rst b/docs/source/core_services/security/Protecting-Pub-Sub-Topics.rst deleted file mode 100644 index 7a89b2e217..0000000000 --- a/docs/source/core_services/security/Protecting-Pub-Sub-Topics.rst +++ /dev/null @@ -1,67 +0,0 @@ -.. _Protected-Topics: - -Protecting Pub/Sub Topics -========================= - -VIP :ref:`authorization ` enables -VOLTTRON platform owners to protect pub/sub topics. More -specifically, a platform owner can limit who can publish to a given -topic. This protects subscribers on that platform from receiving -messages (on the protected topic) from unauthorized agents. - -Example -------- - -To protect a topic, add the topic name to -``$VOLTTRON_HOME/protected_topics.json``. For example, the following -protected-topics file declares that the topic ``foo`` is protected: - -.. code:: JSON - - { - "write-protect": [ - {"topic": "foo", "capabilities": ["can_publish_to_foo"]} - ] - } - -**Note:** The capability name ``can_publish_to_foo`` is not special. It -can be any string, but it is easier to manage capabilities with -meaningful names. - -Now only agents with the capability ``can_publish_to_foo`` can publish -to the topic ``foo``. To add this capability to authenticated agents, -run ``vctl auth update`` (or ``volttron-ctl auth add`` for new -authentication entries), and enter ``can_publish_to_foo`` in the capabilities -field: - -.. code:: Bash - - capabilities (delimit multiple entries with comma) []: can_publish_to_foo - -Agents that have the ``can_publish_to_foo`` capabilites can publish to topic ``foo``. -That is, such agents can call: - -.. code:: Python - - self.vip.pubsub.publish('pubsub', 'foo', message='Here is a message') - -If unauthorized agents try to publish to topic ``foo`` they will get an exception: - -``to publish to topic "foo" requires capabilities ['can_publish_to_foo'], but capability list [] was provided`` - -Regular Expressions -------------------- - -Topic names in ``$VOLTTRON_HOME/protected_topics.json`` can be specified -as regular expressions. In order to use a regular expression, the topic name -must begin and end with a "/". For example: - -.. code:: JSON - - { - "write-protect": [ - {"topic": "/foo/*.*/", "capabilities": ["can_publish_to_foo"]} - ] - } - -This protects topics such as ``foo/bar`` and ``foo/anything``. diff --git a/docs/source/core_services/security/index.rst b/docs/source/core_services/security/index.rst deleted file mode 100644 index 57a072d05c..0000000000 --- a/docs/source/core_services/security/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -================= -VOLTTRON Security -================= - -There are various security-related topics throughout VOLTTRON's documentation. -This is a quick roadmap for finding security documentation. - -A core component of VOLTTRON is its :ref:`message bus`. -The security of this message bus is crucial to the entire system. -The :ref:`VOLTTRON Interconnect Protocol` provides -communication over the message bus. VIP was built with security in mind -from the ground up. VIP uses encrypted channels and enforces agent -:ref:`authentication` by default for all network communication. -VIP's :ref:`authorization` mechanism allows system -administrators to limit agent capabilities with fine granularity. - -Even with these security mechanisms built into VOLTTRON, it is important -for system administrators to -:ref:`harden VOLTTRON's underlying OS`. - -The VOLTTRON team has engaged with PNNL's Secure Software Central team to create -a threat profile document. You can read about the threat assessment findings and -how the VOLTTRON team is addressing them here: `SSC Threat Profile -`_ - -Additional documentation related to VIP authentication and authorization -is available here: - -.. toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/docs/source/core_services/security/running_agent_as_user.rst b/docs/source/core_services/security/running_agent_as_user.rst deleted file mode 100644 index 29e4c1b58c..0000000000 --- a/docs/source/core_services/security/running_agent_as_user.rst +++ /dev/null @@ -1,100 +0,0 @@ -.. _Running Agents as unique Unix user: - -============================== -Running Agents as unique users -============================== - -This VOLTTRON feature will cause the platform to create a new, unique Unix user(agent users) -on the host machine for each agent installed on the platform. This user will -have restricted permissions for the file system, and will be used to run the -agent process. The Unix user starting the VOLTTRON platform will be given -limited sudo access to create and delete agent users. - -Since this feature require system level changes (sudo access, user creation, file permission changes) the initial step -needs to be run as root or user with sudo access. This can be a user other than Unix user used to run volttron platform. -All files and folder created by VOLTTRON process in this mode would by default not have any access to others. -Permission for Unix group others would be provided to specific files and folder based on VOLTTRON process requirement. -It is recommended that you use a new volttron home to run volttron in secure mode. Converting a existing VOLTTRON -instance to secure mode is also possible but would involve some manual changes. Please see section -`Porting existing volttron home to secure mode`_. -Please note VOLTTRON has to be bootstrapped as prerequisite to running agents as unique users. - - -Setup agents to run using unique users ---------------------------------------- - -1. **This feature requires acl to be installed.** - - Make sure acl library is installed. If you are running on docker image acl might not be installed by default - - **apt-get install acl** - -2. Agents now run as a user different from VOLTTRON platform user. Agent users should have read and execute permissions - to all directories in the path to the python executable used by VOLTTRON. For example, if VOLTTRON is using a virtual - environment, then agent users should have read permissions to /bin/python and read - and execute permission to all the directories in the path /bin. This can be achieved by running - **chmod -R o+rx /bin** - -3. **Run scripts/secure_user_permissions.sh as root or using sudo** - - This script should be run as root or using sudo. This script gives the VOLTTRON platform user limited sudo access to - create a new unix user for each agent. All users created will be of the format volttron_. - - This script prompts for: - - a. **volttron platform user** - Unix user who would be running VOLTTRON platform. This should be an existing unix user. - On a development machine this could be the unix user you logged in as to check out VOLTTRON source - - b. **VOLTTRON_HOME directory** - The absolute path of volttron home directory. - - c. **Volttron instance name if VOLTTRON_HOME/config does not exist** - - - If VOLTTRON_HOME/config file exists instance name is got from config file. If not user will be prompted for - instance name. volttron_ must be a 23 characters or shorter containing only characters valid as Unix user names. - - This script will create necessary entries in /etc/sudoers.d/volttron to allow the volttron platform user to create - and delete agent users, Volttron agent group, and run any non-sudo command as agent users. - - This script will also create VOLTTRON_HOME and the config file if given a new volttron home directory when prompted. - -4. **Continue with VOLTTRON bootstrap and setup as normal** - point to the VOLTTRON_HOME that you provided in step 2. - -5. **On agent install (or agent start for existing agents)** - a unique agent user(Unix user) is created and the agent - is started as this user. The agent user name is recorded in USER_ID file under the agent install directory - (VOLTTRON_HOME/agents//USER_ID). Subsequent agent restarts will read content of USER_ID file and start - the agent process as that user. - -6. **On agent uninstall** - The agent user is deleted and the agent install directory is deleted. - -Creating new Agents -------------------- - -In this secure mode, agents will only have read write access to agent-data directory under the agent install -directory - VOLTTRON_HOME/agents///.agent-data. Attempting to write in any other -folder under VOLTTRON_HOME will result in permission errors. - -Changes to existing agents in secure mode ------------------------------------------ - -Due to the above change, **SQL historian has been modified to create its database by default under its agent-data directory** -if no path is given in the config file. If providing a path to the database in the config file, please provide a -directory where agent will have write access. This can be an external directory for which agent user (recorded in -VOLTTRON_HOME/agents//USER_ID) has read, write, and execute access. - - -Porting existing volttron home to secure mode ----------------------------------------------- - -When running scripts/secure_users_permissions.sh you will be prompted for a VOLTTRON_HOME directory. If this directory -exists and contains a volttron config file. The script will update the file locations and permissions of existing -volttron files including installed directories. However this step has the following limitations - -#. **You will NOT be able to revert to insecure mode once the changes are done.** Once setup is complete, changing the - config file manually to make parameter "secure-agent-users" to False, may result inconsistent volttron behavior -#. Volttron process and all agents have to be restarted to take effect. -#. **Agents can only to write to its own agent-data dir.** So if your agents writes to any directory outside - vhome/agents///agent-name.agent-data move existing files and update configuration such that - agent writes to agent-name.agent-data dir. For example, if you have SQLHistorian in writing .sqlite file to a - subdirectory under VOLTTRON_HOME that is not vhome/agents///agent-name.agent-data this needs - to be manually updated. - diff --git a/docs/source/core_services/service_agents/actuator/ActuatorAgentProgrammingNotes.rst b/docs/source/core_services/service_agents/actuator/ActuatorAgentProgrammingNotes.rst deleted file mode 100644 index f655a36ebf..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorAgentProgrammingNotes.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _ActuatorAgentProgrammingNotes: - -Notes on Working With the ActuatorAgent ---------------------------------------- - -- An agent can watch the window value from `device state - updates `__ to perform scheduled actions - within a timeslot. - - - If an Agent's Task is LOW\_PREEMPT priority it can watch for - `device state updates `__ where the window - is less than or equal to the grace period (default 60.0). - -- When considering if to schedule long or multiple short time slots on - a single device: - - - Do we need to ensure the device state for the duration between - slots? - - Yes. Schedule one long time slot instead. - - No. Is it all part of the same Task or can we break it up in case - there is a conflict with one of our time slots? - -- When considering time slots on multiple devices for a single Task: - - - Is the Task really dependent on all devices or is it actually - multiple Tasks? - -- When considering priority: - - - Does the Task have to happen **on an exact day**? - - No. Consider LOW and reschedule if preempted. - - Yes. Use HIGH. - - Is it problematic to prematurely stop a Task once started? - - No. Consider LOW\_PREEMPT and watch the `device state - updates `__ for a small window value. - - Yes. Consider LOW or HIGH. - -- If an agent is only observing but needs to assure that no another - Task is going on while taking readings it can schedule the time to - prevent other agents from messing with a devices state. The schedule - updates can be used as a reminder as to when to start watching. -- **Any** device, existing or not, can be scheduled. This allows for - agents to schedule fake devices to create reminders to start working - later rather then setting up their own internal timers and schedules. - - diff --git a/docs/source/core_services/service_agents/actuator/ActuatorConfig.rst b/docs/source/core_services/service_agents/actuator/ActuatorConfig.rst deleted file mode 100644 index 0c9e92cc83..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorConfig.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. _ActuatorConfig: - -ActuatorAgent Configuration ---------------------------- - -| ``schedule_publish_interval:: Interval between ``\ ```published`` - ``schedule`` - - ``announcements`` `__\ `` in seconds. Defaults to 30.`` - -| ``preempt_grace_time:: Minimum time given to Tasks which have been preempted  - to clean up in seconds. Defaults to 60.`` -| ``schedule_state_file:: File used to save and restore Task states if the  - ActuatorAgent restarts for any reason. File will be created if it does not  - exist when it is needed.`` - -Sample configuration file -~~~~~~~~~~~~~~~~~~~~~~~~~ - -| { -| "schedule\_publish\_interval": 30, -| "schedule\_state\_file": "actuator\_state.pickle" -| } diff --git a/docs/source/core_services/service_agents/actuator/ActuatorHeartbeat.rst b/docs/source/core_services/service_agents/actuator/ActuatorHeartbeat.rst deleted file mode 100644 index d1ef00cdf5..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorHeartbeat.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. _ActuatorHeartbeat: - -Heartbeat Signal ----------------- - -The ActuatorAgent can be configured to send a heartbeat message to the -device to indicate the platform is running. Ideally, if the heartbeat -signal is not sent the device should take over and resume normal -operation. - -The configuration has two parts, the interval (in seconds) for sending -the heartbeat and the specific point that should be modified each -iteration. - -The heart beat interval is specified with a global "heartbeat\_interval" -setting. The ActuatorAgent will automatically set the heartbeat point to -alternating "1" and "0" values. Changes to the heartbeat point will be -published like any other value change on a device. - -The heartbeat points are specified in the driver configuration file of -individual devices diff --git a/docs/source/core_services/service_agents/actuator/ActuatorSchedulePreemption.rst b/docs/source/core_services/service_agents/actuator/ActuatorSchedulePreemption.rst deleted file mode 100644 index 80cecd631e..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorSchedulePreemption.rst +++ /dev/null @@ -1,46 +0,0 @@ -.. _ActuatorSchedulePreemption: - -Task Preemption ---------------- - -Both LOW and LOW\_PREEMPT priority Tasks can be preempted. LOW priority -Tasks may be preempted by a conflicting HIGH priority Task before it -starts. LOW\_PREEMPT priority Tasks can be preempted by HIGH priority -Tasks even after they start. - -When a Task is preempted the ActuatorAgent will publish to -"devices/actuators/schedule/response" with the following header: - -:: - - #python - { - 'type': 'CANCEL_SCHEDULE', - 'requesterID': , - 'taskID': - } - -And the message (after parsing the json): - -:: - - #python - { - 'result': 'PREEMPTED', - 'info': '', - 'data': - { - 'agentID': , - 'taskID': - } - } - -Preemption Grace Time -~~~~~~~~~~~~~~~~~~~~~ - -If a LOW\_PREEMPT priority Task is preempted while it is running the -Task will be given a grace period to clean up before ending. For every -device which has a current time slot the window of remaining time will -be reduced to the grace time. At the end of the grace time the Task will -finish. If the Task has no currently open time slots on any devices it -will end immediately. diff --git a/docs/source/core_services/service_agents/actuator/ActuatorScheduleRequest.rst b/docs/source/core_services/service_agents/actuator/ActuatorScheduleRequest.rst deleted file mode 100644 index d22ed22464..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorScheduleRequest.rst +++ /dev/null @@ -1,122 +0,0 @@ -.. _ActuatorScheduleRequest: - -Requesting Schedule Changes ---------------------------- - -For information on responses see `AcutatorAgent responses to a schedule -or cancel requests. `__ - -**For 2.0 Agents using the pubsub interface: The actuator agent expects -all messages to be JSON and will parse them accordingly. Use -publish\_json to send messages where possible.** - -3.0 agents using pubsub for scheduling and setting point values should -publish python objects like normal. - -Scheduling a Task -~~~~~~~~~~~~~~~~~ - -An agent can request a task schedule by publishing to the -"devices/actuators/schedule/request" topic with the following header: - -:: - - #python - { - 'type': 'NEW_SCHEDULE', - 'requesterID': - 'taskID': , #The desired task ID for this task. It must be unique among all other scheduled tasks. - 'priority': , #The desired task priority, must be 'HIGH', 'LOW', or 'LOW_PREEMPT' - } - -with the following message: - -:: - - #python - [ - ["campus/building/device1", #First time slot. - "2013-12-06 16:00:00", #Start of time slot. - "2013-12-06 16:20:00"], #End of time slot. - ["campus/building/device1", #Second time slot. - "2013-12-06 18:00:00", #Start of time slot. - "2013-12-06 18:20:00"], #End of time slot. - ["campus/building/device2", #Third time slot. - "2013-12-06 16:00:00", #Start of time slot. - "2013-12-06 16:20:00"], #End of time slot. - #etc... - ] - -.. warning:: - - If time zones are not included in schedule requests then the Actuator will - interpret them as being in local time. This may cause remote interaction - with the actuator to malfunction. - -Points on Task Scheduling -^^^^^^^^^^^^^^^^^^^^^^^^^ - -- Everything in the header is required. -- Task id and requester id (agentid) should be a non empty value of - type string -- A Task schedule must have at least one time slot. -- The start and end times are parsed with `dateutil's date/time - parser `__. - **The default string representation of a python datetime object will - parse without issue.** -- Two Tasks are considered conflicted if at least one time slot on a - device from one task overlaps the time slot of the other on the same - device. -- The end time of one time slot can be the same as the start time of - another time slot for the same device. This will not be considered a - conflict. For example, time\_slot1(device0, time1, **time2**) and - time\_slot2(device0,\ **time2**, time3) are not considered a conflict -- A request must not conflict with itself. -- If something goes wrong see `this failure string - list `__ for an explanation - of the error. - -Task Priorities -^^^^^^^^^^^^^^^ - -HIGH: - This Task cannot be preempted under any circumstance.  - This task may preempt other conflicting preemptable Tasks. - -LOW: - This Task cannot be preempted \ **once it has started**\ .  - A Task is considered started once the earliest time slot on any device  - has been reached. This Task may **not** preempt other Tasks. - -LOW\_PREEMPT: - This Task may be preempted at any time. If the Task is preempted  - once it has begun running any current time slots will be given a grace period  - (configurable in the ActuatorAgent configuration file, defaults to 60 seconds) - before being revoked. This Task may **not** preempt other Tasks. - -Canceling a Task -~~~~~~~~~~~~~~~~ - -A task may be canceled by publishing to the -"devices/actuators/schedule/request" topic with the following header: - -:: - - #python - { - 'type': 'CANCEL_SCHEDULE', - 'requesterID': - 'taskID': , #The desired task ID for this task. It must be unique among all other scheduled tasks. - } - -Points on Task Canceling -^^^^^^^^^^^^^^^^^^^^^^^^ - -- The requesterID and taskID must match the original values from the - original request header. -- After a Tasks time has passed there is no need to cancel it. Doing so - will result in a "TASK\_ID\_DOES\_NOT\_EXIST" error. -- If something goes wrong see `this failure string - list `__ for an explanation - of the error. - diff --git a/docs/source/core_services/service_agents/actuator/ActuatorScheduleResponse.rst b/docs/source/core_services/service_agents/actuator/ActuatorScheduleResponse.rst deleted file mode 100644 index 3109f09d1c..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorScheduleResponse.rst +++ /dev/null @@ -1,94 +0,0 @@ -.. _ActuatorScheduleResponse: - -ActuatorAgent Response ----------------------- - -In response to a `Task schedule request `__ the -ActuatorAgent will respond on the topic -"devices/actuators/schedule/result" with the header: - -:: - - #python - { - 'type': <'NEW_SCHEDULE', 'CANCEL_SCHEDULE'> - 'requesterID': , - 'taskID': - } - -And the message (after parsing the json): - -:: - - #python - { - 'result': <'SUCCESS', 'FAILURE', 'PREEMPTED'>, - 'info': , - 'data': - } - -The ActuatorAgent may publish cancellation notices for preempted Tasks -using the "PREEMPTED" result. - -Preemption Data -~~~~~~~~~~~~~~~ - -Preemption data takes the form: - -:: - - #python - { - 'agentID': , - 'taskID': - } - -Failure Reasons -~~~~~~~~~~~~~~~ - -In many cases the ActuatorAgent will try to give good feedback as to why -a request failed. - -General Failures -^^^^^^^^^^^^^^^^ - -| ``INVALID_REQUEST_TYPE:: Request type was not "NEW_SCHEDULE" or "CANCEL_SCHEDULE".`` -| ``MISSING_TASK_ID:: Failed to supply a taskID.`` -| ``MISSING_AGENT_ID:: AgentID not supplied.`` - -Task Schedule Failures -^^^^^^^^^^^^^^^^^^^^^^ - -| ``TASK_ID_ALREADY_EXISTS: The supplied taskID already belongs to an existing task.`` -| ``MISSING_PRIORITY: Failed to supply a priority for a Task schedule request.`` -| ``INVALID_PRIORITY: Priority not one of "HIGH", "LOW", or "LOW_PREEMPT".`` -| ``MALFORMED_REQUEST_EMPTY: Request list is missing or empty.`` -| ``REQUEST_CONFLICTS_WITH_SELF: Requested time slots on the same device overlap.`` - ``MALFORMED_REQUEST: Reported when the request parser raises an unhandled exception. The exception name and info are appended to this info string.`` - ``CONFLICTS_WITH_EXISTING_SCHEDULES: This schedule conflict with an existing schedules that it cannot preempt. The data item for the results will contain info about the conflicts in this form (after parsing json):`` - -:: - - #python - { - '': - { - '': - [ - ["campus/building/device1", - "2013-12-06 16:00:00", - "2013-12-06 16:20:00"], - ["campus/building/device1", - "2013-12-06 18:00:00", - "2013-12-06 18:20:00"] - ] - '':[...] - } - '': {...} - } - -Task Cancel Failures -^^^^^^^^^^^^^^^^^^^^ - -``TASK_ID_DOES_NOT_EXIST:: Trying to cancel a Task which does not exist. This error can also occur when trying to cancel a finished Task.`` -``AGENT_ID_TASK_ID_MISMATCH:: A different agent ID is being used when trying to cancel a Task.`` diff --git a/docs/source/core_services/service_agents/actuator/ActuatorScheduleState.rst b/docs/source/core_services/service_agents/actuator/ActuatorScheduleState.rst deleted file mode 100644 index bda8f84b2c..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorScheduleState.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _ActuatorScheduleState: - -Schedule State Broadcast ------------------------- - -Periodically the ActuatorAgent will publish the state of all currently -used devices. - -For each device the ActuatorAgent will publish to an associated topic: - -:: - - #python - 'devices/actuators/schedule/announce/' - -With the following header: - -:: - - #python - { - 'requesterID': , - 'taskID': - 'window': - } - -The frequency of the updates is configurable with the -"schedule\_publish\_interval" setting. diff --git a/docs/source/core_services/service_agents/actuator/ActuatorValueRequest.rst b/docs/source/core_services/service_agents/actuator/ActuatorValueRequest.rst deleted file mode 100644 index aa0020cca7..0000000000 --- a/docs/source/core_services/service_agents/actuator/ActuatorValueRequest.rst +++ /dev/null @@ -1,113 +0,0 @@ -.. _ActuatorValueRequest: - -ActuatorAgent Interaction -------------------------- - -Once an Task has been scheduled and the time slot for one or more of the -devices has started an agent may interact with the device using the -**get** and **set** topics. - -Both **get** and **set** are responded to the same way. See -[#ActuatorReply Actuator Reply] below. - -Getting values -~~~~~~~~~~~~~~ - -While the sMap driver for a device should always be setup to -periodically broadcast the state of a device you may want an up to the -moment value for an actuation point on a device. - -To request a value publish a message to the following topic: - -:: - - #python - 'devices/actuators/get//' - -Setting Values -~~~~~~~~~~~~~~ - -Value are set in a similar manner: - -To set a value publish a message to the following topic: - -:: - - #python - 'devices/actuators/set//' - -With this header: - -:: - - #python - { - 'requesterID': - } - -And the message contents being the new value of the actuator. - -**The actuator agent expects all messages to be JSON and will parse them -accordingly. Use publish\_json to send messages where possible. This is -significant for Boolean values especially.** - -Actuator Reply -~~~~~~~~~~~~~~ - -#ActuatorReply The ActuatorAgent will reply to both **get** and *set*' -on the **value** topic for an actuator: - -:: - - #python - 'devices/actuators/value//' - -With this header: - -:: - - #python - { - 'requesterID': - } - -With the message containing the value encoded in JSON. - -Actuator Error Reply -~~~~~~~~~~~~~~~~~~~~ - -If something goes wrong the ActuatorAgent will reply to both **get** and -*set*' on the **error** topic for an actuator: - -:: - - #python - 'devices/actuators/error//' - -With this header: - -:: - - #python - { - 'requesterID': - } - -The message will be in the following form: - -:: - - #python - { - 'type': - 'value': - } - -Common Error Types -^^^^^^^^^^^^^^^^^^ - -| ``LockError:: Returned when a request is made when we do not have permission to use a device. (Forgot to schedule, preempted and we did not handle the preemption message correctly, ran out of time in time slot, etc...)`` -| ``ValueError:: Message missing or could not be parsed as JSON.`` - -Other error types involve problem with communication between the -ActuatorAgent and sMap. diff --git a/docs/source/core_services/service_agents/actuator/index.rst b/docs/source/core_services/service_agents/actuator/index.rst deleted file mode 100644 index fb721625f3..0000000000 --- a/docs/source/core_services/service_agents/actuator/index.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _ActuatorAgent: - -ActuatorAgent -============= - -This agent is used to manage write access to devices. Agents -may request scheduled times, called Tasks, to interact with one or more -devices. - -Actuator Agent Communication ----------------------------- - -:doc:`Scheduling and canceling a Task. ` - -:doc:`Interacting with a device via the -ActuatorAgent. ` - -:doc:`AcutatorAgent responses to a schedule or cancel -request. ` - -:doc:`Schedule state announcements. ` - -:doc:`What happens when a running Task is -preempted. ` - -:doc:`Setup heartbeat signal for a device. ` - -:doc:`ActuatorAgent configuration. ` - -:doc:`Notes on programming agents to work with the -ActuatorAgent ` - - - - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/central_management/VOLTTRON-Central.rst b/docs/source/core_services/service_agents/central_management/VOLTTRON-Central.rst deleted file mode 100644 index aff2727a1b..0000000000 --- a/docs/source/core_services/service_agents/central_management/VOLTTRON-Central.rst +++ /dev/null @@ -1,115 +0,0 @@ -.. _VOLTTRON-Central: - -VOLTTRON Central Management Agent -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Agent Introduction -================== - -The VOLTTRON Central Agent (VCM) is responsible for controlling multiple -VOLTTRON instances through a single interfaces. The VOLTTRON instances -can be either local or remote. VCM leverages an internal VOLTTRON web server -providing a interface to our JSON-RPC based web api. Both the web api and -the interface are served through the VCM agent. There is a :ref:`VOLTTRON Central Demo ` that will allow you to quickly setup and see the current offerings of the interface. -VOLTTRON Central will allow you to - -- See a list of platforms being managed. -- Add and remove platforms. -- Install, start and stop agents to the registered platforms. -- Create dynamic graphs from the historians based upon points. -- Execute functions on remote platforms. - -.. note:: - - see :ref:`VCM json-rpc web api ` for how the web interface - works. - -Instance Configuration -====================== - -In order for any web agent to be enabled, there must be a port configured to -serve the content. The easiest way to do this is to create a config file in -the root of your VOLTTRON_HOME directory. ( to do this automatically see :ref:`VOLTTRON Config ` ) - -The following is an example of the configuration file - -:: - - [volttron] - vip-addres=tcp://127.0.0.1:22916 - bind-web-address=http://127.0.0.1:8080/vc/ - -** Note the above configuration will open a discoverable port for the volttron - instance. In addition, the opening of this web address allows you to serve - both static as well as dynamic pages. - -Verify that the instance is serving properly by pointing your web browser to - -:: - - http://127.0.0.1:8080/discovery/ - -This is the required information for a VolttronCentralPlatform to be able to -be managed. - -VOLTTRON Central Manager Configuration -====================================== -The following is the default configuration file for VOLTTRON Central - -:: - - { - # The agentid is used during display on the VOLTTRON central platform - # it does not need to be unique. - "agentid": "volttron central", - - # Authentication for users is handled through a naive password algorithm - # Note in the following example the user and password are both admin. - - # DO NOT USE IN PRODUCTION ENVIRONMENT! - - # import hashlib - # hashlib.sha512(password).hexdigest() where password is the plain text password. - "users" : { - "reader" : { - "password" : "2d7349c51a3914cd6f5dc28e23c417ace074400d7c3e176bcf5da72fdbeb6ce7ed767ca00c6c1fb754b8df5114fc0b903960e7f3befe3a338d4a640c05dfaf2d", - "groups" : [ - "reader" - ] - }, - "writer" : { - "password" : "f7c31a682a838bbe0957cfa0bb060daff83c488fa5646eb541d334f241418af3611ff621b5a1b0d327f1ee80da25e04099376d3bc533a72d2280964b4fab2a32", - "groups" : [ - "writer" - ] - }, - "admin" : { - "password" : "c7ad44cbad762a5da0a452f9e854fdc1e0e7a52a38015f23f3eab1d80b931dd472634dfac71cd34ebc35d16ab7fb8a90c81f975113d6c7538dc69dd8de9077ec", - "groups" : [ - "admin" - ] - }, - "dorothy" : { - "password" : "cf1b67402d648f51ef6ff8805736d588ca07cbf018a5fba404d28532d839a1c046bfcd31558dff658678b3112502f4da9494f7a655c3bdc0e4b0db3a5577b298", - "groups" : [ - "reader, writer" - ] - } - } - } - -Agent Execution -=============== - -To start VOLTTRON Central first make sure the -`VOLTTRON instance is running <../../../devguides/eclipse/Eclipse-Dev-Environment.html#execute-volttron-through-shell>`__ -Next create/choose the config file to use. Finally from an activated -shell in the root of the VOLTTRON repository execute - -:: - - # Arguments are package to execute, config file to use, tag to use as reference - ./scripts/core/pack_install.sh services/core/VolttronCentral services/core/VolttronCentral/config vc - - # Start the agent - vctl start --tag vc diff --git a/docs/source/core_services/service_agents/central_management/index.rst b/docs/source/core_services/service_agents/central_management/index.rst deleted file mode 100644 index 73997edec5..0000000000 --- a/docs/source/core_services/service_agents/central_management/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -=========================== -VOLTTRON Central Management -=========================== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/datamover/datamover.rst b/docs/source/core_services/service_agents/datamover/datamover.rst deleted file mode 100644 index fa46b06b64..0000000000 --- a/docs/source/core_services/service_agents/datamover/datamover.rst +++ /dev/null @@ -1,57 +0,0 @@ -.. _DataMover_Historian: - -=================== -DataMover Historian -=================== - -The DataMover Historian is used to send data from one instance of VOLTTRON to -another. This agent is similar to the Forward Historian but does not publish -data on the target platform's message bus. Messages are instead inserted into -the backup queue in the target's historian. This helps to ensure that messages -are recorded. - -If the target instance becomes unavailable or the target historian is stopped -then this agent's cache will build up until it reaches it's maximum capacity -or the instance and agent comes back online. - -The DataMover now uses the configuration store for storing its -configurations. This allows dynamic updating of configuration without having -to rebuild the agent. - -Configuration Options ---------------------- - -The following JSON configuration file shows all the options currently supported -by the DataMover agent. - -.. code-block:: python - - { - # destination-serverkey - # The destination instance's publickey. Required if the - # destination-vip-address has not been added to the known-host file. - # See vctl auth --help for all instance security options. - # - # This can be retrieved either through the command: - # vctl auth serverkey - # Or if the web is enabled on the destination through the browser at: - # http(s)://hostaddress:port/discovery/ - "destination-serverkey": null, - - # destination-vip-address - REQUIRED - # Address of the target platform. - # Examples: - # "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket" - # "destination-vip": "tcp://127.0.0.1:23916" - "destination-vip": "tcp://:", - - # destination_historian_identity - # Identity of the historian to send data to. Only needed if data - # should be sent an agent other than "platform.historian" - "destination-historian-identity": "platform.historian", - - # remote_identity - OPTIONAL - # identity that will show up in peers list on the remote platform - # By default this identity is randomly generated - "remote-identity": "22916.datamover" - } diff --git a/docs/source/core_services/service_agents/datamover/index.rst b/docs/source/core_services/service_agents/datamover/index.rst deleted file mode 100644 index 4228e73d2a..0000000000 --- a/docs/source/core_services/service_agents/datamover/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -================ -Data Mover Agent -================ - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/emailer/index.rst b/docs/source/core_services/service_agents/emailer/index.rst deleted file mode 100644 index 3e32da90bb..0000000000 --- a/docs/source/core_services/service_agents/emailer/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -============= -Emailer Agent -============= - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/externaldata/index.rst b/docs/source/core_services/service_agents/externaldata/index.rst deleted file mode 100644 index a1552724fd..0000000000 --- a/docs/source/core_services/service_agents/externaldata/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -=================== -External Data Agent -=================== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/failover/index.rst b/docs/source/core_services/service_agents/failover/index.rst deleted file mode 100644 index 283ff53654..0000000000 --- a/docs/source/core_services/service_agents/failover/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -============== -Failover Agent -============== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/file_watch_publisher/index.rst b/docs/source/core_services/service_agents/file_watch_publisher/index.rst deleted file mode 100644 index fd5d85fd89..0000000000 --- a/docs/source/core_services/service_agents/file_watch_publisher/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -=========================== -File Watch Publisher Agent -=========================== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/index.rst b/docs/source/core_services/service_agents/index.rst deleted file mode 100644 index 2c255279c4..0000000000 --- a/docs/source/core_services/service_agents/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -============== -Service Agents -============== - -.. toctree:: - :glob: - :maxdepth: 2 - - actuator/index - alerter/index - emailer/index - failover/index - file_watch_publisher/index - platform/index - market_service/index - threshold/index - central_management/index - weather/index - * diff --git a/docs/source/core_services/service_agents/market_service/index.rst b/docs/source/core_services/service_agents/market_service/index.rst deleted file mode 100644 index c62f5e35c4..0000000000 --- a/docs/source/core_services/service_agents/market_service/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -==================== -Market Service Agent -==================== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/platform/Platform-Agent.rst b/docs/source/core_services/service_agents/platform/Platform-Agent.rst deleted file mode 100644 index b2d44094cf..0000000000 --- a/docs/source/core_services/service_agents/platform/Platform-Agent.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. _Platform-Agent: - -Platform Agent -~~~~~~~~~~~~~~ - -Introduction -============ - -The Platform Agent allows communication from a VOLTTRON Central -instance. Each VOLTTRON instance that is to be controlled through the -VOLTTRON Central agent should have one and only one Platform Agent. The -Platform Agent must have the VIP identity of platform.agent. - -Configuration -------------- - -The minimal configuration (and most likely the only used) for a Platform -Agent is as follows - -:: - - { - # Agent id is used in the display on volttron central. - "agentid": "Platform 1", - } - -The other options for the Platform Agent configuration can be found in -the Platform Agent source directory. diff --git a/docs/source/core_services/service_agents/platform/index.rst b/docs/source/core_services/service_agents/platform/index.rst deleted file mode 100644 index ea5b1fe283..0000000000 --- a/docs/source/core_services/service_agents/platform/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -============== -Platform Agent -============== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/sysmon/index.rst b/docs/source/core_services/service_agents/sysmon/index.rst deleted file mode 100644 index 625baed63b..0000000000 --- a/docs/source/core_services/service_agents/sysmon/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -======================= -System Monitoring Agent -======================= - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/threshold/index.rst b/docs/source/core_services/service_agents/threshold/index.rst deleted file mode 100644 index 86843a54a5..0000000000 --- a/docs/source/core_services/service_agents/threshold/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -========================= -Threshold Detection Agent -========================= - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/core_services/service_agents/topic_watcher/index.rst b/docs/source/core_services/service_agents/topic_watcher/index.rst deleted file mode 100644 index 46f6af7a7f..0000000000 --- a/docs/source/core_services/service_agents/topic_watcher/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -=================== -Topic Watcher Agent -=================== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/deploying-volttron/bootstrap-process.rst b/docs/source/deploying-volttron/bootstrap-process.rst new file mode 100644 index 0000000000..974327c7ee --- /dev/null +++ b/docs/source/deploying-volttron/bootstrap-process.rst @@ -0,0 +1,108 @@ +.. _Bootstrap-Process: + +================= +Bootstrap Process +================= + +The `bootstrap.py` Python script in the root directory of the VOLTTRON repository may be used to create +VOLTTRON's Python virtual environment and install or update service agent dependencies. + +The first running of `bootstrap.py` will be against the systems `python3` executable. During this initial step a +virtual environment is created using the `venv` module. Additionally, all requirements for running a base volttron +instance are installed. A user can specify additional arguments to the `bootstrap.py` script allowing a way to +quickly install dependencies for service agents (e.g. bootstrap.py --mysql). + +.. code-block:: bash + + # boostrap with additional dependency requirements for web enabled agents. + user@machine$ python3 bootstrap.py --web + +After activating an environment (source env/bin/activate) one can use the `bootstrap.py` script to install more +service agent dependencies by executing the same boostrap.py command. + +.. note:: + + In the following example one can tell the environment is activated based upon the (volttron) prefix to the + command prompt + +.. code-block:: bash + + # Adding additional database requirement for crate + (volttron) user@machine$ python3 bootstrap.py --crate + +If a fresh install is necessary one can use the --force argument to rebuild the virtual environment from scratch. + +.. code-block:: bash + + # Rebuild the environment from the system's python3 + user@machine$ python3 bootstrap.py --force + +.. note:: + + Multiple options can be specified on the command line `python3 bootstrap.py --web --crate` installs + dependencies for web enabled agents as well as the Crate database historian. + +Bootstrap Options +================= + +The `bootstrap.py` script takes several options that allow customization of the environment, installing and +update packages, and setting the package locations. The following sections can be reproduced by executing: + +.. code-block:: bash + + # Show the help output from bootstrap.py + user@machine$ python3 bootstrap --help + +The options for customizing the location of the virtual environment are as follows. + +.. code-block:: bash + + --envdir VIRTUAL_ENV alternate location for virtual environment + --force force installing in non-empty directory + -o, --only-virtenv create virtual environment and exit (skip install) + --prompt PROMPT provide alternate prompt in activated environment + (default: volttron) + +Additional options are available for customizing where an environment will retrieve packages and/or upgrade +existing packages installed. + +.. code-block:: bash + + update options: + --offline install from cache without downloading + -u, --upgrade upgrade installed packages + -w, --wheel build wheels in the pip wheelhouse + +To help boostrap an environment in the shortest number of steps we have grouped dependency packages under named +collections. For example, the --web argument will install six different packages from a single call to +boostrap.py --web. The following collections are available to use. + +.. code-block:: bash + + ... + + Extra packaging options: + --all All dependency groups. + --crate Crate database adapter + --databases All of the databases (crate, mysql, postgres, etc). + --dnp3 Dependencies for the dnp3 agent. + --documentation All dependency groups to allow generation of documentation without error. + --drivers All drivers known to the platform driver. + --influxdb Influx database adapter + --market Base market agent dependencies + --mongo Mongo database adapter + --mysql Mysql database adapter + --pandas Pandas numerical analysis tool + --postgres Postgres database adapter + --testing A variety of testing tools for running unit/integration tests. + --web Packages facilitating the building of web enabled agents. + --weather Packages for the base weather agent + + rabbitmq options: + --rabbitmq [RABBITMQ] + install rabbitmq server and its dependencies. optional + argument: Install directory that exists and is + writeable. RabbitMQ server will be installed in a + subdirectory.Defaults to /home/osboxes/rabbitmq_server + + ... diff --git a/docs/source/deploying-volttron/deployment-planning-options.rst b/docs/source/deploying-volttron/deployment-planning-options.rst new file mode 100644 index 0000000000..3addbb9241 --- /dev/null +++ b/docs/source/deploying-volttron/deployment-planning-options.rst @@ -0,0 +1,282 @@ +.. _Planning-Deployments: + +===================== +Planning a Deployment +===================== + +The 3 major installation types for VOLTTRON are doing development, doing research using VOLTTRON, and +collecting and managing physical devices. + +Development and Research installation tend to be smaller footprint installations. For development, the +data is usually synthetic or copied from another source. The existing documentation covers development +installs in significant detail. + +Other deployments will have a better installation experience if they consider certain kinds of questions +while they plan their installation. + + +Questions +========= + + * Do you want to send commands to the machines ? + * Do you want to store the data centrally ? + * How many machines do you expect to collect data from on each "collector" ? + * How often will the machines collect data ? + * Are all the devices visible to the same network ? + * What types of VOLTTRON applications do you want to run ? + + +Commands +-------- + +If you wish to send commands to the devices, you will want to install and configure the Volttron Central +agent. If you are only using VOLTTRON to securely collect the data, you can turn off the extra agents +to reduce the footprint. + + +Storing Data +------------ + +VOLTTRON supports multiple historians. MySQL and MongoDB are the most commonly used. As you plan your +installation, you should consider how quickly you need access to the data and where. If you are looking +at the health and well-being of an entire suite of devices, its likely that you want to do that from a +central location. Analytics can be performed at the edge by VOLTTRON applications or can be performed +across the data usually from a central data repository. The latency that you can tolerate in your data +being available will also determine choices in different agents (ForwardHistorian versus Data Mover) + + +How Many +-------- + +The ratio of how many devices-to-collector machine is based on several factors. These include: + + * how much memory and network bandwidth the collection machine has. More = More devices + * how fast the local storage is can affect how fast the data cache can be written. Very slow + storage devices can fall behind + +The second half of the "how many" question is how many collector platforms are writing to a single +VOLTTRON platform to store data - and whether that storage is local, remote, big enough, etc. + +If you are storing more than moderate amount of data, you will probably benefit from installing +your database on a different machine than your concrete historian machine. + +.. note:: + + This is contra-indicated if you have a slow network connection between you concrete historian and your database + machine. + +In synthetic testing up to 6 virtual machines hosting 500 devices each (18 points) were easily +supported by a single centralized platform writing to a Mongo database - using a high speed network. +That central platform experienced very little CPU or memory load when the VOLTTRON Central agent was disabled. + + +How Often +--------- + +This question is closely related to the last. A higher sampling frequency will create more data. This +will place more work in the storage phase. + + +Networks +-------- + +In many cases, there are constraints on how networks can interact with each other. In many cases, +these include security considerations. On some sites, the primary network will be protected from less +secure networks and may require different installation considerations. For example, if a data collector +machine and the database machine are on the same network with sufficient security, you may choose +to have the data collector write directly to the database. If the collector is on an isolated building +network then you will likely need to use the ForwardHistorian to bridge the two networks. + + +Other Considerations +-------------------- + +Physical location and maintenance of collector machines must be considered in all live deployments. +Although the number of data points may imply a heavy load on a data collection box, the physical constraints +may limit the practicality of having more than a single box. The other side of that discussion is deploying +many collector boxes may be simpler initially, but may create a maintenance challenge if you don't +plan ahead on how you apply patches, etc. + +Naming conventions should also be considered. The ability to trace data through the system and identify +the collector machine and device can be invaluable in debugging and analysis. + + +.. _Deployment-Options: + +Deployment Options +================== + +There are several ways to deploy the VOLTTRON platform in a Linux environment. It is up to the user to determine which +is right for them. The following assumes that the platform has already been bootstrapped and is ready to run. + + +Simple Command Line +------------------- + +With the VOLTTRON environment activated the platform can be started simply by running VOLTTRON on the command +line. + +:: + + $volttron -vv + +This will start the platform in the current terminal with very verbose logging turned on. This +is most appropriate for testing Agents or testing a deployment for problems before switching to a +more long term solution. This will print all log messages to the console in real time. + +This should not be used for long term deployment. As soon as an SSH session is terminated for whatever reason +the processes attached to that session will be killed. This also will not capture log message to a file. + + +Running VOLTTRON as a Background Process +---------------------------------------- + +A simple, more long term solution, is to run volttron in the background and disown it from the current terminal. + +.. warning:: + If you plan on running VOLTTRON in the background and detaching it from the + terminal with the ``disown`` command be sure to redirect stderr and stdout to ``/dev/null``. + Even if logging to a file is used some libraries which VOLTTRON relies on output + directly to stdout and stderr. This will cause problems if those file descriptors + are not redirected to ``/dev/null``. + +.. code-block:: bash + + $volttron -vv -l volttron.log > /dev/null 2>&1& + +Alternatively: + +.. code-block:: bash + + ``./start-volttron`` + +.. note:: + + If you are not in an activated environment, this script will start the platform running in the background in the + correct environment, however the environment will not be activated for you, you must activate it yourself. + +**If there are other jobs running in your terminal be sure to disown the correct one.** + +.. code-block:: console + + $jobs + [1]+ Running something else + [2]+ Running ./start-volttron + + #Disown VOLTTRON + $disown %2 + +This will run the VOLTTRON platform in the background and turn it into a daemon. The log output will be directed +to a file called ``volttron.log`` in the current directory. + +To keep the size of the log under control for more longer term deployments us the rotating log configuration file +``examples/rotatinglog.py``. + +.. code-block:: bash + + $volttron -vv --log-config examples/rotatinglog.py > /dev/null 2>&1& + +This will start a rotate the log file at midnight and limit the total log data to seven days worth. + +The main downside to this approach is that the VOLTTRON platform will not automatically +resume if the system is restarted. It will need to be restarted manually after reboot. + +.. _system service setup: + +Setting up VOLTTRON as a System Service +--------------------------------------- + + +Systemd +^^^^^^^ + +An example service file ``scripts/admin/volttron.service`` for systemd cas be used as a starting point +for setting up VOLTTRON as a service. Note that as this will redirect all the output that would +be going to stdout - to the syslog. This can be accessed using `journalctl`. For systems that run +all the time or have a high level of debugging turned on, we recommend checking the system's +logrotate settings. + +.. code-block:: console + + [Unit] + Description=VOLTTRON Platform Service + After=network.target + + [Service] + Type=simple + + #Change this to the user that VOLTTRON will run as. + User=volttron + Group=volttron + + #Uncomment and change this to specify a different VOLTTRON_HOME + #Environment="VOLTTRON_HOME=/home/volttron/.volttron" + + #Change these to settings to reflect the install location of VOLTTRON + WorkingDirectory=/var/lib/volttron + ExecStart=/var/lib/volttron/env/bin/volttron -vv + ExecStop=/var/lib/volttron/env/bin/volttron-ctl shutdown --platform + + + [Install] + WantedBy=multi-user.target + +After the file has been modified to reflect the setup of the platform you can install it with the +following commands. These need to be run as root or with sudo as appropriate. + +.. code-block:: console + + #Copy the service file into place + cp scripts/admin/volttron.service /etc/systemd/system/ + + #Set the correct permissions if needed + chmod 644 /etc/systemd/system/volttron.service + + #Notify systemd that a new service file exists (this is crucial!) + systemctl daemon-reload + + #Start the service + systemctl start volttron.service + + +Init.d +^^^^^^ + +An example init script ``scripts/admin/volttron`` can be used as a starting point for +setting up VOLTTRON as a service on init.d based systems. + +Minor changes may be needed for the file to work on the target system. Specifically +the ``USER``, ``VLHOME``, and ``VOLTTRON_HOME`` variables may need to be changed. + +.. code-block:: console + + ... + #Change this to the user VOLTTRON will run as. + USER=volttron + #Change this to the install location of VOLTTRON + VLHOME=/var/lib/volttron + + ... + + #Uncomment and change this to specify a different VOLTTRON_HOME + #export VOLTTRON_HOME=/home/volttron/.volttron + + +The script can be installed with the following commands. These need to be run as root or with `sudo` as appropriate. + +.. code-block:: console + + #Copy the script into place + cp scripts/admin/volttron /etc/init.d/ + + #Make the file executable + chmod 755 /etc/init.d/volttron + + #Change the owner to root + chown root:root /etc/init.d/volttron + + #These will set it to startup automatically at boot + update-rc.d volttron defaults + + #Start the service + /etc/init.d/volttron start diff --git a/docs/source/deploying-volttron/linux-system-hardening.rst b/docs/source/deploying-volttron/linux-system-hardening.rst new file mode 100644 index 0000000000..9aea79bc18 --- /dev/null +++ b/docs/source/deploying-volttron/linux-system-hardening.rst @@ -0,0 +1,48 @@ +.. _Linux-System-Hardening: + +====================== +Linux System Hardening +====================== + + +Introduction +============ + +VOLTTRON is built with modern security principles in mind [security-wp] and implements many security features for hosted +agents. +However, VOLTTRON is deployed on top of a Linux-based operating system and evaluating the security of a deployment must +include the configuration of the host system itself, as well as any other applications deployed on the system, both of +which provide additional attack surface and failure opportunities. + +There is no such thing as "a secure system." +Rather, any computing system must be evaluated in the context of its deployment environment with considerations for +assurance of confidentiality, integrity, and availability. +The impact of a compromised system must be considered, along with the costs assocated with risk mitigation. +Threat profile analyses have been comleted for several VOLTTRON deployment configurations; the reports are available on +the `VOLTTRON website's publications section `_. + + +Recommendations +=============== + +The VOLTTRON team recommends a risk-based cyber security approach that considers each risk, the impact of an +exploit or failure, and the costs associated with the available mitigation strategies. +Based on this evaluation, a set of mitigations can be identified to meet deployment requirements. + +In many cases, the first step is to coordinate with the cyber security team at your institution; they should be able +to help you with risk assessment and mitigation strategies, as well s as understanding any relevant regulartory +requirements. + +For continuously running and production-like systems, one common area of concern is hardening of the host operating +system. +Instructions are maintained by OpenSCAP for a large number of operating systems and guides are available for a +`range of common linux distributions `_. +You are encouraged to select the operating system and profile corresponding to your security requirements. +The guides there provide instruction for compliance in regulated environments, but are also appropriate in less +regulated environments where risk levels are equivalent. + +It is also important to evaluate any other applications running on the same system. +In addtion to the potential for exploitation or failure of the individual application, it is important to consider +the ways in which the risks associated with one application may expose new risks in another application. +For example, if a system is running a webserver which is exploited in a way that provides unauthorized access to +the host system, then the VOLTTRON system is now exposed to attack from local users. diff --git a/docs/source/deploying-volttron/multi-platform/datamover-historian-deployment.rst b/docs/source/deploying-volttron/multi-platform/datamover-historian-deployment.rst new file mode 100644 index 0000000000..c5d3d64dd6 --- /dev/null +++ b/docs/source/deploying-volttron/multi-platform/datamover-historian-deployment.rst @@ -0,0 +1,128 @@ +.. _DataMover-Historian-Deployment: + +=================== +DataMover Historian +=================== + +This guide describes how a DataMover historian can be used to transfer data from one VOLTTRON instance to another. The +DataMover historian is different from Forward historian in the way it sends the data to the remote instance. It first +batches the data and makes a RPC call to a remote historian instead of publishing data on the remote message bus +instance. The remote historian then stores the data into it's database. + +The walk-through below demonstrates how to setup DataMover historian to send data from one VOLTTRON instance to another. + + +VOLTTRON instance 1 sends data to platform historian on VOLTTRON instance 2 +--------------------------------------------------------------------------- + +As an example two VOLTTRON instances will be created and to send data from one VOLTTRON instance running a fake driver +(subscribing to publishes from a fake device) and sending the values to a remote historian running on the second +VOLTTRON instance. + + +VOLTTRON instance 1 +^^^^^^^^^^^^^^^^^^^ + +- ``vctl shutdown –platform`` (if the platform is already working) +- ``volttron-cfg`` (this helps in configuring the volttron instance + http://volttron.readthedocs.io/en/releases-4.1/core_services/control/VOLTTRON-Config.html + + - Specify the :term:`VIP address` of the instance: ``tcp://127.0.0.1:22916`` + - Install Platform Driver Agent with a fake driver for the instance. + - Install a listener agent so see the topics that are coming from the diver agent +- Then run the volttron instance by using the following command: ``./start-volttron`` + + +VOLTTRON instance 2 +^^^^^^^^^^^^^^^^^^^ + +- ``vctl shutdown –platform`` (if the platform is already working) +- ``volttron-cfg`` (this helps in configuring the volttron instance) + http://volttron.readthedocs.io/en/releases-4.1/core_services/control/VOLTTRON-Config.html + + - Specify the VIP address of the instance : ``tcp://127.0.0.2:22916`` + - Install a platform historian. ``volttron-cfg`` installs a default SQL historian. +- Start the VOLTTRON instance by using following command: ``./start-volttron`` + + +DataMover Configuration +^^^^^^^^^^^^^^^^^^^^^^^ + +An example config file is available in ``services/core/DataMover/config``. We need to update the +`destination-vip`, `destination-serverkey`, and `destination-historian-identity` entries as per our setup. + +.. note:: + + Here the topics from the driver on VOLTTRON instance 1 will be sent to instance 2. + + - **destination-vip**: The VIP address of the volttron instance to which we need to send data. Example : + ``tcp://127.0.0.2:22916`` + - **destination-serverkey**: The server key of remote VOLTTRON instance + - Get the server key of VOLTTRON instance 2 and set `destination-serverkey` property with the server key + + .. code-block:: console + + vctl auth serverkey + + - destination-historian-identity: Identity of remote platform historian. Default is "platform.historian" + + +Running DataMover Historian +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Install the DataMover historian on the VOLTTRON instance 1 + +.. code-block:: console + + python scripts/install-agent.py -s services/core/DataMover -c services/core/DataMover/config -i datamover --start + +- Add the public key of the DataMover historian on VOLTTRON instance 2 to enable authentication of the DataMover on + VOLTTRON instance 2. + + - Get the public key of the DataMover. Run the below command on instance 1 terminal. + + .. code-block:: console + + vctl auth publickey --name datamoveragent-0.1 + + - Add the credentials of the DataMover historian in VOLTTRON instance 2 + + .. code-block:: console + + vctl auth add --credentials + + +Check data in SQLite database +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To check if data is transferred and stored in the database of remote platform historian, we need to check the +entries in the database. The default location of SQL database (if not explicitly specified in the config file) will be +in the `data` directory inside the platform historian's installed directory within it's `$VOLTTRON_HOME`. + +- Get the uuid of the platform historian. This can be found by running the ``vctl status`` on the terminal of instance + 2. The first column of the data mover historian entry in the status table gives the first alphabet/number of the + uuid. + +- Go the `data` directory of platform historian's install directory. For example, + `/home/ubuntu/.platform2/agents/6292302c-32cf-4744-bd13-27e78e96184f/sqlhistorianagent-3.7.0/data` + +- Run the SQL command to see the data + .. code-block:: console + + sqlite3 platform.historian.sqlite + select * from data; + +- You will see similar entries + + .. code-block:: console + + 2020-10-27T15:07:55.006549+00:00|14|true + 2020-10-27T15:07:55.006549+00:00|15|10.0 + 2020-10-27T15:07:55.006549+00:00|16|20 + 2020-10-27T15:07:55.006549+00:00|17|true + 2020-10-27T15:07:55.006549+00:00|18|10.0 + 2020-10-27T15:07:55.006549+00:00|19|20 + 2020-10-27T15:07:55.006549+00:00|20|true + 2020-10-27T15:07:55.006549+00:00|21|0 + 2020-10-27T15:07:55.006549+00:00|22|0 + diff --git a/docs/source/devguides/walkthroughs/files/add-charts-button.png b/docs/source/deploying-volttron/multi-platform/files/add-charts-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/add-charts-button.png rename to docs/source/deploying-volttron/multi-platform/files/add-charts-button.png diff --git a/docs/source/devguides/walkthroughs/files/add-charts.png b/docs/source/deploying-volttron/multi-platform/files/add-charts.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/add-charts.png rename to docs/source/deploying-volttron/multi-platform/files/add-charts.png diff --git a/docs/source/deploying-volttron/multi-platform/files/admin_request.png b/docs/source/deploying-volttron/multi-platform/files/admin_request.png new file mode 100644 index 0000000000..505c61d8a1 Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/admin_request.png differ diff --git a/docs/source/deploying-volttron/multi-platform/files/admin_request_federation.png b/docs/source/deploying-volttron/multi-platform/files/admin_request_federation.png new file mode 100644 index 0000000000..dc025329be Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/admin_request_federation.png differ diff --git a/docs/source/setup/RabbitMQ/images/central_no_pending.png b/docs/source/deploying-volttron/multi-platform/files/central_no_pending.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/central_no_pending.png rename to docs/source/deploying-volttron/multi-platform/files/central_no_pending.png diff --git a/docs/source/setup/RabbitMQ/images/central_pending.png b/docs/source/deploying-volttron/multi-platform/files/central_pending.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/central_pending.png rename to docs/source/deploying-volttron/multi-platform/files/central_pending.png diff --git a/docs/source/devguides/walkthroughs/files/chart-multiple-lines.png b/docs/source/deploying-volttron/multi-platform/files/chart-multiple-lines.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/chart-multiple-lines.png rename to docs/source/deploying-volttron/multi-platform/files/chart-multiple-lines.png diff --git a/docs/source/devguides/walkthroughs/files/chart-type.png b/docs/source/deploying-volttron/multi-platform/files/chart-type.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/chart-type.png rename to docs/source/deploying-volttron/multi-platform/files/chart-type.png diff --git a/docs/source/devguides/walkthroughs/files/charts-window.png b/docs/source/deploying-volttron/multi-platform/files/charts-window.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/charts-window.png rename to docs/source/deploying-volttron/multi-platform/files/charts-window.png diff --git a/docs/source/deploying-volttron/multi-platform/files/cmd_line.png b/docs/source/deploying-volttron/multi-platform/files/cmd_line.png new file mode 100644 index 0000000000..03945f113f Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/cmd_line.png differ diff --git a/docs/source/deploying-volttron/multi-platform/files/cmd_line_federation.png b/docs/source/deploying-volttron/multi-platform/files/cmd_line_federation.png new file mode 100644 index 0000000000..8534190697 Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/cmd_line_federation.png differ diff --git a/docs/source/setup/RabbitMQ/images/csr-approve.png b/docs/source/deploying-volttron/multi-platform/files/csr-approve.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-approve.png rename to docs/source/deploying-volttron/multi-platform/files/csr-approve.png diff --git a/docs/source/setup/RabbitMQ/images/csr-collector-forwarder-approved.png b/docs/source/deploying-volttron/multi-platform/files/csr-collector-forwarder-approved.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-collector-forwarder-approved.png rename to docs/source/deploying-volttron/multi-platform/files/csr-collector-forwarder-approved.png diff --git a/docs/source/setup/RabbitMQ/images/csr-collector-forwarder-request.png b/docs/source/deploying-volttron/multi-platform/files/csr-collector-forwarder-request.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-collector-forwarder-request.png rename to docs/source/deploying-volttron/multi-platform/files/csr-collector-forwarder-request.png diff --git a/docs/source/setup/RabbitMQ/images/csr-collector-vcp-approve.png b/docs/source/deploying-volttron/multi-platform/files/csr-collector-vcp-approve.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-collector-vcp-approve.png rename to docs/source/deploying-volttron/multi-platform/files/csr-collector-vcp-approve.png diff --git a/docs/source/setup/RabbitMQ/images/csr-collector-vcp-request.png b/docs/source/deploying-volttron/multi-platform/files/csr-collector-vcp-request.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-collector-vcp-request.png rename to docs/source/deploying-volttron/multi-platform/files/csr-collector-vcp-request.png diff --git a/docs/source/setup/RabbitMQ/images/csr-initial-state.png b/docs/source/deploying-volttron/multi-platform/files/csr-initial-state.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-initial-state.png rename to docs/source/deploying-volttron/multi-platform/files/csr-initial-state.png diff --git a/docs/source/setup/RabbitMQ/images/csr-login-page.png b/docs/source/deploying-volttron/multi-platform/files/csr-login-page.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-login-page.png rename to docs/source/deploying-volttron/multi-platform/files/csr-login-page.png diff --git a/docs/source/setup/RabbitMQ/images/csr-no-requests-page.png b/docs/source/deploying-volttron/multi-platform/files/csr-no-requests-page.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-no-requests-page.png rename to docs/source/deploying-volttron/multi-platform/files/csr-no-requests-page.png diff --git a/docs/source/setup/RabbitMQ/images/csr-request.png b/docs/source/deploying-volttron/multi-platform/files/csr-request.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-request.png rename to docs/source/deploying-volttron/multi-platform/files/csr-request.png diff --git a/docs/source/setup/RabbitMQ/images/csr-sequence-approval.png b/docs/source/deploying-volttron/multi-platform/files/csr-sequence-approval.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-sequence-approval.png rename to docs/source/deploying-volttron/multi-platform/files/csr-sequence-approval.png diff --git a/docs/source/setup/RabbitMQ/images/csr-sequence-deny.png b/docs/source/deploying-volttron/multi-platform/files/csr-sequence-deny.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-sequence-deny.png rename to docs/source/deploying-volttron/multi-platform/files/csr-sequence-deny.png diff --git a/docs/source/setup/RabbitMQ/images/csr-set-admin.png b/docs/source/deploying-volttron/multi-platform/files/csr-set-admin.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/csr-set-admin.png rename to docs/source/deploying-volttron/multi-platform/files/csr-set-admin.png diff --git a/docs/source/deploying-volttron/multi-platform/files/csr_accepted.png b/docs/source/deploying-volttron/multi-platform/files/csr_accepted.png new file mode 100644 index 0000000000..b53afde3f6 Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/csr_accepted.png differ diff --git a/docs/source/deploying-volttron/multi-platform/files/csr_accepted_federation.png b/docs/source/deploying-volttron/multi-platform/files/csr_accepted_federation.png new file mode 100644 index 0000000000..8913d653fe Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/csr_accepted_federation.png differ diff --git a/docs/source/devguides/walkthroughs/files/filter-and-select.png b/docs/source/deploying-volttron/multi-platform/files/filter-and-select.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/filter-and-select.png rename to docs/source/deploying-volttron/multi-platform/files/filter-and-select.png diff --git a/docs/source/devguides/walkthroughs/files/filter-button.png b/docs/source/deploying-volttron/multi-platform/files/filter-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/filter-button.png rename to docs/source/deploying-volttron/multi-platform/files/filter-button.png diff --git a/docs/source/devguides/walkthroughs/files/filter-name.png b/docs/source/deploying-volttron/multi-platform/files/filter-name.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/filter-name.png rename to docs/source/deploying-volttron/multi-platform/files/filter-name.png diff --git a/docs/source/devguides/walkthroughs/files/filter-status.png b/docs/source/deploying-volttron/multi-platform/files/filter-status.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/filter-status.png rename to docs/source/deploying-volttron/multi-platform/files/filter-status.png diff --git a/docs/source/devguides/walkthroughs/files/go-to-charts.png b/docs/source/deploying-volttron/multi-platform/files/go-to-charts.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/go-to-charts.png rename to docs/source/deploying-volttron/multi-platform/files/go-to-charts.png diff --git a/docs/source/devguides/walkthroughs/files/inspect-charts.png b/docs/source/deploying-volttron/multi-platform/files/inspect-charts.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/inspect-charts.png rename to docs/source/deploying-volttron/multi-platform/files/inspect-charts.png diff --git a/docs/source/devguides/walkthroughs/files/load-chart.png b/docs/source/deploying-volttron/multi-platform/files/load-chart.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/load-chart.png rename to docs/source/deploying-volttron/multi-platform/files/load-chart.png diff --git a/docs/source/devguides/walkthroughs/files/load-topics.png b/docs/source/deploying-volttron/multi-platform/files/load-topics.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/load-topics.png rename to docs/source/deploying-volttron/multi-platform/files/load-topics.png diff --git a/docs/source/devguides/walkthroughs/files/load-tree-item.png b/docs/source/deploying-volttron/multi-platform/files/load-tree-item.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/load-tree-item.png rename to docs/source/deploying-volttron/multi-platform/files/load-tree-item.png diff --git a/docs/source/devguides/walkthroughs/files/login-screen.png b/docs/source/deploying-volttron/multi-platform/files/login-screen.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/login-screen.png rename to docs/source/deploying-volttron/multi-platform/files/login-screen.png diff --git a/docs/source/devguides/walkthroughs/files/logout-button.png b/docs/source/deploying-volttron/multi-platform/files/logout-button.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/logout-button.png rename to docs/source/deploying-volttron/multi-platform/files/logout-button.png diff --git a/docs/source/devguides/walkthroughs/files/manage-platforms.png b/docs/source/deploying-volttron/multi-platform/files/manage-platforms.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/manage-platforms.png rename to docs/source/deploying-volttron/multi-platform/files/manage-platforms.png diff --git a/docs/source/devguides/walkthroughs/files/multiplatform-config.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform-config.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/multiplatform-config.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform-config.png diff --git a/docs/source/devguides/walkthroughs/files/multiplatform-discovery-config.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform-discovery-config.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/multiplatform-discovery-config.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform-discovery-config.png diff --git a/docs/source/devguides/walkthroughs/files/multiplatform-external-address.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform-external-address.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/multiplatform-external-address.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform-external-address.png diff --git a/docs/source/devguides/walkthroughs/files/multiplatform-pubsub.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform-pubsub.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/multiplatform-pubsub.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform-pubsub.png diff --git a/docs/source/devguides/walkthroughs/files/multiplatform-setupmode-auth-screen.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform-setupmode-auth-screen.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/multiplatform-setupmode-auth-screen.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform-setupmode-auth-screen.png diff --git a/docs/source/devguides/walkthroughs/files/multiplatform-terminator-setup.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform-terminator-setup.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/multiplatform-terminator-setup.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform-terminator-setup.png diff --git a/docs/source/core_services/messagebus_refactor/files/multiplatform_pubsub.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform_pubsub.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/multiplatform_pubsub.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform_pubsub.png diff --git a/docs/source/core_services/messagebus_refactor/files/multiplatform_rpc.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform_rpc.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/multiplatform_rpc.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform_rpc.png diff --git a/docs/source/core_services/messagebus_refactor/files/multiplatform_shovel_pubsub.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform_shovel_pubsub.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/multiplatform_shovel_pubsub.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform_shovel_pubsub.png diff --git a/docs/source/core_services/messagebus_refactor/files/multiplatform_shovel_rpc.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform_shovel_rpc.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/multiplatform_shovel_rpc.png rename to docs/source/deploying-volttron/multi-platform/files/multiplatform_shovel_rpc.png diff --git a/docs/source/deploying-volttron/multi-platform/files/multiplatform_ssl.png b/docs/source/deploying-volttron/multi-platform/files/multiplatform_ssl.png new file mode 100644 index 0000000000..73cf0406a6 Binary files /dev/null and b/docs/source/deploying-volttron/multi-platform/files/multiplatform_ssl.png differ diff --git a/docs/source/devguides/walkthroughs/files/pin-chart.png b/docs/source/deploying-volttron/multi-platform/files/pin-chart.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/pin-chart.png rename to docs/source/deploying-volttron/multi-platform/files/pin-chart.png diff --git a/docs/source/devguides/walkthroughs/files/platforms.png b/docs/source/deploying-volttron/multi-platform/files/platforms.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/platforms.png rename to docs/source/deploying-volttron/multi-platform/files/platforms.png diff --git a/docs/source/devguides/walkthroughs/files/problems-found.png b/docs/source/deploying-volttron/multi-platform/files/problems-found.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/problems-found.png rename to docs/source/deploying-volttron/multi-platform/files/problems-found.png diff --git a/docs/source/setup/RabbitMQ/images/remote_rmq_pending.png b/docs/source/deploying-volttron/multi-platform/files/remote_rmq_pending.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/remote_rmq_pending.png rename to docs/source/deploying-volttron/multi-platform/files/remote_rmq_pending.png diff --git a/docs/source/setup/RabbitMQ/images/rmq_remote_forwarder_accepted.png b/docs/source/deploying-volttron/multi-platform/files/rmq_remote_forwarder_accepted.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/rmq_remote_forwarder_accepted.png rename to docs/source/deploying-volttron/multi-platform/files/rmq_remote_forwarder_accepted.png diff --git a/docs/source/setup/RabbitMQ/images/rmq_remote_forwarder_pending.png b/docs/source/deploying-volttron/multi-platform/files/rmq_remote_forwarder_pending.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/rmq_remote_forwarder_pending.png rename to docs/source/deploying-volttron/multi-platform/files/rmq_remote_forwarder_pending.png diff --git a/docs/source/devguides/walkthroughs/files/side-panel-closed.png b/docs/source/deploying-volttron/multi-platform/files/side-panel-closed.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/side-panel-closed.png rename to docs/source/deploying-volttron/multi-platform/files/side-panel-closed.png diff --git a/docs/source/devguides/walkthroughs/files/side-panel-open.png b/docs/source/deploying-volttron/multi-platform/files/side-panel-open.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/side-panel-open.png rename to docs/source/deploying-volttron/multi-platform/files/side-panel-open.png diff --git a/docs/source/devguides/walkthroughs/files/terminator-setup.png b/docs/source/deploying-volttron/multi-platform/files/terminator-setup.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/terminator-setup.png rename to docs/source/deploying-volttron/multi-platform/files/terminator-setup.png diff --git a/docs/source/devguides/walkthroughs/files/vc-agents.png b/docs/source/deploying-volttron/multi-platform/files/vc-agents.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-agents.png rename to docs/source/deploying-volttron/multi-platform/files/vc-agents.png diff --git a/docs/source/setup/RabbitMQ/images/vc-auth-failure.png b/docs/source/deploying-volttron/multi-platform/files/vc-auth-failure.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/vc-auth-failure.png rename to docs/source/deploying-volttron/multi-platform/files/vc-auth-failure.png diff --git a/docs/source/devguides/walkthroughs/files/vc-cert-warning-1.png b/docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-1.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-cert-warning-1.png rename to docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-1.png diff --git a/docs/source/devguides/walkthroughs/files/vc-cert-warning-2.png b/docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-2.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-cert-warning-2.png rename to docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-2.png diff --git a/docs/source/devguides/walkthroughs/files/vc-cert-warning-3.png b/docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-3.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-cert-warning-3.png rename to docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-3.png diff --git a/docs/source/devguides/walkthroughs/files/vc-cert-warning-4.png b/docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-4.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-cert-warning-4.png rename to docs/source/deploying-volttron/multi-platform/files/vc-cert-warning-4.png diff --git a/docs/source/setup/RabbitMQ/images/vc-collector1-forwarder.png b/docs/source/deploying-volttron/multi-platform/files/vc-collector1-forwarder.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/vc-collector1-forwarder.png rename to docs/source/deploying-volttron/multi-platform/files/vc-collector1-forwarder.png diff --git a/docs/source/setup/RabbitMQ/images/vc-collector2-forwarder.png b/docs/source/deploying-volttron/multi-platform/files/vc-collector2-forwarder.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/vc-collector2-forwarder.png rename to docs/source/deploying-volttron/multi-platform/files/vc-collector2-forwarder.png diff --git a/docs/source/devguides/walkthroughs/files/vc-dashboard.png b/docs/source/deploying-volttron/multi-platform/files/vc-dashboard.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-dashboard.png rename to docs/source/deploying-volttron/multi-platform/files/vc-dashboard.png diff --git a/docs/source/devguides/walkthroughs/files/vc-login.png b/docs/source/deploying-volttron/multi-platform/files/vc-login.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-login.png rename to docs/source/deploying-volttron/multi-platform/files/vc-login.png diff --git a/docs/source/devguides/walkthroughs/files/vc-platform.png b/docs/source/deploying-volttron/multi-platform/files/vc-platform.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/vc-platform.png rename to docs/source/deploying-volttron/multi-platform/files/vc-platform.png diff --git a/docs/source/setup/RabbitMQ/images/vc_platforms.png b/docs/source/deploying-volttron/multi-platform/files/vc_platforms.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/vc_platforms.png rename to docs/source/deploying-volttron/multi-platform/files/vc_platforms.png diff --git a/docs/source/devguides/walkthroughs/files/volttron-admin-page.png b/docs/source/deploying-volttron/multi-platform/files/volttron-admin-page.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/volttron-admin-page.png rename to docs/source/deploying-volttron/multi-platform/files/volttron-admin-page.png diff --git a/docs/source/setup/RabbitMQ/images/zmq_pending_credential_1.png b/docs/source/deploying-volttron/multi-platform/files/zmq_pending_credential_1.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/zmq_pending_credential_1.png rename to docs/source/deploying-volttron/multi-platform/files/zmq_pending_credential_1.png diff --git a/docs/source/setup/RabbitMQ/images/zmq_pending_credential_1_approved.png b/docs/source/deploying-volttron/multi-platform/files/zmq_pending_credential_1_approved.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/zmq_pending_credential_1_approved.png rename to docs/source/deploying-volttron/multi-platform/files/zmq_pending_credential_1_approved.png diff --git a/docs/source/setup/RabbitMQ/images/zmq_pending_credential_2.png b/docs/source/deploying-volttron/multi-platform/files/zmq_pending_credential_2.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/zmq_pending_credential_2.png rename to docs/source/deploying-volttron/multi-platform/files/zmq_pending_credential_2.png diff --git a/docs/source/deploying-volttron/multi-platform/forward-historian-deployment.rst b/docs/source/deploying-volttron/multi-platform/forward-historian-deployment.rst new file mode 100644 index 0000000000..1cb98e0b26 --- /dev/null +++ b/docs/source/deploying-volttron/multi-platform/forward-historian-deployment.rst @@ -0,0 +1,3 @@ +.. _Forward-Historian-Deployment: + +.. include:: ../../agent-framework/historian-agents/forwarder/forward-historian.rst diff --git a/docs/source/deploying-volttron/multi-platform/index.rst b/docs/source/deploying-volttron/multi-platform/index.rst new file mode 100644 index 0000000000..55a4de5c3b --- /dev/null +++ b/docs/source/deploying-volttron/multi-platform/index.rst @@ -0,0 +1,59 @@ +.. _Multi-Platform-Deployment: + +========================= +Multi-Platform Connection +========================= + +There are multiple ways to establish connection between external +VOLTTRON platforms. Given that VOLTTRON now supports ZeroMq and RabbitMQ +type of message bus with each using different type authentication mechanism, +the number of different ways that agents can connect to external +platforms has significantly increased. Various multi-platform deployment +scenarios will be covered in this section. + +#. Agents can directly connect to external platforms to send and receive messages. + Forward historian, Data Mover agents fall under this category. The deployment steps + for forward historian is described in :ref:`Forward Historian Deployment ` + and data mover historian in :ref:`DataMover Historian Deployment ` + +#. The platforms maintain the connection with other platforms and agents can send + to and receive messages from external platforms without having to establish + connection directly. The deployment steps + is described in :ref:`Multi Platform Router Deployment ` + +#. RabbitMQ has ready made plugins such as shovel and federation to connect to + external brokers. This feature is leveraged to make connections to external platforms. This is described in + :ref:`Multi Platform RabbitMQ Deployment ` + +#. A web based admin interface to authenticate multiple instances (ZeroMq or RabbitMQ) + wanting to connect to single central instance is now available. The deployment steps + is described in :ref:`Multi Platform Multi-Bus Deployment ` + +#. VOLTTRON Central is a platform management web application that allows + platforms to communicate and to be managed from a centralized server. The deployment steps + is described in :ref:`VOLTTRON Central Demo ` + + +Assumptions +=========== + +- `Data Collector` is the deployment box that has the drivers and is collecting data from devices which will be + forwarded to a `VOLTTRON Central`. +- `Volttron Central (VC)` is the deployment box that has the historian which will save data from all Data Collectors to + the central database. +- `VOLTTRON_HOME` is assumed to the default on both boxes (`/home//.volttron`). + +.. note:: + + :term:`VOLTTRON_HOME` is the directory used by the platform for managing state and configuration of the platform + and agents installed locally on the platform. Auth keys, certificates, the configuration store, etc. are stored in + this directory by the platform. + +.. toctree:: + + forward-historian-deployment + datamover-historian-deployment + multi-platform-router + multi-platform-rabbitmq-deployment + multi-platform-multi-bus + volttron-central-deployment diff --git a/docs/source/setup/RabbitMQ/Multi-Platform-RMQ-CSR-Walkthrough.rst b/docs/source/deploying-volttron/multi-platform/multi-platform-multi-bus.rst similarity index 63% rename from docs/source/setup/RabbitMQ/Multi-Platform-RMQ-CSR-Walkthrough.rst rename to docs/source/deploying-volttron/multi-platform/multi-platform-multi-bus.rst index e871e81afa..db9ae5bf5c 100644 --- a/docs/source/setup/RabbitMQ/Multi-Platform-RMQ-CSR-Walkthrough.rst +++ b/docs/source/deploying-volttron/multi-platform/multi-platform-multi-bus.rst @@ -1,28 +1,30 @@ -.. _Multi_Platform_Walkthrough: +.. _Multi-Platform-Multi-Bus: -Multi-Platform Multi-Bus Walk-through -===================================== +======================== +Multi-Platform Multi-Bus +======================== -This guide describes the setup process for a multi-platform connection that has a combination of ZeroMQ and RabbitMQ -instances. For this example, we want to use the Forwarder to pass device data from two VOLTTRON instance to -a single "central" instance for storage. It will also have a Volttron Central agent running on the "central" -instance and Volttron Central Platform agents on all 3 instances and connected to "central" instance to provide -operational status of it's instance to the "central" instance. For this document "node" will be used interchangeably -with VOLTTRON instance. +This guide describes the setup process for a multi-platform connection that has a combination of :term:`ZeroMQ` and +:term:`RabbitMQ` instances. For this example, we want to use the Forwarder to pass device data from two VOLTTRON +instance to a single "central" instance for storage. It will also have a Volttron Central agent running on the +"central" instance and Volttron Central Platform agents on all 3 instances and connected to "central" instance to +provide operational status of it's instance to the "central" instance. For this document "node" will be used +interchangeably with VOLTTRON instance. The authentication of remote connections can be performed either using +admin web interface or using command line interface. We will demonstrate both the approaches. Node Setup ---------- -For this example we will have two types of nodes; a data collector and a central node. Each of the data -collectors will have different message buses (VOLTTRON supports both RabbitMQ and ZeroMQ). The nodes will -be configured as in the following table. +For this example we will have two types of nodes; a data collector and a central node. Each of the data collectors will +have different message buses (VOLTTRON supports both RabbitMQ and ZeroMQ). The nodes will be configured as in the +following table. .. csv-table:: Node Configuration :header: "", "Central", "Node-ZMQ", "Node-RMQ" :widths: 20, 15, 10, 10 "Node Type", "Central", "Data Collector", "Data Collector" - "Master Driver", "", "yes", "yes" + "Platform Driver", "", "yes", "yes" "Forwarder", "", "yes", "yes" "SQL Historian", "yes", "", "" "Volttron Central", "yes", "", "" @@ -34,14 +36,16 @@ be configured as in the following table. The goal of this is to be able to see the data from Node-ZMQ and Node-RMQ in the Central SQL Historian and on the trending charts of Volttron Central. + Virtual Machine Setup --------------------- The first step in creating a VOLTTRON instance is to make sure the machine is ready for VOLTTRON. Each machine -should have its hostname setup. For this walkthrough, the hostnames "central", "node-zmq" and "node-rmq" will be used. +should have its hostname setup. For this walk-through, the hostnames "central", "node-zmq" and "node-rmq" will be used. + +For Central and Node-RMQ follow the instructions :ref:`platform installation steps for RMQ `. For +Node-ZMQ use :ref:`Platform Installation steps for ZeroMQ `. -For Central and Node-RMQ follow the instructions :ref:`Building-VOLTTRON#steps-for-rabbitmq`. For Node-ZMQ use -:ref:`Building-VOLTTRON#steps-for-zmq`. Instance Setup -------------- @@ -49,18 +53,22 @@ Instance Setup The following conventions/assumptions are made for the rest of this document: - Commands should be run from the VOLTTRON root - - Default values are used for VOLTTRON_HOME($HOME/.volttron), vip port (22916), HTTPS port (8443), rabbitmq ports( 5671 for AMQPs and 15671 for RabbitMQ management interface). If using different VOLTTRON_HOME or ports, please replace accordingly. + - Default values are used for VOLTTRON_HOME($HOME/.volttron), :term:`VIP` port (22916), HTTPS port (8443), RabbitMQ + ports (5671 for AMQPs and 15671 for RabbitMQ management interface). If using different :term:`VOLTTRON_HOME` or + ports, please replace accordingly. - Replace central, node-zmq and node-rmq with your own hostnames. - user will represent your current user. -The following will use vcfg (volttron-cfg) to configure the individual platforms. +The following will use `vcfg` (volttron-cfg) to configure the individual platforms. + Central Instance Setup ---------------------- .. note:: - This instance must have been bootstrapped using --rabbitmq see :ref:`Building-VOLTTRON#steps-for-rabbitmq`. + This instance must have been bootstrapped using ``--rabbitmq`` see + :ref:`RabbitMq installation instructions `. Next step would be to configure the instance to have a web interface to accept/deny incoming certificate signing requests from other instances. Additionally, we will need to install a Volttron Central agent, Volttron Central @@ -137,7 +145,7 @@ Platform agent, SQL historian agent and a Listener agent. The following shows an Configuring /home/user/volttron/services/core/SQLHistorian. ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] Should the agent autostart? [N]: y - Would you like to install a master driver? [N]: + Would you like to install a platform driver? [N]: Would you like to install a listener agent? [N]: y Configuring examples/ListenerAgent. ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] @@ -157,29 +165,33 @@ Start VOLTTRON instance and check if the agents are installed. ./start-volttron vctl status -Open browser and go to master admin authentication page `https://central:8443/index.html` to accept/reject incoming certificate signing request (CSR) from other platforms. +Using the web interface: +^^^^^^^^^^^^^^^^^^^^^^^^ + +Open browser and go to the platform web admin authentication page `https://central:8443/index.html` to accept/reject +incoming certificate signing request (CSR) from other platforms. .. note:: Replace "central" with the proper hostname of VC instance in the admin page URL. If opening the admin page from a different system, then please make that the hostname is resolvable in that machine. -Click on "Login To Admistration Area". +Click on "Login To Administration Area". -.. image:: images/csr-initial-state.png +.. image:: files/csr-initial-state.png -Set the master admin username and password. This can be later used to login into master admin authentication page. -This username and password will also be used to log in to Volttron Central. +Set the platform web admin username and password. This can be later used to login into the web admin +authentication page. This username and password will also be used to log in to Volttron Central. -.. image:: images/csr-set-admin.png +.. image:: files/csr-set-admin.png -Login into the Master Admin page. +Login into the platform web admin page. -.. image:: images/csr-login-page.png +.. image:: files/csr-login-page.png After logging in, you will see no CSR requests initially. -.. image:: images/central_no_pending.png +.. image:: files/central_no_pending.png Go back to the terminal and start Volttron Central Platform agent on the "central" instance. The agent will send a CSR request to the web interface. @@ -188,19 +200,51 @@ request to the web interface. vctl start --tag vcp -Now go to master admin page to check if there is a new pending CSR request. You will see a "PENDING" request from -"central.central.platform.agent" +Now go to the platform web admin page to check if there is a new pending CSR request. You will see a "PENDING" request +from "central.central.platform.agent" -.. image:: images/central_pending.png +.. image:: files/central_pending.png Approve the CSR request to allow authenticated SSL based connection to the "central" instance. Go back to the terminal and check the status of Volttron Central Platform agent. It should be set to "GOOD". +Using command line: +^^^^^^^^^^^^^^^^^^^ + +Alternatively, you can also check the status of pending CSRs via the command line. + +After starting the Volttron Central Platform agent, +use the auth remote sub-command's ``list`` to display the current pending certs. + +.. code-block:: console + + vctl auth remote list + +You will see the pending CSR appear in the list. + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 PENDING + +Approve the pending CSR using the ``approve`` command. + +.. code-block:: console + + vctl auth remote approve central.central.platform.agent + +Run the ``list`` command again to verify that the CSR has been approved. + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + Node-ZMQ Instance Setup ----------------------- On the "node-zmq" VM, setup a ZeroMQ based VOLTTRON instance. Using "vcfg" command, install Volttron Central Platform agent, -a master driver agent with a fake driver. +a platform driver agent with a fake driver. .. note:: @@ -225,10 +269,10 @@ a master driver agent with a fake driver. ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] Should the agent autostart? [N]: Would you like to install a platform historian? [N]: - Would you like to install a master driver? [N]: y - Configuring /home/user/volttron/services/core/MasterDriverAgent. + Would you like to install a platform driver? [N]: y + Configuring /home/user/volttron/services/core/PlatformDriverAgent. ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Would you like to install a fake device on the master driver? [N]: y + Would you like to install a fake device on the platform driver? [N]: y Should the agent autostart? [N]: y Would you like to install a listener agent? [N]: Finished configuration! @@ -257,37 +301,56 @@ Start Volttron Central Platform on this platform manually. Check the VOLTTRON log in the "central" instance, you will see "authentication failure" entry from the incoming connection. You will need to add the public key of VCP agent on the "central" instance. -.. image:: images/vc-auth-failure.png +.. image:: files/vc-auth-failure.png At this point, you can either accept the connection through the admin page or the command line. Using the admin page: +^^^^^^^^^^^^^^^^^^^^ -Navigate back to the master admin authentication page. You should see a pending request under the ZMQ Keys Pending Authorization header. +Navigate back to the platform web admin authentication page. You should see a pending request under the ZMQ Keys Pending +Authorization header. -.. image:: images/zmq_pending_credential_1.png +.. image:: files/zmq_pending_credential_1.png Accept the credential in the same method as a CSR. Using the command line: +^^^^^^^^^^^^^^^^^^^^^^ + +As with the pending CSR, list the current pending certs and credentials. + +.. code-block:: console + + vctl auth remote list -On the "node-zmq" box execute this command and grab the public key of the VCP agent. +You will see the pending ZMQ credential has been added to the list. .. code-block:: console - vctl auth publickey + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 PENDING -Add auth entry corresponding to VCP agent on "central" instance using the below command. Replace the user id value and credentials value appropriately before running +Approve the pending ZMQ credential using the ``approve`` command. .. code-block:: console - vctl auth add --user_id --credentials + vctl auth remote approve 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 + +Run the ``list`` command again to verify that the credential has been approved. + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED Complete similar steps to start a forwarder agent that connects to "central" instance. Modify the configuration in -`services/core/ForwardHistorian/rmq_config.yml` to have a destination VIP address pointing to VIP address of the +`services/core/ForwardHistorian/rmq_config.yml` to have a destination :term:`VIP address` pointing to VIP address of the "central" instance and server key of the "central" instance. .. code-block:: yaml @@ -309,34 +372,54 @@ Install and start forwarder agent. python scripts/install-agent.py -s services/core/ForwardHistorian -c services/core/ForwardHistorian/rmq_config.yml --start -To accept the credential using the admin page: +Using the admin page: +^^^^^^^^^^^^^^^^^^^^^ -Navigate back to the master admin authentication page. You should see another pending request under the ZMQ Keys Pending Authorization header. +To accept the credential, navigate back to the platform web admin authentication page. You should see another pending request under the ZMQ Keys +Pending Authorization header. -.. image:: images/zmq_pending_credential_2.png +.. image:: files/zmq_pending_credential_2.png Accept this credential in the same method as before. -To accept the credential using the command line: +Using the command line: +^^^^^^^^^^^^^^^^^^^^^^^ -Grab the public key of the forwarder agent. +To accept the credential via the command line, .. code-block:: console - vctl auth publickey + vctl auth remote list +You will see the pending ZMQ credential has been added to the list. -Add auth entry corresponding to VCP agent on **central** instance. +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED + fb30249d-b267-4bdd-b29a-d9112e6a6082 127.0.0.1 PENDING + +Approve the pending ZMQ credential using the ``approve`` command. + +.. code-block:: console + + vctl auth remote approve fb30249d-b267-4bdd-b29a-d9112e6a6082 + +Run the ``list`` command again to verify that the credential has been approved. .. code-block:: console - vctl auth add --user_id --credentials + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED + fb30249d-b267-4bdd-b29a-d9112e6a6082 127.0.0.1 APPROVED In either case, you should start seeing messages from "collector1" instance on the "central" instance's VOLTTRON log now. -.. image:: images/vc-collector1-forwarder.png +.. image:: files/vc-collector1-forwarder.png Node-RMQ Instance Setup @@ -344,10 +427,11 @@ Node-RMQ Instance Setup .. note:: - This instance must have been bootstrapped using --rabbitmq see :ref:`Building-VOLTTRON#steps-for-rabbitmq`. + This instance must have been bootstrapped using --rabbitmq see + :ref:`RabbitMq installation instructions `. -Using "vcfg" command, install Volttron Central Platform agent, a master driver agent with fake driver. The instance +Using "vcfg" command, install Volttron Central Platform agent, a platform driver agent with fake driver. The instance name is set to "collector2". .. code-block:: console @@ -361,7 +445,7 @@ name is set to "collector2". Name of this volttron instance: [volttron1]: collector2 RabbitMQ server home: [/home/user/rabbitmq_server/rabbitmq_server-3.7.7]: Fully qualified domain name of the system: [node-rmq]: - Would you like to create a new self signed root CAcertificate for this instance: [Y]: + Would you like to create a new self signed root CA certificate for this instance: [Y]: Please enter the following details for root CA certificate Country: [US]: @@ -410,10 +494,10 @@ name is set to "collector2". ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] Should the agent autostart? [N]: Would you like to install a platform historian? [N]: - Would you like to install a master driver? [N]: y - Configuring /home/user/volttron/services/core/MasterDriverAgent. + Would you like to install a platform driver? [N]: y + Configuring /home/user/volttron/services/core/PlatformDriverAgent. ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Would you like to install a fake device on the master driver? [N]: y + Would you like to install a fake device on the platform driver? [N]: y Should the agent autostart? [N]: y Would you like to install a listener agent? [N]: Finished configuration! @@ -440,14 +524,53 @@ Start Volttron Central Platform on this platform manually. vctl start --tag vcp +Accept the pending CSR request. + +Using the admin page: +^^^^^^^^^^^^^^^^^^^^^ + Go the master admin authentication page and check if there is a new pending CSR request from VCP agent of "collector2" instance. -.. image:: images/remote_rmq_pending.png - +.. image:: files/remote_rmq_pending.png Approve the CSR request to allow authenticated SSL based connection to the "central" instance. + +Using the command line: +^^^^^^^^^^^^^^^^^^^^^^^ + +As before, this can be done via the command line as follows: + +.. code-block:: console + + vctl auth remote list + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + central.collector2.forwarderagent-5.1_1 192.168.56.101 PENDING + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED + fb30249d-b267-4bdd-b29a-d9112e6a6082 127.0.0.1 APPROVED + + +Approve the pending CSR using the ``approve`` command. + +.. code-block:: console + + vctl auth remote approve central.collector2.forwarderagent-5.1_1 + +Run the ``list`` command again to verify that the CSR has been approved. + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + central.collector2.forwarderagent-5.1_1 192.168.56.101 APPROVED + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED + fb30249d-b267-4bdd-b29a-d9112e6a6082 127.0.0.1 APPROVED + Now go back to the terminal and check the status of Volttron Central Platform agent. It should be set to "GOOD". @@ -466,14 +589,53 @@ Start forwarder agent. python scripts/install-agent.py -s services/core/ForwardHistorian -c services/core/ForwardHistorian/rmq_config.yml --start +Using the admin page: +^^^^^^^^^^^^^^^^^^^^ + Go the master admin authentication page and check if there is a new pending CSR request from forwarder agent of "collector2" instance. -.. image:: images/rmq_remote_forwarder_pending.png +.. image:: files/rmq_remote_forwarder_pending.png Approve the CSR request to allow authenticated SSL based connection to the "central" instance. -.. image:: images/rmq_remote_forwarder_accepted.png +.. image:: files/rmq_remote_forwarder_accepted.png + +Using the command line: +^^^^^^^^^^^^^^^^^^^^^^^ + +If using command line for this process: + +.. code-block:: console + + vctl auth remote list + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + central.collector2.platform.agent 192.168.56.103 APPROVED + central.collector2.forwarderagent-5.1_1 192.168.56.103 PENDING + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED + fb30249d-b267-4bdd-b29a-d9112e6a6082 127.0.0.1 APPROVED + + +Approve the pending CSR using the ``approve`` command. + +.. code-block:: console + + vctl auth remote approve central.collector2.forwarderagent-5.1_1 + +Run the ``list`` command again to verify that the CSR has been approved. + +.. code-block:: console + + USER_ID ADDRESS STATUS + central.central.platform.agent 192.168.56.101 APPROVED + central.collector2.platform.agent 192.168.56.103 APPROVED + central.collector2.forwarderagent-5.1_1 192.168.56.103 APPROVED + 68ef33c4-97bc-4e1b-b5f6-2a6049993b65 127.0.0.1 APPROVED + fb30249d-b267-4bdd-b29a-d9112e6a6082 127.0.0.1 APPROVED Now go back to the terminal and check the status of forwarder agent. It should be set to "GOOD". @@ -481,7 +643,7 @@ Now go back to the terminal and check the status of forwarder agent. It should b Check the VOLTTRON log of "central" instance. You should see messages with "devices" topic coming from "collector2" instance. -.. image:: images/vc-collector2-forwarder.png +.. image:: files/vc-collector2-forwarder.png @@ -490,4 +652,4 @@ To confirm that VolttronCentral is monitoring the status of all the 3 platforms, configuration step (using vcfg command in "central" instance). Click on "platforms" tab in the far right corner. You should see all three platforms listed in that page. Click on each of the platforms and check the status of the agents. -.. image:: images/vc_platforms.png +.. image:: files/vc_platforms.png diff --git a/docs/source/deploying-volttron/multi-platform/multi-platform-rabbitmq-deployment.rst b/docs/source/deploying-volttron/multi-platform/multi-platform-rabbitmq-deployment.rst new file mode 100644 index 0000000000..79544690eb --- /dev/null +++ b/docs/source/deploying-volttron/multi-platform/multi-platform-rabbitmq-deployment.rst @@ -0,0 +1,696 @@ +.. _Multi-platform-RabbitMQ-Deployment: + +================================== +Multi-platform RabbitMQ Deployment +================================== + +With ZeroMQ based VOLTTRON, multi-platform communication was accomplished in three different ways: + +#. Direct connection to remote instance - Write an agent that would connect to a remote instance directly. + +#. Special agents - Use special agents such as forward historian/data puller agents that would forward/receive messages + to/from remote instances. In RabbitMQ-VOLTTRON, we make use of the :ref:`Shovel Plugin ` to achieve + this behavior. + +#. Multi-Platform RPC and PubSub - Configure :term:`VIP` address of all remote instances that an instance has to connect + to its `$VOLTTRON_HOME/external_discovery.json` and let the router module in each instance manage the connection + and take care of the message routing for us. In RabbitMQ-VOLTTRON, we make use of the + :ref:`Federation Plugin ` to achieve this behavior. + + +Terminology +----------- + +For all the three different ways of setting up multiplatform links, we first need to identify the upstream server and downstream server. +The upstream server is the node that is publishing some message of interest; we shall refer to this node as the publisher node. +The downstream server is the node that will receive messages from the upstream server; we shall refer to this node as the subscriber node. +Note that upstream server & publisher node and downstream server & subscriber node will be used interchangeably for the rest of this guide. + + +Using the Federation Plugin +--------------------------- + +Connecting multiple VOLTTRON instances can be done using the federation plugin. To create a RabbitMQ federation, we have to +configure the downstream volttron instance to create federated exchange. A federated exchange links to other exchanges. +In this case, the downstream federated exchange links to the upstream exchange. Conceptually, messages published to the +upstream exchanges are copied to the federated exchange, as though they were published directly to the federated exchange. + +Path: `$VOLTTRON_HOME/rabbitmq_federation_config.yml` + +.. code-block:: yaml + + # Mandatory parameters for federation setup + federation-upstream: + volttron4: # hostname of upstream server + port: '5671' + virtual-host: volttron4 + certificates: + csr: true + private_key: "path to private key" # For example, /home/volttron/vhome/test_fed/certificates/private/volttron1.federation.pem + public_cert: "path to public cert" # For example, /home/volttron/vhome/test_fed/certificates/federation/volttron2.volttron1.federation.crt + remote_ca: "path to CA cert" # For example, /home/volttron/vhome/test_fed/certificates/federation/volttron2_ca.crt + federation-user: volttron4.federation #.federation + volttron5: # hostname of upstream server + port: '5671' + virtual-host: volttron5 + certificates: + csr: true + private_key: "path to private key" + public_cert: "path to public cert" + remote_ca: "path to CA cert" + federation-user: volttron5.federation #.federation + + +To setup federation on the VOLTTRON instance, run the following command on the downstream server: + +.. code-block:: bash + + vcfg --rabbitmq federation [optional path to rabbitmq_federation_config.yml] + + +This establishes federation links to upstream servers. Once a federation link to the upstream server is established on +the downstream server, the messages published on the upstream server become available to the downstream server as if +it were published locally. + + +Multi-Platform RPC With Federation +---------------------------------- + +For multi-platform RPC communication, federation links need to be established on both the VOLTTRON +nodes. Once the federation links are established, RPC communication becomes fairly simple. + +.. image:: files/multiplatform_rpc.png + +Consider Agent A on VOLTTRON instance "volttron1" on host "host_A" wants to make RPC call to Agent B +on VOLTTRON instance "volttron2" on host "host_B". + +1. Agent A makes RPC call. + +.. code-block:: Python + + kwargs = {"external_platform": self.destination_instance_name} + agent_a.vip.rpc.call("agent_b", set_point, "point_name", 2.5, \**kwargs) + +2. The message is transferred over federation link to VOLTTRON instance "volttron2" as both the exchanges are made + *federated*. + +3. The RPC subsystem of Agent B calls the actual RPC method and gets the result. It encapsulates the message result + into a VIP message object and sends it back to Agent A on VOLTTRON instance "volttron1". + +4. The RPC subsystem on Agent A receives the message result and gives it to the Agent A application. + + +Multi-Platform PubSub With Federation +------------------------------------- + +For multi-platform PubSub communication, it is sufficient to have a single federation link from the downstream server +to the upstream server. In case of bi-directional data flow, two links have to established in both the directions. + +.. image:: files/multiplatform_pubsub.png + +Consider Agent B on VOLTTRON instance "volttron2" on host "host_B" which wants to subscribe to messages from +VOLTTRON instance "volttron2" on host "host_B". First, a federation link needs to be established from +"volttron2" to "volttron1". + +1. Agent B makes a subscribe call: + +.. code-block:: python + + agent_b.vip.subscribe.call("pubsub", prefix="devices", all_platforms=True) + +2. The PubSub subsystem converts the prefix to ``__pubsub__.*.devices.#``. Here, ``*`` indicates that agent is subscribing + to the ``devices`` topic from all VOLTTRON platforms. + +3. A new queue is created and bound to VOLTTRON exchange with the above binding key. Since the VOLTTRON exchange is a + *federated exchange*, any subscribed message on the upstream server becomes available on the federated exchange and + Agent B will be able to receive it. + +4. Agent A publishes message to topic `devices/pnnl/isb1/hvac1` + +5. The PubSub subsystem publishes this message on its VOLTTRON exchange. + +6. The message is received by the Pubsub subsystem of Agent A via the federation link. + +.. _RabbitMQ-Multi-platform-SSL: + +Multi-Platform Federation Communication With RabbitMQ SSL +========================================================= + +For multi-platform communication over federation, we need the connecting instances to trust each other. + +.. image:: files/multiplatform_ssl.png + +Suppose there are two virtual machines (VOLTTRON1 and VOLTTRON2) running single instances of RabbitMQ; VOLTTRON1 and VOLTTRON2 +want to talk to each other via the federation or shovel plugins. For shovel/federation to have authenticated connection to the +remote instance, it needs to have it's public certificate signed by the remote instance's CA. So as part of the shovel +or federation creation steps, a certificate signing request is made to the remote instance. The admin of the remote instance +should be ready to accept/reject such a request through VOLTTRON's admin web interface. To facilitate this process, the +VOLTTRON platform exposes a web-based server API for requesting, listing, approving, and denying certificate requests. For +more detailed description, refer to :ref:`Agent communication to Remote RabbitMQ instance `. +After the CSR request is accepted, an authenticated shovel/federation connection can be established. + + +Installation Steps +------------------ + +1. Setup two VOLTTRON instances using the instructions at :ref:`platform installation steps for RMQ `. +**Please note that each instance should have a unique instance name and should be running on a machine/VM that has a unique host name.** + +2. Identify upstream servers (publisher nodes) and downstream servers +(collector nodes). To create a RabbitMQ federation, we have to configure +upstream servers on the downstream server and make the VOLTTRON exchange +"federated". + + a. On the downstream server (collector node) + + .. code-block:: bash + + vcfg --rabbitmq federation [optional path to rabbitmq_federation_config.yml + containing the details of the upstream hostname, port and vhost.] + + + Example configuration for federation is available + in examples/configurations/rabbitmq/rabbitmq_federation_config.yml + + + If no config file is provided, the script will prompt for + hostname (or IP address), port, and vhost of each upstream node you + would like to add and certificates for connecting to upstream server. For bi-directional data flow, + we will have to run the same script on both the nodes. + + b. If no config file is provided and certificates for connecting to upstream server have to be generated afresh, then the upstream server should be web enabled and admin should be ready to accept/reject incoming requests. Please refer to :ref:`Multiple Platform Multiple Bus connection ` on how to enable web feature and accept/reject incoming authentication requests. Below image shows steps to follow to create a federation link from downstream instance "volttron1" to upstream instance "volttron2". + + On downstream server (collector node), + + .. image:: files/cmd_line_federation.png + + On upstream server (publisher node), Login to "https://volttron2:8443/index.html" in a web browser. You will see incoming CSR request from "volttron1" instance. + + .. image:: files/admin_request_federation.png + + Accept the incoming CSR request from "volttron1" instance. + + .. image:: files/csr_accepted_federation.png + + + You can also find and accept the pending CSR via the command line, using the vctl auth remote sub-commands. + + First list the pending certs and credentials. + + .. code-block:: console + + vctl auth remote list + + .. code-block:: console + + USER_ID ADDRESS STATUS + volttron2.volttron1.federation 172.20.0.2 PENDING + + + Approve the pending CSR using the ``approve`` command. + + .. code-block:: console + + vctl auth remote approve volttron2.volttron1.federation + + Run the ``list`` command again to verify that the CSR has been approved. + + .. code-block:: console + + USER_ID ADDRESS STATUS + volttron2.volttron1.federation 172.20.0.2 APPROVED + + + c. Create a user in the upstream server (publisher) and provide it access to the virtual host of the upstream RabbitMQ server. + The username should take the form of .federation. + For example, if the downstream server name is "volttron1", and instance of local instance is "volttron2" then the instance name would be "volttron2.volttron1.federation". + Run the below command in the upstream server + + .. code-block:: bash + + vctl rabbitmq add-user + Do you want to set READ permission [Y/n] + Do you want to set WRITE permission [Y/n] + Do you want to set CONFIGURE permission [Y/n] + +5. Test the federation setup. + + a. On the downstream server run a listener agent which subscribes to messages from all platforms + + .. code-block:: bash + + vctl install examples/ListenerAgent --agent-config examples/ListenerAgent/config --start + + + b. Install platform driver, configure fake device on upstream server and start volttron and platform driver. + + .. code-block:: bash + + ./stop-volttron + vcfg --agent platform_driver + ./start-volttron + vctl start --tag platform_driver + + + c. Verify that the listener agent in downstream VOLTTRON instance is able to receive the messages. + The downstream volttron instance's volttron.log should display device data scrapped by platform driver agent in upstream volttron instance. + +6. Open ports and https service if needed. On Redhat based systems, ports used by RabbitMQ (defaults to 5671, 15671 for + SSL, 5672 and 15672 otherwise) might not be open by default. Please + contact system administrator to get ports opened on the downstream server. + + Following are commands used on centos 7. + + .. code-block:: bash + + sudo firewall-cmd --zone=public --add-port=15671/tcp --permanent + sudo firewall-cmd --zone=public --add-port=5671/tcp --permanent + sudo firewall-cmd --reload + +7. How to remove federation link + + a. Using the management web interface + + Log into management web interface using downstream server's admin username. + Navigate to admin tab and then to federation management page. The status of the + upstream link will be displayed on the page. Click on the upstream link name and + delete it. + + b. Using "vctl" command on the upstream server. + + .. code-block:: bash + + vctl rabbitmq list-federation-parameters + NAME URI + upstream-volttron2-volttron amqps://volttron2:5671/volttron?cacertfile=/home/volttron/vhome/test_fed/certificates/federation/volttron2_ca.crt&certfile=/home/volttron/vhome/test_fed/certificates/federation/volttron2.volttron1.federation.crt&keyfile=/home/volttron/vhome/test_fed/certificates/private/volttron1.federation.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external&server_name_indication=volttron2 + + + Copy the upstream link name and run the below command to remove it. + + .. code-block:: bash + + vctl rabbitmq remove-federation-parameters upstream-volttron2-volttron + +.. note:: + + These commands only remove the federation parameter from RabbitMQ and certificate entries from rabbitmq_federation_config.yml on the publisher node. + `It does not remove the actual certificates.` Rerunning the federation command for same setup will reuse the existing certificates. + If you need to rerun the federation command again for the same setup + and need to create fresh certificates, then you will need to manually remove public and private certificates. + Private certificates will be in + $VOLTTRON_HOME/certificates/private. Public certificates will be in two directories: + $VOLTTRON_HOME/certificates/federation and $VOLTTRON_HOME/certificates/certs. + Further, you should request the remote instance admin to delete earlier generated certificates through admin web + interface before a new CSR is sent for approval. + + + +Using the Shovel Plugin +----------------------- + +Shovels act as well-written client applications which move messages from a source to a destination broker. +The below configuration shows how to setup a shovel to forward PubSub messages or perform multi-platform RPC +communication from a local (i.e. publisher node) to a remote instance (i.e. subscriber node). The configuration expects `hostname`, `port` and +`virtual host` values of the remote instance. It also needs certificates, namely private certs, public certificate signed by remote instance, and remote CA certificate. + +Path: `$VOLTTRON_HOME/rabbitmq_shovel_config.yml` + +.. code-block:: yaml + + # Mandatory parameters for shovel setup + shovel: + rabbit-2: + port: '5671' + virtual-host: volttron + certificates: + csr: true + private_cert: "path to private key" # For example, /home/volttron/vhome/test_shovel/certificates/private/volttron1.shovelvolttron2.pem + public_cert: "path to public cert" # For example, /home/volttron/vhome/test_shovel/certificates/shovels/volttron2.volttron1.shovelvolttron2.crt + remote_ca: "path to CA cert" # For example, /home/volttron/vhome/test_shovel/certificates/shovels/volttron2_ca.crt + + # Configuration to forward pubsub topics + pubsub: + # Identity of agent that is publishing the topic + platform.driver: + - devices + # Configuration to make remote RPC calls + rpc: + # Remote instance name + volttron2: + # List of pair of agent identities (local caller, remote callee) + - [scheduler, platform.actuator] + +To forward PubSub messages, the topic and agent identity of the publisher agent is needed. To perform RPC, the instance +name of the remote instance and agent identities of the local agent and remote agent are needed. + +To configure the VOLTTRON instance to setup shovel, run the following command on the local instance. + +.. code-block:: bash + + vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml] + +This sets up a shovel that forwards messages (either PubSub or RPC) from a local exchange to a remote exchange. + + +Multi-Platform PubSub With Shovel +--------------------------------- + +After the shovel link is established for Pubsub, the below figure shows how the communication happens. + +.. note:: + + For bi-directional pubsub communication, shovel links need to be created on both the nodes. The "blue" arrows show + the shovel binding key. The pubsub topic configuration in `$VOLTTRON_HOME/rabbitmq_shovel_config.yml` gets + internally converted to the shovel binding key: `"__pubsub__.."`. + +.. image:: files/multiplatform_shovel_pubsub.png + +Now consider a case where shovels are setup in both the directions for forwarding "devices" topic. + +1. Agent B makes a subscribe call to receive messages with topic "devices" from all connected platforms. + +.. code-block:: python + + agent_b.vip.subscribe.call("pubsub", prefix="devices", all_platforms=True) + +2. The PubSub subsystem converts the prefix to ``__pubsub__.*.devices.#`` The ``*`` indicates that the agent is subscribing to + the "devices" topic from all the VOLTTRON platforms. + +3. A new queue is created and bound to VOLTTRON exchange with above binding key. + +4. Agent A publishes message to topic `devices/pnnl/isb1/hvac1` + +5. PubSub subsystem publishes this message on its VOLTTRON exchange. + +6. Because of the shovel link from VOLTTRON instance "volttron1" to "volttron2", the message is forwarded from VOLTTRON + exchange "volttron1" to "volttron2" and is picked up by Agent B on "volttron2". + + +Multi-Platform RPC With Shovel +------------------------------ + +After the shovel link is established for multi-platform RPC, the below figure shows how the RPC communication happens. + +.. note:: + + It is mandatory to have shovel links in both directions because RPC is a request-response type of communication. We will + need to set the agent identities for caller and callee in the `$VOLTTRON_HOME/rabbitmq_shovel_config.yml`. The + "blue" arrows show the resulting the shovel binding key. + +.. image:: files/multiplatform_shovel_rpc.png + +Consider Agent A on VOLTTRON instance "volttron1" on host "host_A" wants to make RPC call on Agent B +on VOLTTRON instance "volttron2" on host "host_B". + +1. Agent A makes RPC call: + +.. code-block:: Python + + kwargs = {"external_platform": self.destination_instance_name} + agent_a.vip.rpc.call("agent_b", set_point, "point_name", 2.5, \**kwargs) + +2. The message is transferred over shovel link to VOLTTRON instance "volttron2". + +3. The RPC subsystem of Agent B calls the actual RPC method and gets the result. It encapsulates the message result + into a VIP message object and sends it back to Agent A on VOLTTRON instance "volttron1". + +4. The RPC subsystem on Agent A receives the message result and gives it to Agent A's application. + + +Installation Steps for Pubsub Communication +------------------------------------------- +For multi-platform communication over shovel, we need the connecting instances to trust each other. As part of the shovel +creation process, a certificate signing request is made to the remote instance. The admin of the remote instance has to +accept or reject such a request through VOLTTRON admin web interface. If accepted, a bundle containing a certificate +signed by the remote CA is sent as a response back to the local instance. Subsequently, shovel connection is +established with these certificates. If the user already has certificates signed by the remote CA, then that will be used for +connection. Otherwise, the user can run the command ``vcfg --rabbitmq shovel`` and it will prompt the user to make a CSR request as part of shovel setup. + +1. Setup two VOLTTRON instances using the steps described in installation section. +Please note that each instance should have a unique instance name. + +2. Identify the instance that is going to act as the "publisher" instance. Suppose + "volttron1" instance is the "publisher" instance and "volttron2" instance is the "subscriber" + instance. Then we need to create a shovel on "volttron1" to forward messages matching + certain topics to remote instance "volttron2". + + a. On the publisher node, + + .. code-block:: bash + + vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml] + + rabbitmq_shovel_config.yml should contain the details of the remote hostname, port, vhost, + certificates for connecting to remote instance and list of topics to forward. + Example configuration for shovel is available in examples/configurations/rabbitmq/rabbitmq_shovel_config.yml + + For this example, let's set the topic to "devices" + + If no config file is provided, the script will prompt for + hostname (or IP address), port, vhost, certificates for connecting to remote instance and + list of topics for each remote instance you would like to add. For bi-directional data flow, we will have to + run the same script on both the nodes. + + b. If no config file is provided and certificates for connecting to remote instance have to be generated afresh, + then the remote instance should be web enabled and admin should be ready to accept/reject incoming requests. Please + refer to :ref:`Multiple Platform Multiple Bus connection ` on how to enable web feature and accept/reject incoming authentication requests. + Below image shows steps to follow to create a shovel to connect from "volttron1" to "volttron2" to + publish "devices" topic from "volttron1" to "volttron2". + + On publisher node, + + .. image:: files/cmd_line.png + + + On subscriber node, login to "https://volttron2:8443/index.html" in a web browser. You will see an incoming + CSR request from "volttron1" instance. + + .. image:: files/admin_request.png + + + Accept the incoming CSR request from "volttron1" instance. + + As before, you can find and accept the pending CSR via the command line, using the vctl auth remote sub-commands. + + First list the pending certs and credentials. + + .. code-block:: console + + vctl auth remote list + + .. code-block:: console + + USER_ID ADDRESS STATUS + volttron2.volttron1.shovelvolttron2 172.20.0.2 PENDING + + + Approve the pending CSR using the ``approve`` command. + + .. code-block:: console + + vctl auth remote approve volttron2.volttron1.shovelvolttron2 + + Run the ``list`` command again to verify that the CSR has been approved. + + .. code-block:: console + + USER_ID ADDRESS STATUS + volttron2.volttron1.shovelvolttron2 172.20.0.2 APPROVED + + + .. image:: files/csr_accepted.png + + c. Create a user in the subscriber node with username set to the publisher instance's + agent name (for example: volttron1-admin) and allow the shovel access to + the virtual host of the subscriber node. + + .. code-block:: bash + + cd $RABBITMQ_HOME + vctl rabbitmq add-user + +4. Test the shovel setup. + + a. Start VOLTTRON on publisher and subscriber nodes. + + b. On the publisher node, install and start a platform driver agent that publishes messages related to a fake device. + + .. code-block:: bash + + ./stop-volttron + vcfg --agent platform_driver + ./start-volttron + vctl start --tag platform_driver + + c. On the subscriber node, run a listener agent which subscribes to messages from all platforms. + + - Open the file examples/ListenerAgent/listener/agent.py. Search for ``@PubSub.subscribe('pubsub', '')`` and replace that line with ``@PubSub.subscribe('pubsub', 'devices', all_platforms=True)`` + - Install the listener + + .. code-block:: bash + + vctl install examples/ListenerAgent --agent-config examples/ListenerAgent/config --start + + + d. Verify listener agent in downstream VOLTTRON instance can receive the messages. + The downstream volttron instance's volttron.log should display device data scrapped by the platform driver agent in the upstream volttron instance. + +5. How to remove the shovel setup. + + a. On the subscriber node, remove the shovel on using the management web interface + + Log into management web interface using publisher instance's admin username. + Navigate to admin tab and then to shovel management page. The status of the + shovel will be displayed on the page. Click on the shovel name and delete the shovel. + + b. On the publisher node, run the following "vctl" commands: + + .. code-block:: bash + + vctl rabbitmq list-shovel-parameters + NAME SOURCE ADDRESS DESTINATION ADDRESS BINDING KEY + shovel-volttron2-devices amqps://volttron1:5671/volttron?cacertfile=/home/volttron/vhome/test_shovel/certificates/certs/volttron1-trusted-cas.crt&certfile=/home/volttron/vhome/test_shovel/certificates/certs/volttron1.platform.driver.crt&keyfile=/home/volttron/vhome/test_shovel/certificates/private/volttron1.platform.driver.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external&server_name_indication=volttron1 amqps://volttron2:5671/volttron?cacertfile=/home/volttron/vhome/test_shovel/certificates/shovels/volttron2_ca.crt&certfile=/home/volttron/vhome/test_shovel/certificates/shovels/volttron2.volttron1.shovelvolttron2.crt&keyfile=/home/volttron/vhome/test_shovel/certificates/private/volttron1.shovelvolttron2.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external&server_name_indication=volttron2 __pubsub__.volttron1.devices.# + + + Copy the shovel name and run following command to remove it. + + .. code-block:: bash + + vctl rabbitmq remove-shovel-parameters shovel-volttron2-devices + +.. note:: + + These commands only remove the shovel parameter from RabbitMQ and certificate entries from rabbitmq_shovel_config.yml on the publisher node. + `It does not remove the actual certificates.` Rerunning the shovel command for same setup will reuse the existing certificates. + But if you need to rerun the shovel command again for the same setup and need to create fresh certificates, then you will + need to manually remove public and private certificates. Private certificates will be in + $VOLTTRON_HOME/certificates/private. Public certificates will be in two directories: + $VOLTTRON_HOME/certificates/shovel and $VOLTTRON_HOME/certificates/certs. + Further, you should request the remote instance admin to delete earlier generated cert through the admin web + interface before a new CSR is sent for approval. + + +DataMover Communication +----------------------- + +The DataMover historian running on one instance makes RPC call to platform historian running on remote +instance to store data on remote instance. Platform historian agent returns response back to DataMover +agent. For such a request-response behavior, shovels need to be created on both instances. + +1. Please ensure that preliminary steps for multi-platform communication are completed (namely, + steps 1-3 described above) . + +2. To setup a data mover to send messages from local instance (say v1) to remote instance (say v2) + and back, we would need to setup shovels on both instances. + + Example of RabbitMQ shovel configuration on v1 + + .. code-block:: json + + shovel: + # hostname of remote machine + rabbit-2: + port: 5671 + certificates: + csr: true + private_cert: "path to private key" # For example, /home/volttron/vhome/test_shovel/certificates/private/volttron1.shovelvolttron2.pem + public_cert: "path to public cert" # For example, /home/volttron/vhome/test_shovel/certificates/shovels/volttron2.volttron1.shovelvolttron2.crt + remote_ca: "path to CA cert" # For example, /home/volttron/vhome/test_shovel/certificates/shovels/volttron2_ca.crt + rpc: + # Remote instance name + v2: + # List of pair of agent identities (local caller, remote callee) + - [data.mover, platform.historian] + virtual-host: v1 + + This says that DataMover agent on v1 wants to make RPC call to platform historian on v2. + + .. code-block:: bash + + vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml + + + Example of RabbitMQ shovel configuration on v2 + + .. code-block:: json + + shovel: + # hostname of remote machine + rabbit-1: + port: 5671 + rpc: + # Remote instance name + v1: + # List of pair of agent identities (local caller, remote callee) + - [platform.historian, data.mover] + virtual-host: v2 + + This says that Hplatform historian on v2 wants to make RPC call to DataMover agent on v1. + + a. On v1, run below command to setup a shovel from v1 to v2. + + .. code-block:: bash + + vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml + + b. Create a user on v2 with username set to remote agent's username + ( for example, v1.data.mover i.e., .) and allow + the shovel access to the virtual host of v2. + + .. code-block:: bash + + cd $RABBITMQ_HOME + vctl rabbitmq add-user + + c. On v2, run below command to setup a shovel from v2 to v1 + + .. code-block:: bash + + vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml + + d. Create a user on v1 with username set to remote agent's username + ( for example, v2.patform.historian i.e., .) and allow + the shovel access to the virtual host of the v1. + + .. code-block:: bash + + cd $RABBITMQ_HOME + vctl rabbitmq add-user + +3. Start Platform driver agent on v1 + + .. code-block:: bash + + ./stop-volttron + vcfg --agent platform_driver + ./start-volttron + vctl start --tag platform_driver + +4. Install DataMover agent on v1. Contents of the install script can look like below. + + .. code-block:: bash + + #!/bin/bash + export CONFIG=$(mktemp /tmp/abc-script.XXXXXX) + cat > $CONFIG <`__ +- :ref:`Getting Started ` - `Multi-Platform Configuration <#multi-platform-configuration>`__ - `Configuration and Authentication in Setup Mode <#configuration-and-authentication-in-setup-mode>`__ - `Setup Configuration and Authentication Manually <#setup-configuration-and-authentication-manually>`__ -- `Start Master driver on VOLTTRON instance 1 <#start-master-driver-on-volttron-instance-1>`__ +- `Start Platform driver on VOLTTRON instance 1 <#start-platform-driver-on-volttron-instance-1>`__ - `Start Listener agents on VOLTTRON instance 2 and 3 <#start-listener-agents-on-volttron-instance-2-and-3>`__ - `Stopping All the Platforms <#stopping-all-the-platforms>`__ @@ -46,8 +47,8 @@ or add below line in the `onstart` method .. note:: If using the onstart method remove the @PubSub.subscribe('pubsub', '') from the top of the method. -After :ref:`building VOLTTRON `, open three shells with the current directory the root of the -VOLTTRON repository. Then activate the VOLTTRON environment and export the VOLTTRON\_HOME variable. The home +After :ref:`installing VOLTTRON `, open three shells with the current directory the root of the +VOLTTRON repository. Then activate the VOLTTRON environment and export the :term:`VOLTTRON_HOME` variable. The home variable needs to be different for each instance. .. code-block:: console @@ -56,20 +57,20 @@ variable needs to be different for each instance. $ export VOLTTRON_HOME=~/.volttron1 Run `vcfg` in all the three shells. This command will ask how the instance -should be set up. Many of the options have defaults and that will be sufficient. Enter a different VIP address for each -platform. Configure fake master driver in the first shell and listener agent in second and third shell. +should be set up. Many of the options have defaults and that will be sufficient. Enter a different VIP address for each +platform. Configure fake platform driver in the first shell and listener agent in second and third shell. |Terminator Setup| Multi-Platform Configuration ---------------------------- -For each instance, specify the instance name in platform config file under it's ``VOLTTRON_HOME`` directory. +For each instance, specify the instance name in platform config file under it's `VOLTTRON_HOME` directory. If the platform supports web server, add the ``bind-web-address`` as well. Here is an example, -Path of the config: $VOLTTRON_HOME/config +Path of the config: `$VOLTTRON_HOME/config` .. code-block:: console @@ -82,14 +83,17 @@ Instance name and bind web address entries added into each VOLTTRON platform's c |Multi-Platform Config| -Next, each instance needs to know the VIP address, platform name and server keys of the remote platforms that it is connecting -to. In addition, each platform has to authenticate or accept the connecting instances' public keys. We can do this step -either by running VOLTTRON in setup mode or configure the information manually. +Next, each instance needs to know the VIP address, platform name and server keys of the remote platforms that it is +connecting to. In addition, each platform has to authenticate or accept the connecting instances' public keys. We can +do this step either by running VOLTTRON in setup mode or configure the information manually. + Configuration and Authentication in Setup Mode ---------------------------------------------- -.. note:: It is necessary for **each** platform to have a web server if running in setup mode +.. note:: + + It is necessary for **each** platform to have a web server if running in setup mode Add list of web addresses of remote platforms in ``$VOLTTRON_HOME/external_address.json`` @@ -101,7 +105,7 @@ VOLTTRON to log to a file. The file name should be different for each instance. .. code-block:: console - $ ./start-volttron --setup-mode + $ volttron -vv -l volttron.log --setup-mode > volttron.log 2>&1 & A new auth entry is added for each new platform connection. This can be checked with below command in each terminal window. @@ -122,11 +126,12 @@ After all the connections are authenticated, we can start the instances in norma Setup Configuration and Authentication Manually ----------------------------------------------- + If you do not need web servers in your setup, then you will need to build the platform discovery config file manually. The config file should contain an entry containing VIP address, instance name and serverkey of each remote platform connection. -Name of the file: external_platform_discovery.json +Name of the file: `external_platform_discovery.json` Directory path: Each platform’s VOLTTRON_HOME directory. @@ -157,9 +162,9 @@ Contents of ``external_platform_discovery.json`` of VOLTTRON instance 1, 2, 3 is |Multi-Platform Discovery Config| -After this, you will need to add the server keys of the connecting platforms using the ``vctl`` utility. Type +After this, you will need to add the server keys of the connecting platforms using the ``vctl`` utility. Type **vctl auth add** command on the command prompt and simply hit Enter to select defaults on all fields -except **credentials**. Here, we can either add serverkey of connecting platform or type `/.*/` to allow ALL +except **credentials**. Here, we can either add serverkey of connecting platform or type `/.*/` to allow ALL connections. .. warning:: `/.*/` allows ALL agent and platform connections without authentication. @@ -181,7 +186,6 @@ connections. For more information on authentication see :ref:`authentication`. - Once the initial configuration are setup, you can start all the VOLTTRON instances in normal mode. .. code-block:: console @@ -190,18 +194,21 @@ Once the initial configuration are setup, you can start all the VOLTTRON instanc Next step is to start agents in each platform to observe the multi-platform PubSub communication behavior. -Start Master driver on VOLTTRON instance 1 ------------------------------------------- -If master driver is not configured to auto start when the instance starts up, we can start it explicitly with this + +Start Platform driver on VOLTTRON instance 1 +-------------------------------------------- + +If platform driver is not configured to auto start when the instance starts up, we can start it explicitly with this command. .. code-block:: console - $ vctl start --tag master_driver + $ vctl start --tag platform_driver Start Listener agents on VOLTTRON instance 2 and 3 -------------------------------------------------- + If the listener agent is not configured to auto start when the instance starts up, we can start it explicitly with this command. @@ -236,3 +243,27 @@ We can stop all the VOLTTRON instances by executing below command in each termin :target: ../../_images/multiplatform-discovery-config.png .. |Multi-Platform PubSub| image:: files/multiplatform-pubsub.png :target: ../../_images/multiplatform-pubsub.png + + +.. _External-Address-Configuration: + +Platform External Address Configuration +======================================= + +In the configuration file located in `$VOLTTRON_HOME/config` add ``vip-address=tcp://ip:port`` for each address you want +to listen on: + +:: + + Example + vip-address=tcp://127.0.0.102:8182 + vip-address=tcp://127.0.0.103:8083 + vip-address=tcp://127.0.0.103:8183 + +.. note:: + + The config file is generated after running the `vcfg` command. The VIP-address is for the local platform, NOT the + remote platform. + + + diff --git a/docs/source/devguides/walkthroughs/VOLTTRON-Central-Demo.rst b/docs/source/deploying-volttron/multi-platform/volttron-central-deployment.rst similarity index 72% rename from docs/source/devguides/walkthroughs/VOLTTRON-Central-Demo.rst rename to docs/source/deploying-volttron/multi-platform/volttron-central-deployment.rst index bd7d89825e..878f910f7c 100644 --- a/docs/source/devguides/walkthroughs/VOLTTRON-Central-Demo.rst +++ b/docs/source/deploying-volttron/multi-platform/volttron-central-deployment.rst @@ -1,7 +1,8 @@ -.. _VOLTTRON-Central-Demo: +.. _VOLTTRON-Central-Deployment: -VOLTTRON Central Demo -===================== +=========================== +VOLTTRON Central Deployment +=========================== VOLTTRON Central is a platform management web application that allows platforms to communicate and to be managed from a centralized server. @@ -29,12 +30,13 @@ interface. - `Dashboard Charts <#dashboard-charts>`__ - `Remove Charts <#remove-charts>`__ + Getting Started --------------- -After :ref:`building VOLTTRON `, open three shells +After :ref:`installing VOLTTRON `, open three shells with the current directory the root of the VOLTTRON repository. Then activate -the VOLTTRON environment and export the VOLTTRON\_HOME variable. The home +the VOLTTRON environment and export the :term:`VOLTTRON_HOME` variable. The home variable needs to be different for each instance. If you are using Terminator you can right click and select "Split Vertically". @@ -48,7 +50,10 @@ This helps us keep from losing terminal windows or duplicating work. |Terminator Setup| One of our instances will have a VOLTTRON Central agent. We will install a -platform agent and a historian on all three platforms. +platform agent and a historian on all three platforms. Please note, for this demo +all the instances run on the ZeroMQ message bus. For multi-platform, multi-bus deployment +setup please follow the steps described in :ref:`Multi Platform Multi-Bus Deployment `. + Run `vcfg` in the first shell. This command will ask how the instance should be set up. Many of the options have defaults that will be sufficient. @@ -86,11 +91,10 @@ and localhost is volttron-pc. Creating new web server certificate. Is this an instance of volttron central? [N]: y Configuring /home/user/volttron/services/core/VolttronCentral. - Enter volttron central admin user name: - Enter volttron central admin password: - Retype password: Installing volttron central. Should the agent autostart? [N]: y + VC admin and password are set up using the admin web interface. + After starting VOLTTRON, please go to https://volttron-pc:8443/admin/login.html to complete the setup. Will this instance be controlled by volttron central? [Y]: y Configuring /home/user/volttron/services/core/VolttronCentralPlatform. What is the name of this instance? [volttron1]: @@ -99,9 +103,9 @@ and localhost is volttron-pc. Would you like to install a platform historian? [N]: y Configuring /home/user/volttron/services/core/SQLHistorian. Should the agent autostart? [N]: y - Would you like to install a master driver? [N]: y - Configuring /home/user/volttron/services/core/MasterDriverAgent. - Would you like to install a fake device on the master driver? [N]: y + Would you like to install a platform driver? [N]: y + Configuring /home/user/volttron/services/core/PlatformDriverAgent. + Would you like to install a fake device on the platform driver? [N]: y Should the agent autostart? [N]: y Would you like to install a listener agent? [N]: y Configuring examples/ListenerAgent. @@ -130,17 +134,19 @@ to select defaults on all fields except **credentials**, where we will type For more information on authorization see :ref:`authentication`. + Remote Platform Configuration ----------------------------- The next step is to configure the instances that will connect to VOLTTRON Central. In the second and third terminal windows run `vcfg`. Like -the VOLTTRON\_HOME variable, these instances need to have unique addresses. +the `VOLTTRON_HOME` variable, these instances need to have a unique :term:`VIP address` and a unique instance name. Install a platform agent and a historian as before. Since we used the default options when configuring VOLTTRON Central, we can use the default options when configuring these platform agents as well. The configuration will be a little -different. +different. The example below is for the second volttron instance. Note the unique VIP address and instance name. +Please ensure the web-address of the volttron central is configured correctly. .. code-block:: console @@ -153,18 +159,17 @@ different. What type of message bus (rmq/zmq)? [zmq]: What is the vip address? [tcp://127.0.0.1]: tcp://127.0.0.2 What is the port for the vip address? [22916]: - Is this instance web enabled? [N]: - Is this an instance of volttron central? [N]: - Will this instance be controlled by volttron central? [Y]: y + Is this instance web enabled? [N]: + Will this instance be controlled by volttron central? [Y]: Configuring /home/user/volttron/services/core/VolttronCentralPlatform. - What is the name of this instance? [volttron1]: + What is the name of this instance? [volttron1]: volttron2 What is the hostname for volttron central? [https://volttron-pc]: What is the port for volttron central? [8443]: Should the agent autostart? [N]: y Would you like to install a platform historian? [N]: y Configuring /home/user/volttron/services/core/SQLHistorian. Should the agent autostart? [N]: y - Would you like to install a master driver? [N]: + Would you like to install a platform driver? [N]: Would you like to install a listener agent? [N]: Finished configuration! @@ -175,18 +180,28 @@ different. (volttron)user@volttron-pc:~/volttron$ + Starting the Demo ----------------- -Start each Volttron instance after configuration. The "-l" option in the -following command tells volttron to log to a file. The file name -should be different for each instance. +Start each Volttron instance after configuration. You have two options. + +Option 1: The following command starts the volttron process in the background. The "-l" option tells volttron to log +to a file. The file name should be different for each instance. + +.. code-block:: console + + $ volttron -vv -l volttron.log& + +Option 2: Use the utility script start-volttron. This will override the default log file each time the script is ran +unless the script is modified with a different filename for each instance. + .. code-block:: console $ ./start-volttron -.. note:: If you choose to not start your agents with their platforms they will need to be started by hand. +.. note:: If you chose to not start your agents with their platforms they will need to be started by hand. List the installed agents with @@ -214,14 +229,50 @@ or In each of the above examples one could use * suffix to match more than one agent. -Open your browser to `localhost:8443/vc/index.hmtl` and and log in with the -credentials you provided. The platform agents should be automatically register -with VOLTTRON central. -.. note:: +VOLTTRON Admin +-------------- + +The admin page is used to set the master username and password for both admin page and VOLTTRON Central page. Admin page +can then be used to manage RMQ and ZMQ certificates and credentials. + +Open a web browser and navigate to https://volttron-pc:8443/admin/login.html + +There may be a message warning about a potential security risk. Check to see if the certificate +that was created in vcfg is being used. The process below is for firefox. + +|vc-cert-warning-1| - localhost is the local host of your machine. In the above examples, - this was volttron-pc. +.. |vc-cert-warning-1| image:: files/vc-cert-warning-1.png + +|vc-cert-warning-2| + +.. |vc-cert-warning-2| image:: files/vc-cert-warning-2.png + +|vc-cert-warning-3| + +.. |vc-cert-warning-3| image:: files/vc-cert-warning-3.png + +|vc-cert-warning-4| + +.. |vc-cert-warning-4| image:: files/vc-cert-warning-4.png + +When the admin page is accessed for the first time, the user will be prompted to set up a master +username and password. + +|admin-page-login| + +.. |admin-page-login| image:: files/volttron-admin-page.png + + +Open your browser to the web address that you specified for the VOLTTRON Central agent that you configured for the +first instance. In the above examples, the configuration file would be located at `~/.volttron1/config` and the +VOLTTRON Central address would be defined in the "volttron-central-address" field. The VOLTTRON Central address takes the +pattern: `https://:8443/vc/index.html`, where localhost is the hostname of your machine. +In the above examples, our hostname is `volttron-pc`; thus our VC interface would be +`https://volttron-pc:8443/vc/index.html`. + +You will need to provide the username and password set earlier through admin web page. Stopping the Demo @@ -239,13 +290,18 @@ Once the demo is complete you may wish to see the :ref:`VOLTTRON Central Management Agent ` page for more details on how to configure the agent for your specific use case. + Log In ------ -To log in to VOLTTRON Central, navigate in a browser to localhost:8443/vc/index.html, and enter the user name and password on the login screen. +To log in to VOLTTRON Central, open a browser and login to the Volttron web interface, which takes the form +`https://localhost:8443/vc/index.html` where localhost is the hostname of your machine. In the above example, we open +the following URL in which our localhost is "volttron-pc": https://volttron-pc:8443/vc/index.html and enter the user +name and password on the login screen. |Login Screen| + Log Out ------- @@ -254,8 +310,9 @@ of the screen. |Logout Button| + Platforms Tree -~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^ The side panel on the left of the screen can be extended to reveal the tree view of registered platforms. @@ -268,6 +325,7 @@ Top-level nodes in the tree are platforms. Platforms can be expanded in the tree to reveal installed agents, devices on buildings, and performance statistics about the platform instances. + Loading the Tree ---------------- @@ -279,6 +337,7 @@ node is expanded is when the items for that platform are loaded. After a platform has been loaded in the tree, all the items under a node can be quickly expanded by double-clicking on the node. + Health Status ------------- @@ -292,6 +351,7 @@ cursor over the item. |Status Tooltips| + Filter the Tree --------------- @@ -308,8 +368,9 @@ or "unknown." |Filter Status| + Platforms Screen -~~~~~~~~~~~~~~~~ +^^^^^^^^^^^^^^^^ This screen lists the registered VOLTTRON platforms and allows new platforms to be registered by clicking the Register Platform button. @@ -319,8 +380,9 @@ to go to the platform management view. |Platforms| + Platform View -~~~~~~~~~~~~~ +^^^^^^^^^^^^^ From the platforms screen, click on the name link of a platform to manage it. Managing a platform includes installing, starting, stopping, @@ -338,8 +400,9 @@ on a specific type of agent. For instance, platform agents and VOLTTRON Central agents can't be removed or stopped, but they can be restarted if they've been interrupted. + Add Charts -~~~~~~~~~~ +^^^^^^^^^^ Performance statistics and device points can be added to charts either from the Charts page or from the platforms tree in the side panel. @@ -390,6 +453,7 @@ a different option. |Chart Type| + Dashboard Charts ---------------- @@ -404,6 +468,7 @@ Charts that have been pinned to the Dashboard are saved to the database and will automatically load when the user logs in to VOLTTRON Central. Different users can save their own configurations of dashboard charts. + Remove Charts ------------- @@ -412,11 +477,6 @@ the X button next to the chart on the Charts page. Removing a chart removes it from the Charts page and the Dashboard. .. |Terminator Setup| image:: files/terminator-setup.png - :target: ../../_images/terminator-setup.png -.. |VC Config| image:: files/vc-config.png - :target: ../../_images/vc-config.png -.. |Platform Config| image:: files/platform-config.png - :target: ../../_images/platform-config.png .. |Login Screen| image:: files/login-screen.png .. |Logout Button| image:: files/logout-button.png .. |Platforms| image:: files/platforms.png @@ -439,3 +499,39 @@ removes it from the Charts page and the Dashboard. .. |Chart Type| image:: files/chart-type.png .. |Pin Chart| image:: files/pin-chart.png .. |Inspect Chart| image:: files/inspect-charts.png + + +VOLTTRON Central +^^^^^^^^^^^^^^^^ + +Navigate to https://volttron-pc:8443/vc/index.html + +Log in using the username and password you set up on the admin web page. + +|vc-login| + +.. |vc-login| image:: files/vc-login.png + + +Once you have logged in, click on the Platforms tab in the upper right corner of the window. + +|vc-dashboard| + +.. |vc-dashboard| image:: files/vc-dashboard.png + +Once in the Platforms screen, click on the name of the platform. + +|vc-platform| + +.. |vc-platform| image:: files/vc-platform.png + +You will now see a list of agents. They should all be running. + +|vc-agents| + +.. |vc-agents| image:: files/vc-agents.png + +For more information on VOLTTRON Central, please see: + +* :ref:`VOLTTRON Central Management ` +* :ref:`VOLTTRON Central Demo ` diff --git a/docs/source/deploying-volttron/platform-configuration.rst b/docs/source/deploying-volttron/platform-configuration.rst new file mode 100644 index 0000000000..e36fc8fb52 --- /dev/null +++ b/docs/source/deploying-volttron/platform-configuration.rst @@ -0,0 +1,174 @@ +.. _Platform-Configuration: + +====================== +Platform Configuration +====================== + +Each instance of the VOLTTRON platform includes a `config` file which is used to configure the platform instance on +startup. This file is kept in :term:`VOLTTRON_HOME` and is created using the `volttron-cfg` (`vcfg`) command, or will +be created with default values on start up of the platform otherwise. + +Following is helpful information about the `config` file and the `vcfg` command. + + +VOLTTRON_HOME +============= + +By default, the VOLTTRON project bases its files out of `VOLTTRON_HOME` which defaults to `~/.volttron`. This directory +features directories and files used by the platform for important operation and management tasks as well as containing +packaged agents and their individual runtime environments (including data directories, identity files, etc.) + +- **$VOLTTRON_HOME/agents** - contains the agents installed on the platform +- **$VOLTTRON_HOME/auth.json** - file containing authentication and authorization rules for agents connecting to the + VOLTTRON instance. +- **$VOLTTRON_HOME/certificates** - contains the certificates for use with the Licensed VOLTTRON code. +- **$VOLTTRON_HOME/configuration_store** - agent configuration store files are stored in this directory. Each agent + may have a file here in which JSON representations of their stored configuration files are stored. +- **$VOLTTRON_HOME/run** - contains files create by the platform during execution. The main ones are the ZMQ files + created for publish and subscribe functionality. +- **$VOLTTRON_HOME/ssh** - keys used by agent mobility in the Licensed VOLTTRON code +- **$VOLTTRON_HOME/config** - Default location to place a config file to override any platform settings. +- **$VOLTTRON_HOME/packaged** - agent packages created with `volttron-pkg` are created in this directory +- **$VOLTTRON_HOME/VOLTTRON_PID** - File containing the Unix process ID for the VOLTTRON platform - used for tracking + platform status. + + +.. _Platform-Config-File: + +VOLTTRON Config File +==================== + +The `config` file in `VOLTTRON_HOME` is the config file used by the platform. This configuration file specifies the +behavior of the platform at runtime, including which message bus it uses, the name of the platform instance, and the +address bound to by :term:`VIP`. The `VOLTTRON Config`_ wizard (explained below) can be used to configure an instance +for the first time. The user may run the wizard again or edit the config file directly as necessary for operations. +The following is a simple example `config` for a multi-platform deployment: + +:: + + [volttron] + message-bus = zmq + vip-address = tcp://127.0.0.1:22916 + bind-web-address = + web-ssl-cert = /certificates/certs/platform_web-server.crt + web-ssl-key = /certificates/private/platform_web-server.pem + instance-name = volttron1 + volttron-central-address = + +The example consists of the following entries: + +* **message-bus** - message bus being used for this instance (rmq/zmq) +* **vip-address** - address bound to by VIP for message bus communication +* **bind-web-address** - Optional, needed if platform has to support web feature. Represents address bound to by the + platform web service for handling HTTP(s) requests. Typical address would be ``https://:8443`` +* **web-ssl-cert** - Optional, needed if platform has to support web feature. Represents path to the certificate for the + instance's web service +* **web-ssl-key** - Optional, needed if platform has to support web feature. Represents secret key or path to secret key + file used by web service authenticate requests +* **instance-name** - name of this VOLTTRON platform instance, should be unique for the deployment +* **volttron-central-address** - Optional, needed if instance is running Volttron Central. Represents web address of + VOLTTRON Central agent managing this platform instance. Typical address would be ``https://:8443`` + + +.. _VOLTTRON-Config: + +VOLTTRON Config +=============== + +The `volttron-cfg` or `vcfg` command allows for an easy configuration of the VOLTTRON environment. The command includes +the ability to set up the platform configuration, an instance of the platform historian, VOLTTRON Central UI, and +VOLTTRON Central Platform agent. + +Running `vcfg` will create a `config` file in `VOLTTRON_HOME` which will be populated according to the answers to +prompts. This process should be repeated for each platform instance, and can be re-run to reconfigure a platform +instance. + +.. note:: + + To create a simple instance of VOLTTRON, leave the default response, or select yes (y) if prompted for a yes or no + response [Y/N]. You must choose a username and password for the VOLTTRON Central admin account if selected. + +A set of example responses are included here (`username` is ``user``, `localhost` is ``volttron-pc``): + +.. code-block:: console + + (volttron) user@volttron-pc:~/volttron$ vcfg + + Your VOLTTRON_HOME currently set to: /home/user/.volttron + + Is this the volttron you are attempting to setup? [Y]: + What type of message bus (rmq/zmq)? [zmq]: + What is the vip address? [tcp://127.0.0.1]: + What is the port for the vip address? [22916]: + Is this instance web enabled? [N]: y + What is the protocol for this instance? [https]: + Web address set to: https://volttron-pc + What is the port for this instance? [8443]: + Would you like to generate a new web certificate? [Y]: + WARNING! CA certificate does not exist. + Create new root CA? [Y]: + + Please enter the following details for web server certificate: + Country: [US]: + State: WA + Location: Richland + Organization: PNNL + Organization Unit: VOLTTRON + Created CA cert + Creating new web server certificate. + Is this an instance of volttron central? [N]: y + Configuring /home/user/volttron/services/core/VolttronCentral. + Installing volttron central. + ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] + Should the agent autostart? [N]: y + VC admin and password are set up using the admin web interface. + After starting VOLTTRON, please go to https://volttron-pc:8443/admin/login.html to complete the setup. + Will this instance be controlled by volttron central? [Y]: + Configuring /home/user/volttron/services/core/VolttronCentralPlatform. + What is the name of this instance? [volttron1]: + Volttron central address set to https://volttron-pc:8443 + ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] + Should the agent autostart? [N]: y + Would you like to install a platform historian? [N]: y + Configuring /home/user/volttron/services/core/SQLHistorian. + ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] + Should the agent autostart? [N]: y + Would you like to install a platform driver? [N]: y + Configuring /home/user/volttron/services/core/PlatformDriverAgent. + ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] + Would you like to**install a fake device on the platform driver? [N]: y + Should the agent autostart? [N]: y + Would you like to install a listener agent? [N]: y + Configuring examples/ListenerAgent. + ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] + Should the agent autostart? [N]: y + Finished configuration! + + You can now start the volttron instance. + + If you need to change the instance configuration you can edit + the config file is at /home/user/.volttron/config + +Once this is finished, run VOLTTRON and test the new configuration. + + +Optional Arguments +------------------ + + - **-v, --verbose** - Enables verbose output in standard-output (PIP output, etc.) + - **--vhome VHOME** - Provide a path to set `VOLTTRON_HOME` for this instance + - **--instance-name INSTANCE_NAME** - Provide a name for this instance. Required for running secure agents mode + - **--list-agents** - Display a list of configurable agents (Listener, Platform Driver, Platform Historian, VOLTTRON + Central, VOLTTRON Central Platform) + - **--agent AGENT [AGENT ...]** - Configure listed agents + - **--rabbitmq RABBITMQ [RABBITMQ ...]** - Configure rabbitmq for single instance, federation, or shovel either based + on configuration file in yml format or providing details when prompted. + + Usage: + + .. code-block:: bash + + vcfg --rabbitmq single|federation|shovel [rabbitmq config file]`` + + - **--secure-agent-users** - Require that agents run as their own Unix users (this requires running + `scripts/secure_user_permissions.sh` as `sudo`) diff --git a/docs/source/deploying-volttron/recipe-deployment.rst b/docs/source/deploying-volttron/recipe-deployment.rst new file mode 100644 index 0000000000..24a4fd2104 --- /dev/null +++ b/docs/source/deploying-volttron/recipe-deployment.rst @@ -0,0 +1,20 @@ +.. _volttron_recipes: + +================================== +Deployment Recipes (Multi-Machine) +================================== + +.. raw:: html + +
static content4
+ + +For more details about ansible recipes for scalable deployment strategies, see readthedocs pages of the +:std:doc:`VOLTTRON ansible repository` + diff --git a/docs/source/deploying-volttron/secure-deployment-considerations.rst b/docs/source/deploying-volttron/secure-deployment-considerations.rst new file mode 100644 index 0000000000..3bfb3875e8 --- /dev/null +++ b/docs/source/deploying-volttron/secure-deployment-considerations.rst @@ -0,0 +1,169 @@ +.. _Secure-Deployment-Considerations: + +===================================== +Security Considerations of Deployment +===================================== + +Security of computing systems is a complex topic which depends not only on the +security of each component, but on how software components interact and on the +environment in which they are running. +In the subsections here, we will discuss a variety of possible actions which +may increase the security of a particular deployment, along with their context. + +For more examples and discussion, see the `Publications section of the VOLTTRON website +`_ where there are a number of Threat Profile reports. + +Running as a Managed System Process +=================================== + +It is possible that the running VOLTTRON process could exit undesirably (either due +to a bug, or some malicious action). +For scenarios where not having the VOLTTRON process running presents a business +risk, it is recommended to follow the :ref:`system service setup` +to leverage the host system's process monitoring and management system. +Under this configuration, the system will be configured to restart VOLTTRON in the +event that it fails. + +.. note:: + + For this configuration to be effective, it is important that the platfrom + is configured such that it automatically starts up in the desired state. + In particular, review the installed agents and be sure that agents which + should be running are "enabled" and that their priorities are set such + that they start in the intended order. + +There are scenarios when this configuration may not be desired: + +1. If a system restarts cleanly after an unexpected failure, it is possible that + the underlying issue could go unnoticed (and therefore unresolved). This would + happen if a user checks the system and sees it is running but does not have a + way to realize that there has been one or more restarts. For development systems + it may be desirable to not restart, leaving the system in a failed state which + is more likely to be noticed as unusual, and with the failure details still present + in the recent logs. Consider the relative value of platform up-time and failure + this kind of failure discovery. If both are highly valuable, it may be possible + to add extra notifications to the process monitoring system (systemd, initd, or + other) so that records are retained while service is restored. +2. For development systems, or systems that are frequently stopped or restarted, + it can be more convenient to use the normal start and stop scripts packaged + with VOLTTRON. These do not require the user have system-level permissions + and are easily used from the terminal. + + +Run Web Server Behind Proxy +=========================== + +A VOLTTRON deployment may be web-enabled, allowing various interactions over HTTP. +There are many reasons why it is often desirable to deploy an external reverse +proxy in front of the system, including: + +- Allows regular security patching of the exposed web server independent of the VOLTTRON + process's lifecycle. +- Prevents DDoS and similar attacks, which may successfuly impact the web server, from + impacting the VOLTTRON process itself. +- Provides a opportunity for institutional cyber security experts to help maintain a + secure and compliant web server configuration without needing to gain VOLTTRON-specific + experience. +- Many other traffic management and filtering options which are documented by the various + tools (load balancing, http header management, etc.). + +Configuring a reverse proxy is outside the scope of this documentation. For reference, +two common open source options are `apache httpd `_ +and `nginx `_ +(relevant portions of their respective documentation pages are linked). + + +Monitor for Data Tampering +========================== + +One common indication of a potential problem, including tampering, would be the presence +of out of bounds values. +The :ref:`Threshold-Agent` can be used leveraged to create alerts in the event that a +topic has a value which is out of reasonable bounds. + +This approach has some limitations, including: + +- There can be subtleties in selecting the correct bounds to both ensure issues are seen + while minimizing false positives. +- Including value limits adds a significant amount of configuration to maintain, and which + is not necessarily high-visibility because it is in another agent. +- Currently there is only support for monitoring for values crossing a threshold, more + complex conditional logic would require a custom monitor. +- There could be cases where tampering adjusts values to incorrect but in-bounds values + which would not be detected. + + +Limit Publishing on the Devices Topic to Platform Driver +======================================================== + +To further reduce the chances of malicious data disrupting your system, you can limit the +ability to publish to the "devices" topic to the platform driver only. + +To accomplish this, you will need to modify protected_topics.json, +found in your $VOLTTRON_HOME directory. In this specific case, you would need +to add the topic "devices" and some capability, for example "can_publish_to_devices". + +.. code-block:: json + + { + "write-protect": [ + {"topic": "devices", "capabilities": ["can_publish_to_devices"]} + ] + } + +Next, using ``vctl auth list`` get the auth index for the platform.driver, +and use the command ``vctl auth update ``. +You will get a prompt to update the auth entry. Skip through the prompts until it prompts for +capabilities, and add can_publish_to_devices. + +.. code-block:: console + + capabilities (delimit multiple entries with comma) []: can_publish_to_devices + +For more information, refer to the section on :ref:`Protected-Topics`. + + +Limit Access to RPC Methods Using Capabilities +============================================== + +RPC enabled methods provide convenient interfaces between agents. +When they are unrestricted however, they open up the potential for malicious agents +to cause harm to your system. The best way to prevent this is through the use of capabilities. +A capability is a user defined arbitrary string used by an agent to describe its exported RPC method. +It is used to limit the access to that RPC method to only those agents who have that capability listed in +their authentication record. + +To add a capability restriction to an RPC method, the ``RPC.allow`` decorator is used. +For example, to limit those who can call the RPC enabled method "foo" to those with the capability "can_call_foo": + +.. code-block:: python + + @RPC.export + @RPC.allow("can_call_foo") + def foo: + print("hello") + +To give an agent permission to access this method, the auth file must be updated. +As in the above example for limiting publishing to the devices topic, vctl can be +used to update the auth file and grant the specific agent permission to access the RPC enabled method. + +.. code-block:: console + + capabilities (delimit multiple entries with comma) []: can_call_foo + +For a secure system, only add capabilties to the agents that will need to call a specific RPC enabled method, +and apply the allow decorator to all RPC enabled methods. + +For more information, refer to the section on :ref:`VIP-Authorization`. + + +Monitoring RabbitMQ Server +========================== + +Monitoring of RabbitMQ server in deployment setup can be achieved by running RabbitMQ server as a systemd service. +RabbitMQ server is configured to run as a systemd service and allow systemd to monitor the status of the service. It +can be further configured to detect and restart the RabbitMQ service if it crashes. VOLTTRON agents have the ability +to detect when the RabbitMQ server crashes/disconnects and reconnect when it becomes available. In this deployment +setup, a VOLTTRON platform will not start/stop the RabbitMQ server. + + diff --git a/docs/source/deploying-volttron/single-machine.rst b/docs/source/deploying-volttron/single-machine.rst new file mode 100644 index 0000000000..2c3ceff277 --- /dev/null +++ b/docs/source/deploying-volttron/single-machine.rst @@ -0,0 +1,320 @@ +.. _Single-Machine-Deployment: + +============== +Single Machine +============== + +The purpose of this demonstration is to show the process of setting up a simple VOLTTRON instance for use on a single +machine. + +.. note:: + + The simple deployment example below considers only the ZeroMQ deployment scenario. For RabbitMQ deployments, read + and perform the RabbitMQ installation steps from the :ref:`platform installation ` + instructions and configuration steps from :ref:`VOLTTRON Config `. + + +Install and Build VOLTTRON +========================== + +First, :ref:`install ` VOLTTRON: + +For a quick reference for Ubuntu machines: + +.. code-block:: console + + sudo apt-get update + sudo apt-get install build-essential libffi-dev python3-dev python3-venv openssl libssl-dev libevent-dev git + git clone https://github.com/VOLTTRON/volttron/ + cd volttron + python3 bootstrap.py --drivers --databases + +.. note:: + + For additional detail and more information on installing in other environments, please see the + :ref:`platform install ` section. See the :ref:`bootstrap process ` docs + for more information on its operation and available options. + + +Activate the Environment +------------------------ + +After the build is complete, activate the VOLTTRON environment. + +.. code-block:: console + + source env/bin/activate + + +Run VOLTTRON Config +------------------- + +The `volttron-cfg` or `vcfg` commands can be used to configure platform communication. For an example single machine +deployment, most values can be left at their default values. The following is a simple case example of running `vcfg`: + +.. code-block:: console + + (volttron) user@volttron-pc:~/volttron$ vcfg + + Your VOLTTRON_HOME currently set to: /home/james/.volttron + + Is this the volttron you are attempting to setup? [Y]: + What type of message bus (rmq/zmq)? [zmq]: + What is the vip address? [tcp://127.0.0.1]: + What is the port for the vip address? [22916]: + Is this instance web enabled? [N]: + Will this instance be controlled by volttron central? [Y]: N + Would you like to install a platform historian? [N]: + Would you like to install a platform driver? [N]: + Would you like to install a listener agent? [N]: + Finished configuration! + + You can now start the volttron instance. + + If you need to change the instance configuration you can edit + the config file is at /home/james/.volttron/config + +To learn more, read the :ref:`volttron-config ` section of the Platform Features docs. + +.. note:: + + Steps below highlight manually installing some example agents. To skip manual install, supply `y` or `Y` for the + ``platform historian``, ``platform driver`` and ``listener agent`` installation options. + + +Start VOLTTRON +-------------- + +The most convenient way to start the platform is with the `.start-volttron` command (from the volttron root +directory). + +.. code-block:: bash + + ./start-volttron + +The output following the platform starting successfully will appear like this: + +.. code-block:: console + + 2020-10-27 11:34:33,593 () volttron.platform.agent.utils DEBUG: value from env None + 2020-10-27 11:34:33,593 () volttron.platform.agent.utils DEBUG: value from config False + 2020-10-27 11:34:35,656 () root DEBUG: Creating ZMQ Core config.store + 2020-10-27 11:34:35,672 () volttron.platform.store INFO: Initializing configuration store service. + 2020-10-27 11:34:35,717 () root DEBUG: Creating ZMQ Core platform.auth + 2020-10-27 11:34:35,728 () volttron.platform.auth INFO: loading auth file /home/james/.volttron/auth.json + 2020-10-27 11:34:35,731 () volttron.platform.auth INFO: auth file /home/james/.volttron/auth.json loaded + 2020-10-27 11:34:35,732 () volttron.platform.agent.utils INFO: Adding file watch for /home/james/.volttron/auth.json dirname=/home/james/.volttron, filename=auth.json + 2020-10-27 11:34:35,734 () volttron.platform.agent.utils INFO: Added file watch for /home/james/.volttron/auth.json + 2020-10-27 11:34:35,734 () volttron.platform.agent.utils INFO: Adding file watch for /home/james/.volttron/protected_topics.json dirname=/home/james/.volttron, filename=protected_topics.json + 2020-10-27 11:34:35,736 () volttron.platform.agent.utils INFO: Added file watch for /home/james/.volttron/protected_topics.json + 2020-10-27 11:34:35,737 () volttron.platform.vip.pubsubservice INFO: protected-topics loaded + 2020-10-27 11:34:35,739 () volttron.platform.vip.agent.core INFO: Connected to platform: router: fc054c9f-aa37-4842-a618-6e70d53530f0 version: 1.0 identity: config.store + 2020-10-27 11:34:35,743 () volttron.platform.vip.agent.core INFO: Connected to platform: router: fc054c9f-aa37-4842-a618-6e70d53530f0 version: 1.0 identity: platform.auth + 2020-10-27 11:34:35,746 () volttron.platform.vip.pubsubservice INFO: protected-topics loaded + 2020-10-27 11:34:35,750 () volttron.platform.vip.agent.subsystems.configstore DEBUG: Processing callbacks for affected files: {} + 2020-10-27 11:34:35,879 () root DEBUG: Creating ZMQ Core control + 2020-10-27 11:34:35,908 () root DEBUG: Creating ZMQ Core keydiscovery + 2020-10-27 11:34:35,913 () root DEBUG: Creating ZMQ Core pubsub + 2020-10-27 11:34:35,924 () volttron.platform.auth INFO: loading auth file /home/james/.volttron/auth.json + 2020-10-27 11:34:38,010 () volttron.platform.vip.agent.core INFO: Connected to platform: router: fc054c9f-aa37-4842-a618-6e70d53530f0 version: 1.0 identity: control + 2020-10-27 11:34:38,066 () volttron.platform.vip.agent.core INFO: Connected to platform: router: fc054c9f-aa37-4842-a618-6e70d53530f0 version: 1.0 identity: pubsub + 2020-10-27 11:34:38,069 () volttron.platform.vip.agent.core INFO: Connected to platform: router: fc054c9f-aa37-4842-a618-6e70d53530f0 version: 1.0 identity: keydiscovery + 2020-10-27 11:34:38,429 () volttron.platform.auth WARNING: Attempt 1 to get peerlist failed with exception 0.5 seconds + 2020-10-27 11:34:38,430 () volttron.platform.auth WARNING: Get list of peers from subsystem directly + 2020-10-27 11:34:38,433 () volttron.platform.auth INFO: auth file /home/james/.volttron/auth.json loaded + 2020-10-27 11:34:38,434 () volttron.platform.auth INFO: loading auth file /home/james/.volttron/auth.json + 2020-10-27 11:34:40,961 () volttron.platform.auth WARNING: Attempt 1 to get peerlist failed with exception 0.5 seconds + 2020-10-27 11:34:40,961 () volttron.platform.auth WARNING: Get list of peers from subsystem directly + 2020-10-27 11:34:40,969 () volttron.platform.auth INFO: auth file /home/james/.volttron/auth.json loaded + + +.. note:: + + While running the platform with verbose logging enabled, the `volttron.log` file is useful for confirming successful + platform operations or debugging. It is commonly recommended to open a new terminal window and run the following + command to view the VOLTTRON logs as they are created: + + .. code-block:: bash + + tail -f volttron.log + + +Install Agents and Historian +============================ + +Out of the box, VOLTTRON includes a number of agents which may be useful for single machine deployments: + + * historians - Historians automatically record a data from a number of topics published to the bus. For more + information on the historian framework or one of the included concrete implementations, view the + :ref:`docs ` + * Listener - This example agent can be useful for debugging drivers or other agents publishing to the bus. + :ref:`docs ` + * Platform Driver - The :ref:`Platform-Driver` is responsible for managing device communication on a platform instance. + * weather agents - weather agents can be used to collect weather data from sources like + :ref:`Weather.gov ` + + .. note:: + + The `services/core`, `services/ops`, and `examples` directories in the repository contain additional agents to + use to fit individual use cases. + +For a simple setup example, a Platform Driver, SQLite Historian, and Listener are installed using the following steps: + +#. Create a configuration file for the Platform Driver and SQLite Historian (it is advised to create a `configs` directory + in volttron root to keep configs for a deployment). For information on how to create configurations for these + agents, view their docs: + + * :ref:`Platform Driver ` + * :ref:`SQLite Historian ` + * :ref:`Listener ` + + For a simple example, the configurations can be copied as-is to the `configs` directory: + + .. code-block:: bash + + cp services/core/PlatformDriverAgent/platform-driver.agent configs + cp services/core/SQLHistorian/config.sqlite configs + cp examples/ListenerAgent/config configs/listener.config + +#. Use the `install-agent.py` script to install the agent on the platform: + +.. code-block:: bash + + python scripts/install-agent.py -s services/core/SQLHistorian -c configs/config.sqlite --tag listener + python scripts/install-agent.py -s services/core/PlatformDriverAgent -c configs/platform-driver.agent --tag platform_driver + python scripts/install-agent.py -s examples/ListenerAgent -c configs/listener.config --tag platform_historian + + .. note:: + + The `volttron.log` file will contain logging indicating that the agent has installed successfully. + + .. code-block:: console + + 2020-10-27 11:42:08,882 () volttron.platform.auth INFO: AUTH: After authenticate user id: control.connection, b'c61dff8e-f362-4906-964f-63c32b99b6d5' + 2020-10-27 11:42:08,882 () volttron.platform.auth INFO: authentication success: userid=b'c61dff8e-f362-4906-964f-63c32b99b6d5' domain='vip', address='localhost:1000:1000:3249', mechanism='CURVE', credentials=['ZrDvPG4JNLE26GoPUrTP22rV0PV8uGCnrXThrNFk_Ec'], user='control.connection' + 2020-10-27 11:42:08,898 () volttron.platform.aip DEBUG: Using name template "listeneragent-3.3_{n}" to generate VIP ID + 2020-10-27 11:42:08,899 () volttron.platform.aip INFO: Agent b3e7053c-28e8-414f-b685-8522eb230c7a setup to use VIP ID listeneragent-3.3_1 + 2020-10-27 11:42:08,899 () volttron.platform.agent.utils DEBUG: missing file /home/james/.volttron/agents/b3e7053c-28e8-414f-b685-8522eb230c7a/listeneragent-3.3/listeneragent-3.3.dist-info/keystore.json + 2020-10-27 11:42:08,899 () volttron.platform.agent.utils INFO: creating file /home/james/.volttron/agents/b3e7053c-28e8-414f-b685-8522eb230c7a/listeneragent-3.3/listeneragent-3.3.dist-info/keystore.json + 2020-10-27 11:42:08,899 () volttron.platform.keystore DEBUG: calling generate from keystore + 2020-10-27 11:42:08,909 () volttron.platform.auth INFO: loading auth file /home/james/.volttron/auth.json + 2020-10-27 11:42:11,415 () volttron.platform.auth WARNING: Attempt 1 to get peerlist failed with exception 0.5 seconds + 2020-10-27 11:42:11,415 () volttron.platform.auth WARNING: Get list of peers from subsystem directly + 2020-10-27 11:42:11,419 () volttron.platform.auth INFO: auth file /home/james/.volttron/auth.json loaded + +#. Use the `vctl status` command to ensure that the agents have been successfully installed: + +.. code-block:: bash + + vctl status + +.. code-block:: console + + (volttron)user@volttron-pc:~/volttron$ vctl status + AGENT IDENTITY TAG STATUS HEALTH + 8 listeneragent-3.2 listeneragent-3.2_1 listener + 0 platform_driveragent-3.2 platform.driver platform_driver + 3 sqlhistorianagent-3.7.0 platform.historian platform_historian + +.. note:: + + After installation, the `STATUS` and `HEALTH` columns of the `vctl status` command will be vacant, indicating that + the agent is not running. The `--start` option can be added to the `install-agent.py` script arguments to + automatically start agents after they have been installed. + + +Install a Fake Driver +===================== + +The following are the simplest steps for installing a fake driver for example use. For more information on installing +concrete drivers such as the BACnet or Modbus drivers, view their respective documentation in the +:ref:`Driver framework ` section. + +.. note:: + + This section will assume the user has created a `configs` directory in the volttron root directory, activated + the Python virtual environment, and started the platform as noted above. + +.. code-block:: console + + cp examples/configurations/drivers/fake.config /configs + cp examples/configurations/drivers/fake.csv /configs + vctl config store platform.driver devices/campus/building/fake configs/fake.config + vctl config store platform.driver fake.csv devices/fake.csv + +.. note:: + + For more information on the fake driver, or the configurations used in the above example, view the + :ref:`docs ` + + +Testing the Deployment +====================== + +To test that the configuration was successful, start an instance of VOLTTRON in the background: + +.. code-block:: console + + ./start-volttron + +.. note:: + + This command must be run from the root VOLTTRON directory. + +Having following the examples above, the platform should be ready for demonstrating the example deployment. Start +the Listener, SQLite historian and Platform Driver. + +.. code-block:: console + + vctl start --tag listener platform_historian platform_driver + +The output should look similar to this: + +.. code-block:: console + + (volttron)user@volttron-pc:~/volttron$ vctl status + AGENT IDENTITY TAG STATUS HEALTH + 8 listeneragent-3.2 listeneragent-3.2_1 listener running [2810] GOOD + 0 platform_driveragent-3.2 platform.driver platform_driver running [2813] GOOD + 3 sqlhistorianagent-3.7.0 platform.historian platform_historian running [2811] GOOD + +.. note:: + + The `STATUS` column indicates whether the agent is running. The `HEALTH` column indicates whether the current state + of the agent is within intended parameters (if the Platform Driver is publishing, the platform historian has not been + backlogged, etc.) + +You can further verify that the agents are functioning correctly with ``tail -f volttron.log``. + +ListenerAgent: + +.. code-block:: console + + 2020-10-27 11:43:33,997 (listeneragent-3.3 3294) __main__ INFO: Peer: pubsub, Sender: listeneragent-3.3_1:, Bus: , Topic: heartbeat/listeneragent-3.3_1, Headers: {'TimeStamp': '2020-10-27T18:43:33.988561+00:00', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: + 'GOOD' + +Platform Driver with Fake Driver: + +.. code-block:: console + + 2020-10-27 11:47:50,037 (listeneragent-3.3 3294) __main__ INFO: Peer: pubsub, Sender: platform.driver:, Bus: , Topic: devices/campus/building/fake/all, Headers: {'Date': '2020-10-27T18:47:50.005349+00:00', 'TimeStamp': '2020-10-27T18:47:50.005349+00:00', 'SynchronizedTimeStamp': '2020-10-27T18:47:50.000000+00:00', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: + [{'EKG': -0.8660254037844386, + 'EKG_Cos': -0.8660254037844386, + 'EKG_Sin': -0.8660254037844386, + 'Heartbeat': True, + 'OutsideAirTemperature1': 50.0, + 'OutsideAirTemperature2': 50.0, + 'OutsideAirTemperature3': 50.0, + 'PowerState': 0, + 'SampleBool1': True, + 'SampleBool2': True, + 'SampleBool3': True, + 'SampleLong1': 50, + ... + +SQLite Historian: + +.. code-block:: console + + 2020-10-27 11:50:25,021 (platform_driveragent-4.0 3535) platform_driver.driver DEBUG: finish publishing: devices/campus/building/fake/all + 2020-10-27 11:50:25,052 (sqlhistorianagent-3.7.0 3551) volttron.platform.dbutils.sqlitefuncts DEBUG: Managing store - timestamp limit: None GB size limit: None diff --git a/docs/source/community_resources/index.rst b/docs/source/developing-volttron/community.rst similarity index 61% rename from docs/source/community_resources/index.rst rename to docs/source/developing-volttron/community.rst index 1c174ac23f..d8fd8e1bf1 100644 --- a/docs/source/community_resources/index.rst +++ b/docs/source/developing-volttron/community.rst @@ -1,33 +1,40 @@ -.. _community: +.. _Community: + +================== +Join the Community +================== +The VOLTTRON project is transitioning into the Eclipse Foundation as Eclipse VOLTTRON. Current resources will still +be used during this time. Please watch this space! + +The Eclipse VOLTTRON team aims to work with users and contributors to continuously improve the platform with features +requested by the community as well as architectural features that improve robustness, security, and scalability. +Contributing back to the project, which is encouraged but not required, enhances its capabilities for the whole community. +To learn more, check out :ref:`Contributing ` and :ref:`Documentation `. -=================== -Join the Community -=================== -The VOLTTRON team aims to work with users and contributors to continuously improve the platform with features requested -by the community as well as architectural features that improve robustness, security, and scalability. Contributing -back to the project, which is encouraged but not required, enhances its capabilities for the whole community. To -learn more, check out :ref:`Contributing ` and :ref:`Documentation `. Slack Channel -^^^^^^^^^^^^^ +============= volttron-community.slack.com is where the |VOLTTRON| community at large can ask questions and meet with others using |VOLTTRON|. To be added to Slack please email the VOLTTRON team at `volttron@pnnl.gov `__. + Mailing List -^^^^^^^^^^^^ +============ Join the mailing list by emailing `volttron@pnnl.gov `__. + Stack Overflow -^^^^^^^^^^^^^^ +============== The VOLTTRON community supports questions being asked and answered through Stack Overflow. The questions tagged with the `volttron` tag can be found at http://stackoverflow.com/questions/tagged/volttron. + Office Hours -^^^^^^^^^^^^ +============ PNNL hosts office hours every other week on Fridays at 11 AM (PST). These meetings are designed to be very informal where VOLTTRON developers can answer specific questions about the inner workings of VOLTTRON. These meetings are also @@ -37,15 +44,5 @@ available through a Zoom meeting. To be invited to the link meeting, contact the Meetings are recorded and can be reviewed `here `__. -Contributing Back -^^^^^^^^^^^^^^^^^^ - -.. toctree:: - :glob: - :maxdepth: 1 - - contributing - documentation - * .. |VOLTTRON| unicode:: VOLTTRON U+2122 diff --git a/docs/source/developing-volttron/contributing-code.rst b/docs/source/developing-volttron/contributing-code.rst new file mode 100644 index 0000000000..6aefea2e1d --- /dev/null +++ b/docs/source/developing-volttron/contributing-code.rst @@ -0,0 +1,334 @@ +.. _Contributing-Code: + +================= +Contributing Code +================= + +As an open source project VOLTTRON requires input from the community to keep development focused on new and useful +features. To that end we are revising our commit process to hopefully allow more contributors to be a part of the +community. The following document outlines the process for source code and documentation to be submitted. +There are GUI tools that may make this process easier, however this document will focus on what is required from the +command line. + +The only requirements for contributing are Git (Linux version control software) and your favorite web browser. + +.. note:: + + The following guide assumes the user has already created a fork of the core VOLTTRON repository. Please review the + :ref:`docs ` if you have not yet created a fork. + +The only technical requirements for contributing are Git (version control software) and your +favorite web browser. + +As a part of VOLTTRON joining the Eclipse community, Eclipse requires that all contributors sign the +`Eclipse Contributor agreement `_ before making a pull request. + + +Reviewing Changes +================= + +Okay, we've written a cool new `foo.py` script to service `bar` in our deployment. Let's make sure our code is +up-to-snuff. + +Code +---- + +First, go through the code. + +.. note:: + + We on the VOLTTRON team would recommend an internal code review - it can be really hard to catch small mistakes, + typos, etc. for code you just finished writing. + +* Does the code follow best-practices for Python, object-oriented programming, unit and integration testing, etc.? +* Does the code contain any typos and does it follow `Pep8 guidelines `_? +* Does the code follow the guidelines laid out in the VOLTTRON documentation? + + +Docs +---- + +Next, Check out the documentation. + +* Is it complete? + + * Has an introduction describing purpose + * Describes configuration including all parameters + * Includes installation instructions + * Describes behavior at runtime + * Describes all available endpoints (JSON-RPC, pub/sub messages, Web-API endpoints, etc.) + +* Does it follow the :ref:`VOLTTRON documentation guidelines `? + + +Tests +----- + +You've included tests, right? Unit and integration tests show users that `foo.py` is better than their wildest +dreams - all of the features work, and include components they hadn't even considered themselves! + +* Are the unit tests thorough? + + * Success and failure cases + * Tests for each independent component of the code + +* Do the integration tests capture behavior with a running VOLTTRON platform? + + * Success and Failure cases + * Tests for each endpoint + * Tests for interacting with other agents if necessary + * Are status, health, etc. updating as expected when things go wrong or the code recovers? + +* Can the tests be read to describe the behavior of the code? + +Structure +--------- + +For agents and drivers, the VOLTTRON team has some really simple structure recommendations. These make your project +structure nice and tidy, and integrate nicely with the core repository. + +For agents: + +:: + + TestAgent/ + ├── setup.py + ├── config + ├── README.rst + ├── tester + | ├── agent.py + | └── __init__.py + └── tests + └── test_agent.py + +For drivers, the interface should be a file named after the driver in the Platform Driver's interfaces directory: + +:: + + ├── platform_driver + │ ├── agent.py + │ ├── driver.py + │ ├── __init__.py + │ ├── interfaces + │ │ ├── __init__.py + │ │ ├── bacnet.py + | | ├── csvdriver.py + │ │ └── new_driver.py + +Or in the `__init__.py` file in a directory named after the driver in the Platform Driver's interfaces directory: + +:: + + ├── platform_driver + │ ├── agent.py + │ ├── driver.py + │ ├── __init__.py + │ ├── interfaces + │ │ ├── __init__.py + │ │ ├── bacnet.py + │ │ ├── new_driver + │ │ | └── __init__.py + +This option is ideal for adding additional code files, and including documentation and tests. + + +Creating a Pull Request to the main VOLTTRON repository +======================================================= + +After reviewing changes to our fork of the VOLTTRON repository, we want our changes to be added into the main VOLTTRON +repository. After all, our `foo.py` can cure a lot of the world's problems and of course it is always good to have a +copyright with the correct year. Open your browser to +https://github.com/VOLTTRON/volttron/compare/develop...YOUR_USERNAME:develop. + +On that page the base fork should always be VOLTTRON/volttron with the base develop, the head fork should +be /volttron and the compare should be the branch in your repository to pull from. Once you have +verified that you have got the right changes made then, click on create pull request, enter a title and description that +represent your changes and submit the pull request. + +The VOLTTRON repository has a description template to use to format your PR: + +:: + + # Description + + Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. + + Fixes # (issue) + + ## Type of change + + Please delete options that are not relevant. + + - [ ] Bug fix (non-breaking change which fixes an issue) + - [ ] New feature (non-breaking change which adds functionality) + - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) + - [ ] This change requires a documentation update + + # How Has This Been Tested? + + Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details for your test configuration + + - [ ] Test A + - [ ] Test B + + **Test Configuration**: + * Firmware version: + * Hardware: + * Toolchain: + * SDK: + + # Checklist: + + - [ ] My code follows the style guidelines of this project + - [ ] I have performed a self-review of my own code + - [ ] I have commented my code, particularly in hard-to-understand areas + - [ ] I have made corresponding changes to the documentation + - [ ] My changes generate no new warnings + - [ ] I have added tests that prove my fix is effective or that my feature works + - [ ] New and existing unit tests pass locally with my changes + - [ ] Any dependent changes have been merged and published in downstream modules + +.. note:: + + The VOLTTRON repository includes a stub for completing your pull request. Please follow the stub to facilitate the + reviewing and merging processes. + + +What happens next? +================== + +Once you create a pull request, one or more VOLTTRON team members will review your changes and either accept them as is +ask for modifications in order to have your commits accepted. Typical response time is approximately two weeks; please +be patient, your pull request will be reviewed. You will be automatically emailed through the GitHub notification +system when this occurs (assuming you haven't changed your GitHub preferences). + + +Merging changes from the main VOLTTRON repository +------------------------------------------------- + +As time goes on the VOLTTRON code base will continually be modified so the next time you want to work on a change to +your files the odds are your local and remote repository will be out of date. In order to get your remote VOLTTRON +repository up to date with the main VOLTTRON repository you could simply do a pull request to your remote repository +from the main repository. To do so, navigate your browser to +https://github.com/YOUR_USERNAME/volttron/compare/develop...VOLTTRON:develop. + +Click the 'Create Pull Request' button. On the following page click the 'Create Pull Request' button. On the next page +click 'Merge Pull Request' button. + +Once your remote is updated you can now pull from your remote repository into your local repository through the +following command: + +.. code-block:: bash + + git pull + +The other way to get the changes into your remote repository is to first update your local repository with the +changes from the main VOLTTRON repository and then pushing those changes up to your remote repository. To do that you +need to first create a second remote entry to go along with the origin. A remote is simply a pointer to the url of a +different repository than the current one. Type the following command to create a new remote called 'upstream': + +.. code-block:: bash + + git remote add upstream https://github.com/VOLTTRON/volttron + +To update your local repository from the main VOLTTRON repository then execute the following command where upstream is +the remote and develop is the branch to pull from: + +.. code-block:: bash + + git pull upstream develop + +Finally to get the changes into your remote repository you can execute: + +.. code-block:: bash + + git push origin + + +.. _Git-Commands: + +Other commands to know +^^^^^^^^^^^^^^^^^^^^^^ + +At this point in time you should have enough information to be able to update both your local and remote repository +and create pull requests in order to get your changes into the main VOLTTRON repository. The following commands are +other commands to give you more information that the preceding tutorial went through + + +Viewing what the remotes are in our local repository +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git remote -v + + +Stashing changed files so that you can do a merge/pull from a remote +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git stash save 'A comment to be listed' + + +Applying the last stashed files to the current repository +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git stash pop + + +Finding help about any git command +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git help + git help branch + git help stash + git help push + git help merge + + +Creating a branch from the branch and checking it out +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git checkout -b newbranchname + + +Checking out a branch (if not local already will look to the remote to checkout) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git checkout branchname + + +Removing a local branch (cannot be current branch) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git branch -D branchname + + +Determine the current and show all local branches +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + git branch + + +Using Travis Continuous Integration Tools +----------------------------------------- + +The main VOLTTRON repository is hooked into an automated build tool called travis-ci. Your remote repository can be +automatically built with the same tool by hooking your account into travis-ci's environment. To do this go to +https://travis-ci.org and create an account. You can using your GitHub login directly to this service. Then you will +need to enable the syncing of your repository through the travis-ci service. Finally you need to push a new change to +the repository. If the build fails you will receive an email notifying you of that fact and allowing you to modify the +source code and then push new changes out. diff --git a/docs/source/developing-volttron/contributing-documentation.rst b/docs/source/developing-volttron/contributing-documentation.rst new file mode 100644 index 0000000000..5be5231ea9 --- /dev/null +++ b/docs/source/developing-volttron/contributing-documentation.rst @@ -0,0 +1,216 @@ +.. _Contributing-Documentation: + +========================== +Contributing Documentation +========================== + +The Community is encouraged to contribute documentation back to the project as they work through use cases the +developers may not have considered or documented. By contributing documentation back, the community can +learn from each other and build up a more extensive knowledge base. + +|VOLTTRON| documentation utilizes ReadTheDocs: http://volttron.readthedocs.io/en/develop/ and is built +using the `Sphinx `_ Python library with static content in +`Restructured Text `_. + + +Building the Documentation +========================== + +Static documentation can be found in the `docs/source` directory. Edit or create new .rst files to add new content +using the `Restructured Text `_ format. To see the results +of your changes the documentation can be built locally through the command line using the following instructions: + +If you've already :ref:`bootstrapped ` |VOLTTRON|, do the following while activated. If not, +this will also pull down the necessary |VOLTTRON| libraries. + +.. code-block:: bash + + python bootstrap.py --documentation + cd docs + make html + +Then, open your browser to the created local files: + +.. code-block:: bash + + file:///home//git/volttron/docs/build/html/index.html + + +When complete, changes can be contributed back using the same process as code :ref:`contributions ` +by creating a pull request. When the changes are accepted and merged, they will be reflected in the ReadTheDocs site. + +.. |VOLTTRON| unicode:: VOLTTRON U+2122 + + +.. _Documentation-Styleguide: + +Documentation Styleguide +======================== + + +Naming Conventions +------------------ + +* File names and directories should be all lower-case and use only dashes/minus signs (-) as word separators + +:: + + index.rst + ├── first-document.rst + ├── more-documents + │ ├──second-document.rst + +* Reference Labels should be Capitalized and dash/minus separated: + +:: + + .. _Reference-Label: + +* Headings and Sub-headings should be written like book titles: + +:: + + ============== + The Page Title + ============== + + +Headings +-------- + +Each page should have a main title: + +:: + + ================================== + This is the Main Title of the Page + ================================== + +It can be useful to include reference labels throughout the document to use to refer back to that section of +documentation. Include reference labels above titles and important headings: + +:: + + .. _Main-Title: + + ================================== + This is the main title of the page + ================================== + + +Heading Levels +^^^^^^^^^^^^^^ + +* Page titles and documentation parts should use over-line and underline hashes: + +:: + + ===== + Title + ===== + +* Chapter headings should be over-lined and underlined with asterisks + +:: + + ******* + Chapter + ******* + +* For sections, subsections, sub-subsections, etc. underline the heading with the following: + + * =, for sections + * -, for subsections + * ^, for sub-subsections + * “, for paragraphs + + +In addition to following guidelines for styling, please separate headers from previous content by two newlines. + +:: + + ===== + Title + ===== + + Content + + + Subheading + ========== + + +Example Code Blocks +-------------------- + +Use bash for commands or user actions: + +.. code-block:: bash + + ls -al + + +Use this for the results of a command: + +.. code-block:: console + + total 5277200 + drwxr-xr-x 22 volttron volttron 4096 Oct 20 09:44 . + drwxr-xr-x 23 volttron volttron 4096 Oct 19 18:39 .. + -rwxr-xr-x 1 volttron volttron 164 Sep 29 17:08 agent-setup.sh + drwxr-xr-x 3 volttron volttron 4096 Sep 29 17:13 applications + + +Use this when Python source code is displayed + +.. code-block:: python + + @RPC.export + def status_agents(self): + return self._aip.status_agents() + + +Directives +---------- + +.. DANGER:: + + Something very bad! + +.. tip:: + + This is something good to know + + +Some other directives +^^^^^^^^^^^^^^^^^^^^^ + +"attention", "caution", "danger", "error", "hint", "important", "note", "tip", "warning", "admonition" + + +Links +----- + +Linking to external sites is simple: + +:: + + Link to `Google `_ + + +References +---------- + +You can reference other sections of documentation using the `ref` directive: + +:: + + This will reference the :ref:`platform installation ` + + +Other resources +--------------- + +- http://pygments.org/docs/lexers/ +- http://documentation-style-guide-sphinx.readthedocs.io/en/latest/style-guide.html +- http://www.sphinx-doc.org/en/stable/markup/code.html diff --git a/docs/source/devguides/agent_development/Agent-Configuration-Store.rst b/docs/source/developing-volttron/developing-agents/agent-configuration-store.rst similarity index 91% rename from docs/source/devguides/agent_development/Agent-Configuration-Store.rst rename to docs/source/developing-volttron/developing-agents/agent-configuration-store.rst index c019a8f295..6c5e84404e 100644 --- a/docs/source/devguides/agent_development/Agent-Configuration-Store.rst +++ b/docs/source/developing-volttron/developing-agents/agent-configuration-store.rst @@ -18,6 +18,7 @@ Updates from the platform will usually trigger callbacks on the agent. Agent access to the Configuration Store is managed through the `self.vip.config` object in the Agent class. + The "config" Configuration ************************** @@ -29,9 +30,7 @@ change to another configuration triggers any callbacks for `config`. Configuration Callbacks *********************** -Agents may setup callbacks for different configuration events. - -The callback method must have the following signature: +Agents may setup callbacks for different configuration events. The callback method must have the following signature: .. code-block:: python @@ -42,18 +41,24 @@ The callback method must have the following signature: The example above is for a class member method, however the method does not need to be a member of the agent class. - **config_name** - The method to call when a configuration event occurs. -- **action** - The specific configuration event type that triggered the callback. Possible values are "NEW", "UPDATE", "DELETE". See `Configuration Events`_ -- **contents** - The actual contents of the configuration. Will be a string, list, or dictionary for the actions "NEW" and "UPDATE". None if the action is "DELETE". +- **action** - The specific configuration event type that triggered the callback. Possible values are "NEW", "UPDATE", + "DELETE". See :ref:`Configuration Events ` +- **contents** - The actual contents of the configuration. Will be a string, list, or dictionary for the actions "NEW" + and "UPDATE". None if the action is "DELETE". .. note:: - All callbacks which are connected to the "NEW" event for a configuration will called during agent startup with the initial state of the configuration. + All callbacks which are connected to the "NEW" event for a configuration will called during agent startup with the + initial state of the configuration. +.. _Configuration-Store-Events: + Configuration Events -------------------- -- **NEW** - This event happens for every existing configuration at Agent startup and whenever a new configuration is added to the Configuration Store. +- **NEW** - This event happens for every existing configuration at Agent startup and whenever a new configuration is + added to the Configuration Store. - **UPDATE** - This event happens every time a configuration is changed. - **DELETE** - The event happens every time a configuration is removed from the store. @@ -73,17 +78,21 @@ A callback is setup with the `self.vip.config.subscribe` method. subscribe(callback, actions=["NEW", "UPDATE", "DELETE"], pattern="*") - **callback** - The method to call when a configuration event occurs. -- **actions** - The specific configuration event that will trigger the callback. May be a string with the name of a single action or a list of actions. +- **actions** - The specific configuration event that will trigger the callback. May be a string with the name of a + single action or a list of actions. - **pattern** - The pattern used to match configuration names to trigger the callback. + Configuration Name Pattern Matching ----------------------------------- -Configuration name matching uses Unix file name matching semantics. Specifically the python module :py:mod:`fnmatch` is used. +Configuration name matching uses Unix file name matching semantics. Specifically the python module :py:mod:`fnmatch` is +used. Name matching is not case sensitive regardless of the platform VOLTTRON is running on. -For example, the pattern `devices/*` will trigger the supplied callback for any configuration name that starts with `devices/`. +For example, the pattern `devices/*` will trigger the supplied callback for any configuration name that starts with +`devices/`. The default pattern matches all configurations. @@ -91,9 +100,8 @@ The default pattern matches all configurations. Getting a Configuration *********************** -Once RPC methods are available to an agent (once onstart methods have been called or -from any configuration callback) the contents of any configuration may be acquired -with the `self.vip.config.get` method. +Once RPC methods are available to an agent (once onstart methods have been called or from any configuration callback) +the contents of any configuration may be acquired with the `self.vip.config.get` method. .. code-block:: python @@ -101,12 +109,14 @@ with the `self.vip.config.get` method. If the Configuration Subsystem has not been initialized with the starting values of the agent configuration that will happen in order to satisfy the request. + If initialization occurs to satisfy the request callbacks will *not* be called before returning the results. Typically an Agent will only obtain the contents of a configuration via a callback. This method is included for agents that want to save state in the store and only need to retrieve the contents of a configuration at startup and ignore any changes to the configuration going forward. + Setting a Configuration *********************** @@ -163,8 +173,8 @@ via the tradition method of a bundled configuration file the `self.vip.config.se .. warning:: - This method may **not** be called once the Agent Configuration Store Subsystem has been initialized. This method should - only be called from `__init__` or an `onsetup` method. + This method may **not** be called once the Agent Configuration Store Subsystem has been initialized. This method + should only be called from `__init__` or an `onsetup` method. The `set_default` method adds a temporary configuration to the Agents Configuration Subsystem. Nothing is sent to the platform. If a configuration with the same name exists in the platform store it will be presented to @@ -183,21 +193,25 @@ If a configuration is deleted from the store and a default configuration exists the Agent Configuration Subsystem will call the `UPDATE` callback for that configuration with the contents of the default configuration. + Other Methods ************* In a well thought out configuration scheme these methods should not be needed but are included for completeness. + List Configurations ------------------- A current list of all configurations for the Agent may be called with the `self.vip.config.list` method. + Unsubscribe ----------- All subscriptions can be removed with a call to the `self.vip.config.unsubscribe_all` method. + Delete ------ @@ -211,6 +225,7 @@ A configuration can be deleted with a call to the `self.vip.config.delete` metho This method may **not** be called from a callback for the same reason as the `self.vip.config.set` method. + Delete Default -------------- @@ -225,6 +240,7 @@ A default configuration can be deleted with a call to the `self.vip.config.delet This method may **not** be called once the Agent Configuration Store Subsystem has been initialized. This method should only be called from `__init__` or an `onsetup` method. + Example Agent ************* @@ -280,4 +296,3 @@ The following example shows how to use set_default with a basic configuration an def configure_delete(self, config_name, action, contents): _log.debug("Removing {}".format(config_name)) #Do something in response to the removed configuration. - diff --git a/docs/source/developing-volttron/developing-agents/agent-development.rst b/docs/source/developing-volttron/developing-agents/agent-development.rst new file mode 100644 index 0000000000..240d8510a2 --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/agent-development.rst @@ -0,0 +1,1010 @@ +.. _Agent-Development: + +================= +Agent Development +================= + +The VOLTTRON platform now has utilities to speed the creation and installation of new agents. To use these utilities the +VOLTTRON environment must be activated. + +From the project directory, activate the VOLTTRON environment with: + +.. code-block:: bash + + source env/bin/activate + + +Create Agent Code +================= + +Run the following command to start the Agent Creation Wizard: + +.. code-block:: bash + + vpkg init TestAgent tester + +`TestAgent` is the directory that the agent code will be placed in. The directory must not exist when the command is +run. `tester` is the name of the agent module created by wizard. + +The Wizard will prompt for the following information: + +.. code-block:: console + + Agent version number: [0.1]: 0.5 + Agent author: []: VOLTTRON Team + Author's email address: []: volttron@pnnl.gov + Agent homepage: []: https://volttron.org/ + Short description of the agent: []: Agent development tutorial. + +Once the last question is answered the following will print to the console: + +.. code-block:: console + + 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent + 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/tester + 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/setup.py + 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/config + 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/tester/agent.py + 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/tester/__init__.py + +The TestAgent directory is created with the new Agent inside. + + +Agent Directory +--------------- + +At this point, the contents of the TestAgent directory should look like: + +:: + + TestAgent/ + ├── setup.py + ├── config + └── tester + ├── agent.py + └── __init__.py + + +Agent Skeleton +-------------- + +The `agent.py` file in the `tester` directory of the newly created agent module will contain skeleton code (below). +Descriptions of the features of this code as well as additional development help are found in the rest of this document. + +.. code-block:: python + + """ + Agent documentation goes here. + """ + + __docformat__ = 'reStructuredText' + + import logging + import sys + from volttron.platform.agent import utils + from volttron.platform.vip.agent import Agent, Core, RPC + + _log = logging.getLogger(__name__) + utils.setup_logging() + __version__ = "0.1" + + + def tester(config_path, **kwargs): + """ + Parses the Agent configuration and returns an instance of + the agent created using that configuration. + + :param config_path: Path to a configuration file. + :type config_path: str + :returns: Tester + :rtype: Tester + """ + try: + config = utils.load_config(config_path) + except Exception: + config = {} + + if not config: + _log.info("Using Agent defaults for starting configuration.") + + setting1 = int(config.get('setting1', 1)) + setting2 = config.get('setting2', "some/random/topic") + + return Tester(setting1, setting2, **kwargs) + + + class Tester(Agent): + """ + Document agent constructor here. + """ + + def __init__(self, setting1=1, setting2="some/random/topic", **kwargs): + super(Tester, self).__init__(**kwargs) + _log.debug("vip_identity: " + self.core.identity) + + self.setting1 = setting1 + self.setting2 = setting2 + + self.default_config = {"setting1": setting1, + "setting2": setting2} + + # Set a default configuration to ensure that self.configure is called immediately to setup + # the agent. + self.vip.config.set_default("config", self.default_config) + # Hook self.configure up to changes to the configuration file "config". + self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") + + def configure(self, config_name, action, contents): + """ + Called after the Agent has connected to the message bus. If a configuration exists at startup + this will be called before onstart. + + Is called every time the configuration in the store changes. + """ + config = self.default_config.copy() + config.update(contents) + + _log.debug("Configuring Agent") + + try: + setting1 = int(config["setting1"]) + setting2 = str(config["setting2"]) + except ValueError as e: + _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) + return + + self.setting1 = setting1 + self.setting2 = setting2 + + self._create_subscriptions(self.setting2) + + def _create_subscriptions(self, topic): + """ + Unsubscribe from all pub/sub topics and create a subscription to a topic in the configuration which triggers + the _handle_publish callback + """ + self.vip.pubsub.unsubscribe("pubsub", None, None) + + self.vip.pubsub.subscribe(peer='pubsub', + prefix=topic, + callback=self._handle_publish) + + def _handle_publish(self, peer, sender, bus, topic, headers, message): + """ + Callback triggered by the subscription setup using the topic from the agent's config file + """ + pass + + @Core.receiver("onstart") + def onstart(self, sender, **kwargs): + """ + This is method is called once the Agent has successfully connected to the platform. + This is a good place to setup subscriptions if they are not dynamic or + do any other startup activities that require a connection to the message bus. + Called after any configurations methods that are called at startup. + + Usually not needed if using the configuration store. + """ + # Example publish to pubsub + # self.vip.pubsub.publish('pubsub', "some/random/topic", message="HI!") + + # Example RPC call + # self.vip.rpc.call("some_agent", "some_method", arg1, arg2) + pass + + @Core.receiver("onstop") + def onstop(self, sender, **kwargs): + """ + This method is called when the Agent is about to shutdown, but before it disconnects from + the message bus. + """ + pass + + @RPC.export + def rpc_method(self, arg1, arg2, kwarg1=None, kwarg2=None): + """ + RPC method + + May be called from another agent via self.vip.rpc.call + """ + return self.setting1 + arg1 - arg2 + + + def main(): + """Main method called to start the agent.""" + utils.vip_main(tester, + version=__version__) + + + if __name__ == '__main__': + # Entry point for script + try: + sys.exit(main()) + except KeyboardInterrupt: + pass + +The resulting code is well documented with comments and documentation strings. It gives examples of how to do common +tasks in VOLTTRON Agents. The main agent code is found in `tester/agent.py`. + + +Building an Agent +================= + +The following section includes guidance on several important components for building agents in VOLTTRON. + + +Parse Packaged Configuration and Create Agent Instance +------------------------------------------------------ + +The code to parse a configuration file packaged and installed with the agent is found in the `tester` function: + +.. code-block:: python + + def tester(config_path, **kwargs): + """ + Parses the Agent configuration and returns an instance of + the agent created using that configuration. + + :param config_path: Path to a configuration file. + :type config_path: str + :returns: Tester + :rtype: Tester + """ + try: + config = utils.load_config(config_path) + except Exception: + config = {} + + if not config: + _log.info("Using Agent defaults for starting configuration.") + + setting1 = int(config.get('setting1', 1)) + setting2 = config.get('setting2', "some/random/topic") + + return Tester(setting1, setting2, **kwargs) + +The configuration is parsed with the `utils.load_config` function and the results are stored in the `config` variable. +An instance of the Agent is created from the parsed values and is returned. + + +Initialization and Configuration Store Support +---------------------------------------------- + +The :ref:`configuration store ` is a powerful feature. The agent template provides +a simple example of setting up default configuration store values and setting up a configuration handler. + +.. code-block:: python + + class Tester(Agent): + """ + Document agent constructor here. + """ + + def __init__(self, setting1=1, setting2="some/random/topic", **kwargs): + super(Tester, self).__init__(**kwargs) + _log.debug("vip_identity: " + self.core.identity) + + self.setting1 = setting1 + self.setting2 = setting2 + + self.default_config = {"setting1": setting1, + "setting2": setting2} + + # Set a default configuration to ensure that self.configure is called immediately to setup + # the agent. + self.vip.config.set_default("config", self.default_config) + # Hook self.configure up to changes to the configuration file "config". + self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") + + def configure(self, config_name, action, contents): + """ + Called after the Agent has connected to the message bus. If a configuration exists at startup + this will be called before onstart. + + Is called every time the configuration in the store changes. + """ + config = self.default_config.copy() + config.update(contents) + + _log.debug("Configuring Agent") + + try: + setting1 = int(config["setting1"]) + setting2 = str(config["setting2"]) + except ValueError as e: + _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) + return + + self.setting1 = setting1 + self.setting2 = setting2 + + self._create_subscriptions(self.setting2) + +.. note:: + + Support for the configuration store is instantiated by subscribing to configuration changes with + `self.vip.config.subscribe`. + + .. code-block:: python + + self.vip.config.subscribe(self.configure_main, actions=["NEW", "UPDATE"], pattern="config") + +Values in the default config can be built into the agent or come from the packaged configuration file. The subscribe +method tells our agent which function to call whenever there is a new or updated config file. For more information +on using the configuration store see :ref:`Agent Configuration Store `. + +`_create_subscriptions` (covered in a later section) will use the value in `self.setting2` to create a new subscription. + + +Agent Lifecycle Events +---------------------- + +The agent lifecycle is controlled in the agents VIP `core`. The agent lifecycle manages :ref:`scheduling and periodic +function calls `, the main agent loop, and trigger a number of signals for callbacks in the +concrete agent code. These callbacks are listed and described in the skeleton code below: + +.. note:: + + The lifecycle signals can trigger any method. To cause a method to be triggered by a lifecycle signal, use a + decorator: + + .. code-block:: python + + @Core.receiver("") + def my_callback(self, sender, **kwargs): + # do my lifecycle method callback + pass + +.. code-block:: python + + @Core.receiver("onsetup") + def onsetup(self, sender, **kwargs) + """ + This method is called after the agent has successfully connected to the platform, but before the scheduled + methods loop has started. This method not often used, but is most commonly used to define periodic + functions or do some pre-configuration. + """ + self.vip.core.periodic(60, send_request) + + @Core.receiver("onstart") + def onstart(self, sender, **kwargs): + """ + This method is called once the Agent has successfully connected to the platform. + This is a good place to setup subscriptions if they are not dynamic or to + do any other startup activities that require a connection to the message bus. + Called after any configurations methods that are called at startup. + + Usually not needed if using the configuration store. + """ + #Example publish to pubsub + self.vip.pubsub.publish('pubsub', "some/random/topic", message="HI!") + + #Example RPC call + self.vip.rpc.call("some_agent", "some_method", arg1, arg2) + + @Core.receiver("onstop") + def onstop(self, sender, **kwargs): + """ + This method is called when the Agent is about to shutdown, but before it disconnects from + the message bus. Common use-cases for this method are to stop periodic processing, closing connections and + setting agent state prior to cleanup. + """ + self.publishing = False + self.cache.close() + + @Core.receiver("onfinish") + def onfinish(self, sender, **kwargs) + """ + This method is called after all scheduled threads have concluded. This method is rarely used, but could be + used to send shut down signals to other agents, etc. + """ + self.vip.pubsub.publish('pubsub', 'some/topic', message=f'agent {self.core.identity} shutdown') + + +.. _Agent-Periodics-Scheduling: + +Periodics and Scheduling +------------------------ + +Periodic and Scheduled callback functions are callbacks made to functions in agent code from the thread scheduling in +the agent core. + + +Scheduled Callbacks +^^^^^^^^^^^^^^^^^^^ + +Scheduled callback functions are often used similarly to cron jobs to perform tasks at specific times, or to schedule +tasks ad-hoc as agent state is updated. There are 2 ways to schedule callbacks: using a decorator, or calling the +core's scheduling function. Example usage follows. + +.. code-block:: python + + # using the agent's core to schedule a task + self.core.schedule(periodic(5), self.sayhi) + + def sayhi(self): + print("Hello-World!") + +.. code-block:: python + + # using the decorator to schedule a task + @Core.schedule(cron('0 1 * * *')) + def cron_function(self): + print("this is a cron-scheduled function") + +.. note:: + + Scheduled Callbacks can use CRON scheduling, a datetime object, a number of seconds (from current time), or a + `periodic` which will make the schedule function as a periodic. + + .. code-block:: python + + # inside some agent method + self.core.schedule(t, function) + self.core.schedule(periodic(t), periodic_function) + self.core.schedule(cron('0 1 * * *'), cron_function) + + +Periodic Callbacks +^^^^^^^^^^^^^^^^^^ + +Periodic call back functions are functions which are repeatedly called at a regular interval until the periodic is +cancelled in the agent code or the agent stops running. Like scheduled callbacks, periodics can be specified using +either decorators or using core function calls. + +.. code-block:: python + + self.core.periodic(10, self.saybye) + + def saybye(self): + print('Good-bye Cruel World!') + +.. code-block:: python + + @Core.periodic(60) + def poll_api(self): + return requests.get("https://lmgtfy.com").json() + +.. note:: + + Periodic intervals are specified in seconds. + + +Publishing Data to the Message Bus +---------------------------------- + +The agent's VIP connection can be used to publish data to the message bus. The message published and topic to publish +to are determined by the agent implementation. Classes of agents already +:ref:`specified by VOLTTRON ` may have well-defined intended topic usage, see those agent +specifications for further detail. + +.. code-block:: python + + def publish_oscillating_update(self): + """ + Publish an "oscillating_value" which cycles between values 1 and 0 to the message bus using the topic + "some/topic/oscillating_value" + """ + self.publish_value = 1 if self.publish_value = 0 else 0 + self. vip.pubsub.publish('pubsub', 'some/topic/', message=f'{"oscillating_value": "{self.publish_value}"') + + +Setting up a Subscription +------------------------- + +The Agent creates a subscription to a topic on the message bus using the value of `self.setting2` in the method +`_create_subscription`. The messages for this subscription are handled with the `_handle_publish` method: + +.. code-block:: python + + def _create_subscriptions(self, topic): + """ + Unsubscribe from all pub/sub topics and create a subscription to a topic in the configuration which triggers + the _handle_publish callback + """ + # Unsubscribe from everything. + self.vip.pubsub.unsubscribe("pubsub", None, None) + + self.vip.pubsub.subscribe(peer='pubsub', + prefix=topic, + callback=self._handle_publish) + + def _handle_publish(self, peer, sender, bus, topic, headers, message): + """ + Callback triggered by the subscription setup using the topic from the agent's config file + """ + # By default no action is taken. + pass + +Alternatively, a decorator can be used to specify the function as a callback: + +.. code-block:: python + + @PubSub.subscribe('pubsub', "topic_prefix") + def _handle_publish(self, peer, sender, bus, topic, headers, message): + """ + Callback triggered by the subscription setup using the topic from the agent's config file + """ + # By default no action is taken. + pass + +`self.vip.pubsub.unsubscribe` can be used to unsubscribe from a topic: + +.. code-block:: python + + self.vip.pubsub.unsubscribe(peer='pubsub', + prefix=topic, + callback=self._handle_publish) + +Giving ``None`` as values for the prefix and callback argument will unsubscribe from everything on that bus. This is +handy for subscriptions that must be updated base on a configuration setting. + + +Heartbeat +^^^^^^^^^ + +The heartbeat subsystem provides access to a periodic publish so that others can observe the agent's status. Other +agents can subscribe to the `heartbeat` topic to see who is actively publishing to it. It it turned off by default. + +Enabling the `heartbeat` publish: + +.. code-block:: python + + self.vip.heartbeat.start_with_period(self._heartbeat_period) + +Subscribing to the heartbeat topic: + +.. code-block:: python + + self.vip.pubsub.subscribe(peer='pubsub', + prefix='heartbeat', + callback=handle_heartbeat) + + +Health +^^^^^^ + +The health subsystem adds extra status information to the an agent's heartbeat. Setting the status will start the +heartbeat if it wasn't already. Health is used to represent the internal state of the agent at runtime. `GOOD` health +indicates that all is fine with the agent and it is operating normally. `BAD` health indicates some kind of problem, +such as if an agent is unable to reach a remote web API. + +Example of setting health: + +.. code-block:: python + + from volttron.platform.messaging.health import STATUS_BAD, STATUS_GOOD, + + self.vip.health.set_status(STATUS_GOOD, "Configuration of agent successful") + + +Remote Procedure Calls +---------------------- + +An agent may receive commands from other agents via a Remote Procedure Call (RPC). +This is done with the `@RPC.export` decorator: + +.. code-block:: python + + @RPC.export + def rpc_method(self, arg1, arg2, kwarg1=None, kwarg2=None): + """ + RPC method. May be called from another agent via self.vip.rpc.call + """ + return self.setting1 + arg1 - arg2 + +To send an RPC call to another agent running on the platform, the agent must invoke the `rpc.call` method of its VIP +connection. + +.. code-block:: python + + # in agent code + def send_remote_procedure_call(self): + peer = "" + peer_method = "" + args = ["list", "of", "peer", "method", "arguments", "..."] + self.vip.rpc.call(peer, peer_method, *args) + + +Agent Resiliency +---------------- + +The VOLTTRON team has come up with a number of methods to help users develop more robust agents. + +#. Use `gevent.sleep()` in callbacks which perform long running functions. Long running functions can cause + other agent functions including those in the base agent to be delayed. Calling `gevent.sleep` transfers control from + the current executing greenlet to the next scheduled greenlet for the duration of the sleep, allowing other + components of the agent code to run. +#. Call `.get() on VIP subsystem calls (i.e. ``self.vip.rpc.call(...).get()``) to ensure that the call + returns a value or throws an Exception in a timely manner. A number of seconds can be provided to specify a timeout + duration. +#. Many of the :ref:`Operations Agents ` can be used to monitor agent health, status, publishing + frequency and more. Read up on the "ops agents" for more information. + + .. note:: + + If an agent crashes, becomes unreachable, etc., it is up to the user to restart or reconnect the agent. + +#. The main agent thread should monitor any spawned threads or processes to ensure they're cleaned up and/or exit + safely. + + +Building a resilient API +^^^^^^^^^^^^^^^^^^^^^^^^ + +Many agents export RPC calls or expose an API (application programming interface) which can be used by other agents +on the platform. The agent should include validation against input data for its API to ensure the agent is able to +continue to operate even if asked to handle faulty or malicious requests. + + +Type-hints +"""""""""" + +`Type-hints `_ can be used in function definitions to help the user +determine what the agent expects as input. + +.. warning:: + + Type-hints do not validate the type of data input to a function call, they are merely suggestions about what the + function is expecting. + +* To specify the type expected as input: + +.. code-block:: python + + # When calling this RPC method, the user should supply a string as input + @RPC.export + def type_hint_rpc(input_string: str): + +* To specify the type of function output: + +.. code-block:: python + + # This demonstrates a function that expects a string as input and that will return an integer value + @RPC.export + def type_hint_rpc(input_string: str) -> int: + +* Specifying multiple types: + +.. code-block:: python + + # Here our function expects either a string or dictionary + @RPC.export + def type_hint_rpc(input_json: Union[str, dict]) -> str: + +* To specify an optional argument with None as the default value: + +.. code-block:: python + + # 'Optional' is used to specify either a string should be passed or the default value 'None' will be used + @RPC.export + def type_hint_rpc(optional_input: Optional[str] = None) -> str: + +* These techniques can be combined: + +.. code-block:: python + + # 'Optional' can be used in combination with 'Union' for optional arguments which expect one of multiple types and + # default to None + @RPC.export + def type_hint_rpc(optional_input: Optional[Union[str, dict]] = None) -> str: + + +API Validation +"""""""""""""" + +Each function within an agent should validate its input parameters, especially with structured data. + +* Make use of isinstance to do type checking: + + .. code-block:: python + + @RPC.export + def type_checking_rpc(input_str: str) -> dict: + if not isinstance(input_str, str): + # Include a descriptive error message to help the user determine why input validation failed + # You can make use of 'f-strings' to help the user with debugging + raise ValueError( + f'The expected input type for function "type_checking_rpc" is str, received {type(input_str)}') + +* Add logic to validate the range of values supplied as input with a valid type: + + .. code-block:: python + + @RPC.export + def value_checking_rpc(input_json: Union[str, dict]) -> dict: + if not isinstance(input_json, str) or not isinstance(input_json, dict): + # You can make use of 'f-strings' to help the user determine why input validation failed + raise ValueError( + f'The expected input type for function "type_checking_rpc" is str or dict, received {type(input_str)}') + else: + # since we expected the input to be valid JSON, be sure that it can be correctly parsed + if isinstance(input_json, str): + input_json = json.loads(input_json) + # for this example, we expect our JSON to include two fields: test1 and test2 + # Use 'dict.get()' rather than 'dict[]' to return None and avoid causing a KeyError if the key + # is not present. Optionally, a second argument can be added to specify a default value to use in + # place of None: 'dict.get(, )' + test_1 = input_json.get("test1") + test_2 = input_json.get("test2") + # test 1 must be any string value + if not isinstance(test_1, str): + raise ValueError('Input JSON should contain key "test1" with value of type str') + # test 2 must be an integer value with value between 0 and 100 inclusive + if not isinstance(test_2, int) and 0 <= test_2 <= 100: + _log.warning(f'Field "test2" in input JSON was out of range (0 - 100): {test_2}, defaulting to 50') + test_2 = 50 + +.. note:: + + It is possible to restrict access to RPC functions using an :ref:`agent's authentication ` + capabilities. + + +Packaging Configuration +======================= + +The wizard will automatically create a `setup.py` file. This file sets up the name, version, required packages, method +to execute, etc. for the agent based on your answers to the wizard. The packaging process will also use this +information to name the resulting file. + +.. code-block:: python + + from setuptools import setup, find_packages + + MAIN_MODULE = 'agent' + + # Find the agent package that contains the main module + packages = find_packages('.') + agent_package = 'tester' + + # Find the version number from the main module + agent_module = agent_package + '.' + MAIN_MODULE + _temp = __import__(agent_module, globals(), locals(), ['__version__'], -1) + __version__ = _temp.__version__ + + # Setup + setup( + name=agent_package + 'agent', + version=__version__, + author_email="volttron@pnnl.gov", + url="https://volttron.org/", + description="Agent development tutorial.", + author="VOLTTRON Team", + install_requires=['volttron'], + packages=packages, + entry_points={ + 'setuptools.installation': [ + 'eggsecutable = ' + agent_module + ':main', + ] + } + ) + + +Launch Configuration +==================== + +In TestAgent, the wizard will automatically create a JSON file called "config". It contains configuration information +for the agent. This file contains examples of every data type supported by the configuration system: + +:: + + { + # VOLTTRON config files are JSON with support for python style comments. + "setting1": 2, # Integers + "setting2": "some/random/topic2", #Strings + "setting3": true, # Booleans: remember that in JSON true and false are not capitalized. + "setting4": false, + "setting5": 5.1, # Floating point numbers. + "setting6": [1,2,3,4], #Lists + "setting7": {"setting7a": "a", "setting7b": "b"} #Objects + } + + +.. _Agent-Packaging-and-Install: + +Packaging and Installation +========================== + +To install the agent the platform must be running. Start the platform with the command: + +.. code-block:: bash + + ./start-volttron + +.. note:: + + If you are not in an activated environment, this script will start the platform running in the background in the + correct environment. However the environment will not be activated for you; you must activate it yourself. + +Now we must install it into the platform. Use the following command to install it and add a tag for easily referring to +the agent. From the project directory, run the following command: + +.. code-block:: bash + + python scripts/install-agent.py -s TestAgent/ -c TestAgent/config -t testagent + +To verify it has been installed, use the following command: + +.. code-block:: bash + + vctl list + +This will result in output similar to the following: + +.. code-block:: bash + + AGENT IDENTITY TAG Status Health PRI + df testeragent-0.5 testeragent-0.5_1 testagent + +* The first string is a unique portion of the full UUID for the agent +* AGENT is the "name" of the agent based on the contents of its class name and the version in its setup.py. +* IDENTITY is the agent's identity in the platform. This is automatically assigned based on class name and instance + number. This agent's ID is _1 because it is the first instance. +* TAG is the name we assigned in the command above +* Status indicates the running status of an agent - running agents are *running*, agents which are not running will have + no listed status +* Health is an indication of the internal state of the agent. 'Healthy' agents will have GOOD health. If an agent + enters an error state, it will continue to run, but its health will be BAD. +* PRI is the priority for agents which have been "enabled" using the ``vctl enable`` command. + +When using lifecycle commands on agents, they can be referred to by the UUID (default) or AGENT (name) or TAG. + + +Running and Testing the Agent +============================= + +Now that the first pass of the agent code is complete, we can see if the agent works. It is highly-suggested to build +a set of automated tests for the agent code prior to writing the agent, and running those tests after the agent is +code-complete. Another quick way to determine if the agent is going the right direction is to run the agent on the +platform using the VOLTTRON command line interface. + + +From the Command Line +--------------------- + +To test the agent, we will start the platform (if not already running), launch the agent, and check the log file. +With the VOLTTRON environment activated, start the platform by running (if needed): + +.. code-block:: bash + + ./start-volttron + +You can launch the agent in three ways, all of which you can find by using the `vctl list` command: + +* By using the : + +.. code-block:: bash + + vctl start + +* By name: + +.. code-block:: bash + + vctl start --name testeragent-0.1 + +* By tag: + +.. code-block:: bash + + vctl start --tag testagent + +Check that it is :ref:`running `: + +.. code-block:: bash + + vctl status + +* Start the ListenerAgent as in the :ref:`platform installation guide `. +* Check the log file for messages indicating the TestAgent is receiving the ListenerAgents messages: + +.. code-block:: console + + 2021-01-12 16:46:58,291 (listeneragent-3.3 12136) __main__ INFO: Peer: pubsub, Sender: testeragent-0.1_1:, Bus: , Topic: some/random/topic, Headers: {'min_compatible_version': '5.0', 'max_compatible_version': ''}, Message: 'HI!' + + +Automated Test Cases and Documentation +-------------------------------------- + +Before contributing a new agent to the VOLTTRON source code repository, please consider adding two other essential +elements. + +1. Integration and unit test cases +2. README file that includes details of pre-requisite software, agent setup details (such as setting up databases, + permissions, etc.) and sample configuration + +VOLTTRON uses *pytest* as a framework for executing tests. All unit tests should be based on the *pytest* framework. +For instructions on writing unit and integration tests with *pytest*, refer to the +:ref:`Writing Agent Tests ` documentation. + +*pytest* is not installed with the distribution by default. To install py.test and it's dependencies execute the +following: + +.. code-block:: bash + + python bootstrap.py --testing + +.. note:: + + There are other options for different agent requirements. To see all of the options use: + + .. code-block:: bash + + python bootstrap.py --help + + in the Extra Package Options section. + +To run a single test module, use the command + +.. code-block:: bash + + pytest + +To run all of the tests in the volttron repository execute the following in the root directory using an activated +command prompt: + +.. code-block:: bash + + ./ci-integration/run-tests.sh + + +.. _Utility-Scripts: + +Scripts +======= + +In order to make repetitive tasks less repetitive the VOLTTRON team has create several scripts in order to help. These +tasks are available in the `scripts` directory. + +.. note:: + + In addition to the `scripts` directory, the VOLTTRON team has added the config directory to the .gitignore file. By + convention this is where we store customized scripts and configuration that will not be made public. Please feel + free to use this convention in your own processes. + +The `scripts/core` directory is laid out in such a way that we can build scripts on top of a base core. For example the +scripts in sub-folders such as the `historian-scripts` and `demo-comms` use the scripts that are present in the core +directory. + +The most widely used script is `scripts/install-agent.py`. The `install_agent.py` script will remove an agent if the +tag is already present, create a new agent package, and install the agent to :term:`VOLTTRON_HOME`. This script has +three required arguments and has the following signature: + +.. note:: + + Agent to Package must have a setup.py in the root of the directory. Additionally, the user must be in an activated + Python Virtual Environment for VOLTTRON + + .. code-block:: bash + + cd $VOLTTRON_ROOT + source env/bin/activate + +.. code-block:: console + + python scripts/install_agent.py -s -c -i --tag + +.. note:: + + The ``--help`` optional argument can be used with `scripts/install-agent.py` to view all available options for the + script + +The `install_agent.py` script will respect the `VOLTTRON_HOME` specified on the command line or set in the global +environment. An example of setting `VOLTTRON_HOME` to `/tmp/v1home` is as follows. + +.. code-block:: bash + + VOLTTRON_HOME=/tmp/v1home python scripts/install-agent.py -s -c --tag + + +.. toctree:: + :hidden: + :maxdepth: 1 + + agent-configuration-store + writing-agent-tests + developing-historian-agents + developing-market-agents + example-agents/index + specifications/index diff --git a/docs/source/developing-volttron/developing-agents/developing-historian-agents.rst b/docs/source/developing-volttron/developing-agents/developing-historian-agents.rst new file mode 100644 index 0000000000..4e134a6a9f --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/developing-historian-agents.rst @@ -0,0 +1,108 @@ +.. _Developing-Historian-Agents: + +=========================== +Developing Historian Agents +=========================== + +VOLTTRON provides a convenient base class for developing new historian agents. The base class automatically performs +a number of important functions: + +* subscribes to all pertinent topics +* caches published data to disk until it is successfully recorded to a historian +* creates the public facing interface for querying results +* spells out a simple interface for concrete implementation to meet to make a working Historian Agent +* breaks data to publish into reasonably sized chunks before handing it off to the concrete implementation for + publication. The size of the chunk is configurable +* sets up a separate thread for publication. If publication code needs to block for a long period of time (up to 10s of + seconds) this will no disrupt the collection of data from the bus or the functioning of the agent itself + +The VOLTTRON repository provides several :ref:`historians ` which can be deployed without +modification. + + +BaseHistorian +------------- + +All Historians must inherit from the BaseHistorian class in volttron.platform.agent.base\_historian and implement the +following methods: + + +publish_to_historian(self, to_publish_list) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This method is called by the BaseHistorian class when it has received data from the message bus to be published. +`to_publish_list` is a list of records to publish in the form: + +:: + + [ + { + '_id': 1, + 'timestamp': timestamp, + 'source': 'scrape', + 'topic': 'campus/building/unit/point', + 'value': 90, + 'meta': {'units':'F'} + } + { + ... + } + ] + +- **_id** - ID of the record used for internal record tracking. All IDs in the list are unique +- **timestamp** - Python datetime object of the time data was published at timezone UTC +- **source** - Source of the data: can be scrape, analysis, log, or actuator +- **topic** - Topic data was published on, topic prefix's such as "device" are dropped +- **value** - Value of the data, can be any type. +- **meta** - Metadata for the value, some sources will omit this entirely. + +For each item in the list the concrete implementation should attempt to publish (or discard if non-publishable) every +item in the list. Publication should be batched if possible. For every successfully published record and every record +that is to be discarded because it is non-publishable the agent must call `report_handled` on those records. Records +that should be published but were not for whatever reason require no action. Future calls to `publish_to`_historian` +will include these unpublished records. `publish_to_historian` is always called with the oldest unhandled records. This +allows the historian to no lose data due to lost connections or other problems. + +As a convenience `report_all_handled` can be called if all of the items in `published_list` were successfully handled. + + +query_topic_list(self) +~~~~~~~~~~~~~~~~~~~~~~ + +Must return a list of all unique topics published. + + +query_historian(self, topic, start=None, end=None, skip=0, count=None, order=None) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +This function must return the results of a query in the form: + +:: + + {"values": [(timestamp1: value1), (timestamp2: value2), ...], + "metadata": {"key1": value1, "key2": value2, ...}} + +metadata is not required (The caller will normalize this to {} for you if you leave it out) + +- **topic** - the topic the user is querying for +- **start** - datetime of the start of the query, `None` for the beginning of time +- **end** - datetime of the end of of the query, `None` for the end of time +- **skip** - skip this number of results (for pagination) +- **count** - return at maximum this number of results (for pagination) +- **order** - `FIRST_TO_LAST` for ascending time stamps, `LAST_TO_FIRST` for descending time stamps + + +historian_setup(self) +~~~~~~~~~~~~~~~~~~~~~~ + +Implementing this is optional. This function is run on the same thread as the rest of the concrete implementation at +startup. It is meant for connection setup. + + +Example Historian +----------------- + +An example historian can be found in the `examples/CSVHistorian` directory in the VOLTTRON repository. This example +historian uses a CSV file as the persistent data store. It is recommended to use this agent as a reference for +developing new historian agents. diff --git a/docs/source/devguides/agent_development/Developing-Market-Agents.rst b/docs/source/developing-volttron/developing-agents/developing-market-agents.rst similarity index 56% rename from docs/source/devguides/agent_development/Developing-Market-Agents.rst rename to docs/source/developing-volttron/developing-agents/developing-market-agents.rst index 4ff572a7fc..5bafb13d74 100644 --- a/docs/source/devguides/agent_development/Developing-Market-Agents.rst +++ b/docs/source/developing-volttron/developing-agents/developing-market-agents.rst @@ -4,24 +4,20 @@ Developing Market Agents ======================== -VOLTTRON provides a convenient base class for developing new market -agents. The base class automatically subscribes to all pertinent topics, -and spells out a simple interface for concrete implementation to -make a working Market Agent. +VOLTTRON provides a convenient base class for developing new market agents. The base class automatically subscribes to all pertinent topics, +and spells out a simple interface for concrete implementation to make a working Market Agent. -Markets are implemented by the Market Service Agent which is a core service agent. -The Market Service Agent publishes information on several topics to which the base -agent automatically subscribes. The base agent also provides all the methods you will -need to interact with the Market Service Agent to implement your market transactions. +Markets are implemented by the Market Service Agent which is a core service agent. The Market Service Agent publishes +information on several topics to which the base agent automatically subscribes. The base agent also provides all the +methods you will need to interact with the Market Service Agent to implement your market transactions. MarketAgent =========== -All Market Agents must inherit from the MarketAgent class in -volttron.platform.agent.base_market_agent and call the following -method +All Market Agents must inherit from the MarketAgent class in `volttron.platform.agent.base_market_agent` and call the +following method: -:: +.. code-block:: python self.join_market(market_name, buyer_seller, reservation_callback, offer_callback, aggregate_callback, price_callback, error_callback) @@ -29,14 +25,15 @@ This method causes the market agent to join a single market. If the agent wishe markets it may be called once for each market. The first argument is the name of the market to join and this name must be unique across the entire volttron instance because all markets are implemented by a single market service agent for each volttron instance. The second argument describes the role that this agent wished to play in this market. -The value is imported as +The value is imported as: -:: +.. code-block:: python from volttron.platform.agent.base_market_agent.buy_sell import BUYER, SELLER Arguments 3-7 are callback methods that the agent may implement as needed for the agent's participation in the market. + The Reservation Callback ------------------------ @@ -44,18 +41,19 @@ The Reservation Callback reservation_callback(self, timestamp, market_name, buyer_seller) -This method is called when it is time to reserve a slot in the market for the current market cycle. -If this callback is not registered a slot is reserved for every market cycle. If this callback is registered -it is called for each market cycle and returns True if a reservation is wanted and False if a reservation -is not wanted. The name of the market and the roll being played are provided so that a single callback can handle -several markets. If the agent joins three markets with the same reservation callback routine it will be called three -times with the appropriate market name and buyer/seller role for each call. The MeterAgent example -illustrates the use of this of this method and how to determine whether to make an offer when the reservation is -refused. -A market will only exist if there are reservations for at least one buyer or one seller. -If the market fails to achieve the minimum participation the error callback will be called. -If only buyers or only sellers make reservations any offers will be rejected -with the reason that the market has not formed. +This method is called when it is time to reserve a slot in the market for the current market cycle. If this callback is +not registered a slot is reserved for every market cycle. If this callback is registered it is called for each market +cycle and returns `True` if a reservation is wanted and `False` if a reservation is not wanted. + +The name of the market and the roll being played are provided so that a single callback can handle several markets. +If the agent joins three markets with the same reservation callback routine it will be called three times with the +appropriate market name and buyer/seller role for each call. The MeterAgent example illustrates the use of this of this +method and how to determine whether to make an offer when the reservation is refused. + +A market will only exist if there are reservations for at least one buyer or one seller. If the market fails to achieve +the minimum participation the error callback will be called. If only buyers or only sellers make reservations any +offers will be rejected with the reason that the market has not formed. + The Offer Callback ------------------ @@ -67,12 +65,13 @@ The Offer Callback If the agent has made a reservation for the market and a callback has been registered this callback is called. If the agent wishes to make an offer at this time the market agent computes either a supply or a demand curve as appropriate and offers the curve to the market service by calling the -:py:meth:`make_offer ` -method. -The name of the market and the roll being played are provided so that a single callback can handle -several markets. +:py:meth:`make_offer ` method. + +The name of the market and the roll being played are provided so that a single callback can handle several markets. + For each market joined either an offer callback, an aggregate callback, or a cleared price callback is required. + The Aggregate Callback ---------------------- @@ -80,19 +79,21 @@ The Aggregate Callback aggregate_callback(self, timestamp, market_name, buyer_seller, aggregate_curve) -When a market has received all its buy offers it calculates an aggregate demand curve. -When the market receives all of its sell offers it calculates an aggregate supply curve. -This callback delivers the aggregate curve to the market agent whenever the appropriate curve becomes available. -If the market agent wants to use this opportunity to make an offer on this or another market -it would do that using the -:py:meth:`make_offer ` -method. -If the aggregate demand curve is received, obviously you could only make a supply offer on this market. -If the aggregate supply curve is received, obviously you could only make a demand offer on this market. -You can of course use this information to make an offer on another market. The example AHUAgent does this. -The name of the market and the roll being played are provided so that a single callback can handle -several markets. -For each market joined either an offer callback, an aggregate callback, or a cleared price callback is required. +When a market has received all its buy offers it calculates an aggregate demand curve. When the market receives all of +its sell offers it calculates an aggregate supply curve. This callback delivers the aggregate curve to the market agent +whenever the appropriate curve becomes available. + +If the market agent wants to use this opportunity to make an offer on this or another market it would do that using the +:py:meth:`make_offer ` method. + +* If the aggregate demand curve is received, only a supply offer may be submitted for this market +* If the aggregate supply curve is received, only make a demand offer will be accepted by this market. + +You may use this information to make an offer on another market; The example AHUAgent does this. The name of the +market and the roll being played are provided so that a single callback can handle several markets. + +For each market joined, either an offer callback, an aggregate callback, or a cleared price callback is required. + The Price Callback ------------------ @@ -101,17 +102,17 @@ The Price Callback price_callback(self, timestamp, market_name, buyer_seller, price, quantity) -This callback is called when the market clears. -If the market agent wants to use this opportunity to make an offer on this or another market -it would do that using the -:py:meth:`make_offer ` -method. -Once the market has cleared you can't make an offer on that market. -You can of course use this information to make an offer on another market. The example AHUAgent does this. -The name of the market and the roll being played are provided so that a single callback can handle -several markets. +This callback is called when the market clears. If the market agent wants to use this opportunity to make an offer on +this or another market it would do that using the +:py:meth:`make_offer ` method. + +Once the market has cleared you can not make an offer on that market. Again, you may use this information to make an +offer on another market as in the example AHUAgent. The name of the market and the roll being played are provided so +that a single callback can handle several markets. + For each market joined either an offer callback, an aggregate callback, or a cleared price callback is required. + The Error Callback ------------------ @@ -119,10 +120,9 @@ The Error Callback error_callback(self, timestamp, market_name, buyer_seller, error_code, error_message, aux) -This callback is called when an error occurs isn't in response to an RPC call. -The error codes are documented in +This callback is called when an error occurs isn't in response to an RPC call. The error codes are documented in: -:: +.. code-block:: python from volttron.platform.agent.base_market_agent.error_codes import NOT_FORMED, SHORT_OFFERS, BAD_STATE, NO_INTERSECT @@ -132,5 +132,3 @@ The error codes are documented in * NO_INTERSECT - If the market fails to clear this would be called while clearing the market and an auxillary array will be included. The auxillary array contains comparisons between the supply max, supply min, demand max and demand min. They allow the market client to make determinations about why the curves did not intersect that may be useful. The error callback is optional, but highly recommended. - - diff --git a/services/core/MasterDriverAgent/master_driver/__init__.py b/docs/source/developing-volttron/developing-agents/documenting-agents.rst similarity index 100% rename from services/core/MasterDriverAgent/master_driver/__init__.py rename to docs/source/developing-volttron/developing-agents/documenting-agents.rst diff --git a/docs/source/developing-volttron/developing-agents/example-agents/c-agent.rst b/docs/source/developing-volttron/developing-agents/example-agents/c-agent.rst new file mode 100644 index 0000000000..f58cf0c587 --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/example-agents/c-agent.rst @@ -0,0 +1,48 @@ +.. _C-Agent: + +======= +C Agent +======= + +The C Agent uses the `ctypes` module to load a shared object into memory so its functions can be called from Python. + +There are two versions of the C Agent: + +* A standard agent that can be installed with the agent installation process +* A driver which can can be controlled using the Platform Driver Agent + + +Building the Shared Object +-------------------------- + +The shared object library must be built before installing C Agent examples. Running ``make`` in the C Agent source +directory will compile the provided C code using the position independent flag, a requirement for creating shared +objects. + +Files created by make can be removed by running + +.. code-block:: bash + + make clean + + +Agent Installation +------------------ + +After building the shared object library the standard agent can be installed with the ``scripts/install-agent.py`` +script: + +.. code-block:: bash + + python scripts/install-agent.py -s examples/CAgent + +The other is a driver interface for the Platform Driver. To use the C driver, the driver code file must be moved into +the Platform Driver's `interfaces` directory: + + :: + + examples/CAgent/c_agent/driver/cdriver -> services/core/PlatformDriverAgent/platform_driver/interfaces + + +The C Driver configuration tells the interface where to find the shared object. An example is available in the C +Agent's `driver` directory. diff --git a/docs/source/developing-volttron/developing-agents/example-agents/config-actuation.rst b/docs/source/developing-volttron/developing-agents/example-agents/config-actuation.rst new file mode 100644 index 0000000000..b8e2fd8cdc --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/example-agents/config-actuation.rst @@ -0,0 +1,22 @@ +.. _Config-Actuation: + +======================== +Config Actuation Example +======================== + +The Config Actuation example attempts to set points on a device when files are added or updated in its +:ref:`configuration store `. + + +Configuration +------------- + +The name of a configuration file must match the name of the device to be actuated. The configuration file is a JSON +dictionary of point name and value pairs. Any number of points on the device can be listed in the config. + +.. code-block:: python + + { + "point0": value, + "point1": value + } diff --git a/docs/source/devguides/supporting/examples/CSVHistorianAgent.rst b/docs/source/developing-volttron/developing-agents/example-agents/csv-historian.rst similarity index 58% rename from docs/source/devguides/supporting/examples/CSVHistorianAgent.rst rename to docs/source/developing-volttron/developing-agents/example-agents/csv-historian.rst index cbf2a74f32..7aa02b9d29 100644 --- a/docs/source/devguides/supporting/examples/CSVHistorianAgent.rst +++ b/docs/source/developing-volttron/developing-agents/example-agents/csv-historian.rst @@ -1,18 +1,17 @@ -.. _CSVHistorian: +.. _CSV-Historian: -CSVHistorian -============ +============= +CSV Historian +============= -The CSV Historian Agent is an example historian agent that writes device data -to the CSV file specified in the configuration file. +The CSV Historian Agent is an example historian agent that writes device data to the CSV file specified in the +configuration file. -This is the code created during Kyle Monson's presentation on VOLTTRON Historians -at the 2017 VOLTTRON Technical Meeting. -Explanation of CSVHistorian ---------------------------- +Explanation of CSV Historian +============================ -Setup logging for later. +The Utils module of the VOLTTRON platform includes functions for setting up global logging for the platform: .. code-block:: python @@ -20,9 +19,9 @@ Setup logging for later. _log = logging.getLogger(__name__) -The `historian` method is called by `utils.vip_main` when the agents is started (see below). `utils.vip_main` -expects a callable object that returns an instance of an Agent. This method -of dealing with a configuration file and instantiating an Agent is common practice. +The ``historian`` method is called by ``utils.vip_main`` when the agents is started (see below). ``utils.vip_main`` +expects a callable object that returns an instance of an Agent. This method of dealing with a configuration file and +instantiating an Agent is common practice. .. code-block:: python @@ -36,18 +35,18 @@ of dealing with a configuration file and instantiating an Agent is common practi return CSVHistorian(output_path = output_path, **kwargs) -All historians must inherit from `BaseHistorian`. The `BaseHistorian` class handles the capturing -and caching of all device, logging, analysis, and record data published to the message bus. +All historians must inherit from `BaseHistorian`. The `BaseHistorian` class handles the capturing and caching of all +device, logging, analysis, and record data published to the message bus. .. code-block:: python class CSVHistorian(BaseHistorian): -The Base Historian creates a separate thread to handle publishing data to the data store. In this thread -the Base Historian calls two methods on the created historian, `historian_setup` and `publish_to_historian`. +The Base Historian creates a separate thread to handle publishing data to the data store. In this thread the Base +Historian calls two methods on the created historian, ``historian_setup`` and ``publish_to_historian``. -The Base Historian created the new thread in it's `__init__` method. This means that any instance variables -must assigned in `__init__` before calling the Base Historian's `__init__` method. +The Base Historian created the new thread in it's ``__init__`` method. This means that any instance variables +must assigned in ``__init__`` before calling the Base Historian's ``__init__`` method. .. code-block:: python @@ -56,12 +55,11 @@ must assigned in `__init__` before calling the Base Historian's `__init__` metho self.csv_dict = None super(CSVHistorian, self).__init__(**kwargs) -Historian setup is called shortly after the new thread starts. This is where a Historian sets up a connect -the first time. In our example we create the Dictwriter object that we will use to create and add lines to the -CSV file. +Historian setup is called shortly after the new thread starts. This is where a Historian sets up a connect the first +time. In our example we create the `Dictwriter` object that we will use to create and add lines to the CSV file. -We keep a reference to the file object so that we may flush its contents to disk after writing the header -and after we have written new data to the file. +We keep a reference to the file object so that we may flush its contents to disk after writing the header and after we +have written new data to the file. The CSV file we create will have 4 columns: `timestamp`, `source`, `topic`, and `value`. @@ -73,8 +71,8 @@ The CSV file we create will have 4 columns: `timestamp`, `source`, `topic`, and self.csv_dict.writeheader() self.f.flush() -`publish_to_historian` is called when data is ready to be published. It is passed a list of dictionaries. -Each dictionary contains a record of a single value that was published to the message bus. +``publish_to_historian`` is called when data is ready to be published. It is passed a list of dictionaries. Each +dictionary contains a record of a single value that was published to the message bus. The dictionary takes the form: @@ -89,9 +87,9 @@ The dictionary takes the form: 'meta': {"units": "F", "tz": "UTC", "type": "float"} #Meta data published with the topic } -Once the data is written to the historian we call `self.report_all_handled()` to inform the `BaseHistorian` -that all data we received was successfully published and can be removed from the cache. Then we can flush the -file to ensure that the data is written to disk. +Once the data is written to the historian we call ``self.report_all_handled()`` to inform the `BaseHistorian` that all +data we received was successfully published and can be removed from the cache. Then we can flush the file to ensure +that the data is written to disk. .. code-block:: python @@ -111,11 +109,13 @@ file to ensure that the data is written to disk. This agent does not support the Historian Query interface. + Agent Testing ------------- The CSV Historian can be tested by running the included `launch_my_historian.sh` script. + Agent Installation ------------------ diff --git a/docs/source/devguides/supporting/utilities/DataPublisher.rst b/docs/source/developing-volttron/developing-agents/example-agents/data-publisher.rst similarity index 85% rename from docs/source/devguides/supporting/utilities/DataPublisher.rst rename to docs/source/developing-volttron/developing-agents/example-agents/data-publisher.rst index 9a81cdd890..0316dec696 100644 --- a/docs/source/devguides/supporting/utilities/DataPublisher.rst +++ b/docs/source/developing-volttron/developing-agents/example-agents/data-publisher.rst @@ -1,19 +1,25 @@ -.. _DataPublisher: +.. _Data-Publisher: -============= -DataPublisher -============= +============== +Data Publisher +============== + +This is a simple agent that plays back data either from the config store or a CSV to the configured topic. It can also +provide basic emulation of the Actuator Agent for testing agents that expect to be able to set points on a device in +response to device publishes. -This is a simple agent that plays back data either from the config -store or a CSV to the configured topic. It can also provide basic -emulation of the actuator agent for testing agents that expect to -be able to set points on a device in response to device publishes. Installation notes ------------------ -In order to simulate the actuator you must install the agent -with the VIP identity of `platform.actuator`. +In order to simulate the actuator you must install the agent with the :term:`VIP Identity` of `platform.actuator`. If +an an actuator is already installed on the platform, this will cause VIP identity conflicts. To install the +agent, the agent install script can be used: + +.. code-block:: bash + + python scripts/install-agent.py -s examples/DataPublisher -c + Configuration ------------- @@ -74,19 +80,18 @@ Configuration "replay_data": false } + CSV File Format --------------- -The CSV file must have a single header line. The column names are appended to the -`basepath` setting in the configuration file and the resulting topic is normalized -to remove extra `/` characters. The values are all treated as floating -point values and converted accordingly. +The CSV file must have a single header line. The column names are appended to the `basepath` setting in the +configuration file and the resulting topic is normalized to remove extra` ``/`` characters. The values are all treated +as floating point values and converted accordingly. -The corresponding device for each point is determined and the values are combined -together to create an `all` topic publish for each device. +The corresponding device for each point is determined and the values are combined together to create an `all` topic +publish for each device. -If a `Timestamp` column is in the input it may be used to set the timestamp in the -header of the published data. +If a `Timestamp` column is in the input it may be used to set the timestamp in the header of the published data. .. csv-table:: Publisher Data :header: Timestamp,centrifugal_chiller/OutsideAirTemperature,centrifugal_chiller/DischargeAirTemperatureSetPoint,fuel_cell/DischargeAirTemperature,fuel_cell/CompressorStatus,absorption_chiller/SupplyFanSpeed,absorption_chiller/SupplyFanStatus,boiler/DuctStaticPressureSetPoint,boiler/DuctStaticPressure diff --git a/docs/source/supporting/examples/DDSAgent.rst b/docs/source/developing-volttron/developing-agents/example-agents/dds-agent.rst similarity index 50% rename from docs/source/supporting/examples/DDSAgent.rst rename to docs/source/developing-volttron/developing-agents/example-agents/dds-agent.rst index 50a4f3a83c..c25acfa7f9 100644 --- a/docs/source/supporting/examples/DDSAgent.rst +++ b/docs/source/developing-volttron/developing-agents/example-agents/dds-agent.rst @@ -1,46 +1,41 @@ -.. _DDSAgent: +.. _DDS-Agent: ========= DDS Agent ========= -The DDS example agent demonstrates VOLTTRON's capacity to be extended -with tools and libraries not used in the core codebase. DDS is a messaging -platform that implements a publish-subscribe system for well defined data -types. +The DDS example agent demonstrates VOLTTRON's capacity to be extended with tools and libraries not used in the core +codebase. DDS is a messaging platform that implements a publish-subscribe system for well defined data types. -This agent example is meant to be run the command line, as opposed to -installing it like other agents. From the `examples/DDSAgent` directory, -the command to start it is: +This agent example is meant to be run the command line, as opposed to installing it like other agents. From the +`examples/DDSAgent` directory, the command to start it is: .. code-block:: shell $ AGENT_CONFIG=config python -m ddsagent.agent -The rticonnextdds-connector library needs to be installed for this example -to function properly. We'll retrieve it from GitHub since it is not available -through pip. Download the source with +The `rticonnextdds-connector` library needs to be installed for this example to function properly. We'll retrieve it +from GitHub since it is not available through Pip. Download the source with: .. code-block:: shell $ wget https://github.com/rticommunity/rticonnextdds-connector/archive/master.zip -and unpack it in `examples/DDSAgent/ddsagent` with +and unpack it in `examples/DDSAgent/ddsagent` with: .. code-block:: shell $ unzip master.zip -The `demo_publish()` output can be viewed with the rtishapesdemo available -from rti. +The ``demo_publish()`` output can be viewed with the `rtishapesdemo` available from RTI. + Configuration ------------- -Each data type that this agent will have access to needs to have an xml document -defining its structure. The xml will include a participant name, publisher name, -and a subscriber name. These are recorded in the configuration with the location -on disk of the xml file. +Each data type that this agent will have access to needs to have an XML document defining its structure. The XML will +include a participant name, publisher name, and a subscriber name. These are recorded in the configuration with the +location on disk of the XML file. .. code-block:: json diff --git a/docs/source/devguides/supporting/examples/files/cmd-image.png b/docs/source/developing-volttron/developing-agents/example-agents/files/cmd-image.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/cmd-image.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/cmd-image.png diff --git a/docs/source/devguides/supporting/examples/files/cmd-image_2.png b/docs/source/developing-volttron/developing-agents/example-agents/files/cmd-image_2.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/cmd-image_2.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/cmd-image_2.png diff --git a/docs/source/devguides/supporting/examples/files/env-vars-image_1.png b/docs/source/developing-volttron/developing-agents/example-agents/files/env-vars-image_1.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/env-vars-image_1.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/env-vars-image_1.png diff --git a/docs/source/devguides/supporting/examples/files/env-vars-image_2.png b/docs/source/developing-volttron/developing-agents/example-agents/files/env-vars-image_2.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/env-vars-image_2.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/env-vars-image_2.png diff --git a/docs/source/devguides/supporting/examples/files/extract-image_1.png b/docs/source/developing-volttron/developing-agents/example-agents/files/extract-image_1.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/extract-image_1.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/extract-image_1.png diff --git a/docs/source/devguides/supporting/examples/files/extract-image_2.png b/docs/source/developing-volttron/developing-agents/example-agents/files/extract-image_2.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/extract-image_2.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/extract-image_2.png diff --git a/docs/source/devguides/supporting/examples/files/github-image.png b/docs/source/developing-volttron/developing-agents/example-agents/files/github-image.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/github-image.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/github-image.png diff --git a/docs/source/devguides/supporting/examples/files/github-zip-image.png b/docs/source/developing-volttron/developing-agents/example-agents/files/github-zip-image.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/github-zip-image.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/github-zip-image.png diff --git a/docs/source/devguides/supporting/examples/files/matlab-agent-diagram.png b/docs/source/developing-volttron/developing-agents/example-agents/files/matlab-agent-diagram.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/matlab-agent-diagram.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/matlab-agent-diagram.png diff --git a/docs/source/devguides/supporting/examples/files/node-red-flow.png b/docs/source/developing-volttron/developing-agents/example-agents/files/node-red-flow.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/node-red-flow.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/node-red-flow.png diff --git a/docs/source/devguides/supporting/examples/files/node-red.png b/docs/source/developing-volttron/developing-agents/example-agents/files/node-red.png similarity index 100% rename from docs/source/devguides/supporting/examples/files/node-red.png rename to docs/source/developing-volttron/developing-agents/example-agents/files/node-red.png diff --git a/docs/source/developing-volttron/developing-agents/example-agents/index.rst b/docs/source/developing-volttron/developing-agents/example-agents/index.rst new file mode 100644 index 0000000000..f071612c7f --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/example-agents/index.rst @@ -0,0 +1,27 @@ +.. _Example-Agents: + +============== +Example Agents +============== + +Some example agents are included with the platform to help explore its features. These agents represent concrete +implementations of important agent sub-types such as Historians or Weather Agents, or demonstrate a development pattern +for accomplishing common tasks. + +More complex agents contributed by other researchers can also be found in the examples directory. It is recommended +that developers new to VOLTTRON understand the example agents first before diving into the other agents. + + +.. toctree:: + :maxdepth: 1 + + c-agent + config-actuation + csv-historian + data-publisher + dds-agent + listener-agent + matlab-agent + node-red + scheduler-example-agent + simple-web-agent-walk-through diff --git a/docs/source/devguides/supporting/examples/ListenerAgent.rst b/docs/source/developing-volttron/developing-agents/example-agents/listener-agent.rst similarity index 63% rename from docs/source/devguides/supporting/examples/ListenerAgent.rst rename to docs/source/developing-volttron/developing-agents/example-agents/listener-agent.rst index 0f921f6008..3b7673179b 100644 --- a/docs/source/devguides/supporting/examples/ListenerAgent.rst +++ b/docs/source/developing-volttron/developing-agents/example-agents/listener-agent.rst @@ -1,15 +1,15 @@ -.. _ListenerAgent: +.. _Listener-Agent: -ListenerAgent -------------- +============== +Listener Agent +============== -The ListenerAgent subscribes to all topics and is useful for testing -that agents being developed are publishing correctly. It also provides a -template for building other agents as it expresses the requirements of a -platform agent. +The ListenerAgent subscribes to all topics and is useful for testing that agents being developed are publishing +correctly. It also provides a template for building other agents as it expresses the requirements of a platform agent. -Explanation of ListenerAgent -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Explanation of Listener Agent Code +================================== Use :code:`utils` to setup logging, which we’ll use later. @@ -19,20 +19,19 @@ Use :code:`utils` to setup logging, which we’ll use later. _log = logging.getLogger(__name__) -The Listener agent extends (inherits from) the Agent class for its -default functionality such as responding to platform commands: +The Listener agent extends (inherits from) the Agent class for its default functionality such as responding to platform +commands: .. code-block:: python class ListenerAgent(Agent): - '''Listens to everything and publishes a heartbeat according to the + ''' + Listens to everything and publishes a heartbeat according to the heartbeat period specified in the settings module. ''' -After the class definition, the Listener agent reads the configuration -file, extracts the configuration parameters, and initializes any -Listener agent instance variable. This is done through the agent's :code:`__init__` -method: +After the class definition, the Listener agent reads the configuration file, extracts the configuration parameters, and +initializes any Listener agent instance variable. This is done through the agent's ``__init__`` method: .. code-block:: python @@ -50,10 +49,9 @@ method: else: self._logfn = _log.info -Next, the Listener agent will run its setup method. This method is -tagged to run after the agent is initialized by the decorator -``@Core.receiver('onsetup')``. This method accesses the configuration -parameters, logs a message to the platform log, and sets the agent ID. +Next, the Listener agent will run its setup method. This method is tagged to run after the agent is initialized by the +decorator ``@Core.receiver('onsetup')``. This method accesses the configuration parameters, logs a message to the +platform log, and sets the agent ID. .. code-block:: python @@ -63,19 +61,11 @@ parameters, logs a message to the platform log, and sets the agent ID. _log.info(self.config.get('message', DEFAULT_MESSAGE)) self._agent_id = self.config.get('agentid') -The Listener agent subscribes to all topics published on the message -bus. Publish and subscribe interactions with the message bus are handled by -the PubSub module located at: - - ``~/volttron/volttron/platform/vip/agent/subsystems/pubsub.py`` +The Listener agent subscribes to all topics published on the message bus. Publish and subscribe interactions with the +message bus are handled by the `PubSub` module located at `~/volttron/volttron/platform/vip/agent/subsystems/pubsub.py`. -The Listener agent uses an empty string to subscribe to all messages -published. This is done in a -`decorator `__ -for simplifying subscriptions. - -It also checks for the sender being ``pubsub.compat`` in case there are -any VOLTTRON 2.0 agents running on the platform. +The Listener agent uses an empty string to subscribe to all messages published. This is done in a +`decorator `_ for simplifying subscriptions. .. code-block:: python @@ -87,5 +77,3 @@ any VOLTTRON 2.0 agents running on the platform. self._logfn( "Peer: %r, Sender: %r:, Bus: %r, Topic: %r, Headers: %r, " "Message: %r", peer, sender, bus, topic, headers, message) - - diff --git a/docs/source/developing-volttron/developing-agents/example-agents/matlab-agent.rst b/docs/source/developing-volttron/developing-agents/example-agents/matlab-agent.rst new file mode 100644 index 0000000000..88ed164fab --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/example-agents/matlab-agent.rst @@ -0,0 +1,606 @@ +.. _Matlab-Agent: + +============ +MatLab Agent +============ + +The MatLab agent and Matlab Standalone Agent together are example agents that allow for MatLab scripts to be run in a +Windows environment and interact with the VOLTTRON platform running in a Linux environment. + +The MatLab agent takes advantage of the config store to dynamically send scripts and commandline arguments across the +message bus to one or more Standalone Agents in Windows. The Standalone Agent then executes the requested script and +arguments, and sends back the results to the MatLab agent. + + +Overview of Matlab Agents +========================= + +There are multiple components that are used for the MatLab agent. This diagram is to represent the components that are +connected to the MatLab Agents. In this example, the scripts involved are based on the default settings in the MatLab +Agent. + +|matlab-agent-diagram| + + +MatLabAgentV2 +------------- + +MatLabAgentV2 publishes the name of a python script along with any command line arguments that are needed for the script +to the appropriate topic. The agent then listens on another topic, and whenever anything is published on this topic, it +stores the message in the log file chosen when the VOLTTRON instance is started. If there are multiple standalone +agents, the agent can send a a script to each of them, along with their own set of command line arguments. In this +case, each script name and set of command line arguments should be sent to separate subtopics. This is done so that no +matter how many standalone agents are in use, MatLabAgentV2 will record all of their responses. + +.. code:: + + class MatlabAgentV2(Agent): + + def __init__(self,script_names=[], script_args=[], topics_to_matlab=[], + topics_to_volttron=None,**kwargs): + + super(MatlabAgentV2, self).__init__(**kwargs) + _log.debug("vip_identity: " + self.core.identity) + + self.script_names = script_names + self.script_args = script_args + self.topics_to_matlab = topics_to_matlab + self.topics_to_volttron = topics_to_volttron + self.default_config = {"script_names": script_names, + "script_args": script_args, + "topics_to_matlab": topics_to_matlab, + "topics_to_volttron": topics_to_volttron} + + + #Set a default configuration to ensure that self.configure is called immediately to setup + #the agent. + self.vip.config.set_default("config", self.default_config) + #Hook self.configure up to changes to the configuration file "config". + self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") + + def configure(self, config_name, action, contents): + """ + Called after the Agent has connected to the message bus. + If a configuration exists at startup this will be + called before onstart. + Is called every time the configuration in the store changes. + """ + config = self.default_config.copy() + config.update(contents) + + _log.debug("Configuring Agent") + + try: + script_names = config["script_names"] + script_args = config["script_args"] + topics_to_matlab = config["topics_to_matlab"] + topics_to_volttron = config["topics_to_volttron"] + + except ValueError as e: + _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) + return + + self.script_names = script_names + self.script_args = script_args + self.topics_to_matlab = topics_to_matlab + self.topics_to_volttron = topics_to_volttron + self._create_subscriptions(self.topics_to_volttron) + + for script in range(len(self.script_names)): + cmd_args = "" + for x in range(len(self.script_args[script])): + cmd_args += ",{}".format(self.script_args[script][x]) + _log.debug("Publishing on: {}".format(self.topics_to_matlab[script])) + self.vip.pubsub.publish('pubsub', topic=self.topics_to_matlab[script], + message="{}{}".format(self.script_names[script],cmd_args)) + _log.debug("Sending message: {}{}".format(self.script_names[script],cmd_args)) + + _log.debug("Agent Configured!") + +For this example, the agent is publishing to the `matlab/to_matlab/1` topic, and is listening to the +`matlab/to_volttron` topic. It is sending the script name `testScript.py` with the argument 20. These are the default +values found in the agent, if no configuration is loaded. + +.. code:: + + script_names = config.get('script_names', ["testScript.py"]) + script_args = config.get('script_args', [["20"]]) + topics_to_matlab = config.get('topics_to_matlab', ["matlab/to_matlab/1"]) + topics_to_volttron = config.get('topics_to_volttron', "matlab/to_volttron/") + + +StandAloneMatLab.py +------------------- + +The `StandAloneMatLab.py` script is a standalone agent designed to be able to run in a Windows environment. Its purpose +is to listen to a topic, and when something is published to this topic, it takes the message, and sends it to the +``script_runner`` function in `scriptwrapper.py`. This function processes the inputs, and then the output is published +to another topic. + +.. code:: + + class StandAloneMatLab(Agent): + '''The standalone version of the MatLab Agent''' + + @PubSub.subscribe('pubsub', _topics['volttron_to_matlab']) + def print_message(self, peer, sender, bus, topic, headers, message): + print('The Message is: ' + str(message)) + messageOut = script_runner(message) + self.vip.pubsub.publish('pubsub', _topics['matlab_to_volttron'], message=messageOut) + + +settings.py +----------- + +The topic to listen to and the topic to publish to are defined in `settings.py`, along with the information needed to +connect the Standalone Agent to the primary VOLTTRON instance. These should be the same topics that the MatLabAgentV2 +is publishing and listening to, so that the communication can be successful. To connect the Standalone Agent to the +primary VOLTTRON instance, the IP address and port of the instance are needed, along with the server key. + +.. code:: + + _topics = { + 'volttron_to_matlab': 'matlab/to_matlab/1', + 'matlab_to_volttron': 'matlab/to_volttron/1' + } + + # The parameters dictionary is used to populate the agent's + # remote vip address. + _params = { + # The root of the address. + # Note: + # 1. volttron instance should be configured to use tcp. use command vcfg + # to configure + 'vip_address': 'tcp://192.168.56.101', + 'port': 22916, + + # public and secret key for the standalone_matlab agent. + # These can be created using the command: volttron-ctl auth keypair + # public key should also be added to the volttron instance auth + # configuration to enable standalone agent access to volttron instance. Use + # command 'vctl auth add' Provide this agent's public key when prompted + # for credential. + + 'agent_public': 'dpu13XKPvGB3XJNVUusCNn2U0kIWcuyDIP5J8mAgBQ0', + 'agent_secret': 'Hlya-6BvfUot5USdeDHZ8eksDkWgEEHABs1SELmQhMs', + + # Public server key from the remote platform. This can be + # obtained using the command: + # volttron-ctl auth serverkey + 'server_key': 'QTIzrRGQ0-b-37AbEYDuMA0l2ETrythM2V1ac0v9CTA' + + } + + def remote_url(): + return "{vip_address}:{port}?serverkey={server_key}" \ + "&publickey={agent_public}&" \ + "secretkey={agent_secret}".format(**_params) + +The primary VOLTTRON instance will then need to add the public key from the Standalone Agent. In this example, the +topic that the Standalone Agent is listening to is `matlab/to_matlab/1`, and the topic it is publishing to is +`matlab/to_volttron/1`. + + +scriptwrapper.py +---------------- + +`Scriptwrapper.py` contains the script_runner function. The purpose of this function is to take in a string that +contains a Python script and command line arguments separated by commas. This string is parsed and passed to the system +arguments, which allows the script sent to the function to use the command line arguments. The function then redirects +standard output to a `StringIO` file object, and then attempts to execute the script. If there are any errors with the +script, the error that is generated is returned to the standalone agent. Otherwise, the file object stores the output +from the script, is converted to a string, and is sent to the standalone agent. In this example, the script that is to +be run is `testScript.py`. + +.. code:: + + #Script to take in a string, run the program, + #and output the results of the command as a string. + + import time + import sys + from io import StringIO + + + def script_runner(message): + original = sys.stdout + # print(message) + # print(sys.argv) + sys.argv = message.split(',') + # print(sys.argv) + + try: + out = StringIO() + sys.stdout = out + exec(open(sys.argv[0]).read()) + sys.stdout = original + return out.getvalue() + except Exception as ex: + out = str(ex) + sys.stdout = original + return out + +.. note:: + + The script that is to be run needs to be in the same folder as the agent and the `scriptwrapper.py` script. The + `script_runner` function needs to be edited if it is going to call a script at a different location. + + +testScript.py +------------- + +This is a very simple test script designed to demonstrate the calling of a MatLab function from within Python. First it +initializes the MatLab engine for Python. It then takes in a single command line argument, and passes it to the MatLab +function `testPy.m`. If no arguments are sent, it will send 0 to the `testPy.m` function. It then prints the result of +the `testPy.m` function. In this case, since standard output is being redirected to a file object, this is how the +result is passed from this function to the Standalone Agent. + +.. code-block:: python + + import matlab.engine + import sys + + + eng = matlab.engine.start_matlab() + + if len(sys.argv) == 2: + result = eng.testPy(float(sys.argv[1])) + else: + result = eng.testPy(0.0) + + print(result) + + +testPy.m +-------- + +This MatLab function is a very simple example, designed to show a function that takes an argument, and produces an array +as the output. The input argument is added to each element in the array, and the entire array is then returned. + +.. code:: + + function out = testPy(z) + x = 1:100 + out = x + z + end + + +Setup on Linux +-------------- + +1. Setup and run VOLTTRON from develop branch using instructions :ref:`here `. + +2. Configure volttron instance using the ``vcfg`` command. When prompted for the vip address use + ``tcp://``. This is necessary to enable volttron communication with external + processes. + + .. note:: + + If you are running VOLTTRON from within VirtualBox, jit would be good to set one of your adapters as a + `Host-only` adapter. This can be done within the VM's settings, under the `Network` section. Once this is + done, use this IP for the VIP address. + + +.. _Matlab-Agent-Config: + +3. Update the configuration for MatLabAgent_v2 at `/example/MatLabAgent_v2/config`. + + The configuration file for the MatLab agent has four variables. + + 1. script_names + + 2. script_args + + 3. topics_to_matlab + + 4. topics_to_volttron + + An example config file included with the folder. + + .. code:: + + { + # VOLTTRON config files are JSON with support for python style comments. + "script_names": ["testScript.py"], + "script_args": [["20"]], + "topics_to_matlab": ["matlab/to_matlab/1"], + "topics_to_volttron": "matlab/to_volttron/" + } + + To edit the configuration, the format should be as follows: + + .. code-block:: json + + { + "script_names": ["script1.py", "script2.py", "..."], + "script_args": [["arg1","arg2"], ["arg1"], ["..."]], + "topics_to_matlab": ["matlab/to_matlab/1", "matlab/to_matlab/2", "..."], + "topics_to_volttron": "matlab/to_volttron/" + } + + The config requires that each script name lines up with a set of commandline arguments and a topic. A + commandline argument must be included, even if it is not used. The placement of brackets are important, even when + only communicating with one standalone agent. + + For example, if only one standalone agent is used, and no command line arguments are in place, the config file may + look like this. + + .. code-block:: json + + { + "script_names": ["testScript.py"], + "script_args": [["0"]], + "topics_to_matlab": ["matlab/to_matlab/1"], + "topics_to_volttron": "matlab/to_volttron/" + } + + +4. Install MatLabAgent_v2 and start agent (from volttron root directory) + + .. code-block:: bash + + python ./scripts/install-agent.py -s examples/MatLabAgent_v2 --start + + .. note:: + + The MatLabAgent_v2 publishes the command to be run to the message bus only on start or on a configuration + update. Once we configure the `standalone_matlab` agent on the Windows machine, we will send a configuration + update to the running MatLabAgent_v2. The configuration would contain the topics to which the Standalone Agent + is listening to and will be publishing result to. + + .. seealso:: + + The MatLab agent uses the configuration store to dynamically change inputs. More information on the config + store and how it used can be found here. + + * :ref:`VOLTTRON Configuration Store ` + + * :ref:`Agent Configuration Store ` + + * :ref:`Agent Configuration Store Interface ` + +5. Run the below command and make a note of the server key. This is required for configuring the stand alone agent + on Windows. (This is run on the linux machine) + + .. code-block:: bash + + vctl auth serverkey + + +Setup on Windows +---------------- + +Install pre-requisites +^^^^^^^^^^^^^^^^^^^^^^ + +1. Install Python3.6 64-bit from the `Python website `_. + +2. Install the MatLab engine from + `MathWorks `_. + + .. warning:: + + The MatLab engine for Python only supports certain version of Python depending on the version of MatLab used. + Please check `here `__ to see if the current + version of MatLab supports your version of Python. + + +.. note:: + + At this time, you may want to verify that you are able to communicate with your Linux machine across your network. + The simplest method would be to open up the command terminal and use ``ping ``, and ``telnet + ``. Please make sure that the port is + opened for outside access. + + +Install Standalone MatLab Agent +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The standalone MatLab agent is designed to be usable in a Windows environment. + +.. warning:: + + VOLTTRON is not designed to run in a Windows environment. Outside of cases where it is stated to be usable in a + Windows environment, it should be assumed that it will **NOT** function as expected. + +#. Download VOLTTRON + + Download the VOLTTRON develop repository from Github. Download the zip from + `GitHub `_. + + |github-image| + + |github-zip-image| + + Once the zipped file has been downloaded, go to your `Downloads` folder, right-click on the file, and select + `Extract All...` + + |extract-image_1| + + Choose a location for the extracted folder, and select "Extract" + + |extract-image_2| + +#. Setup the `PYTHONPATH` + + Open the Windows explorer, and navigate to `Edit environment variables for your account`. + + |cmd-image| + + Select "New" + + |env-vars-image_1| + + For "Variable name" enter: ``PYTHONPATH`` + For "Variable value" either browse to your VOLTTRON installation, or enter in the path to your VOLTTRON + installation. + + |env-vars-image_2| + + Select `OK` twice. + +#. Set Python version in MatLab + + Open your MatLab application. Run the command: + + .. code-block:: bash + + pyversion + + This should print the path to Python2.7. If you have multiple versions of python on your machine and `pyversion` + points to a different version of Python, use: + + .. code-block:: bash + + pyversion /path/to/python.exe + + to set the appropriate version of python for your system. + + For example, to use python 3.6 with MatLab: + + .. code-block:: console + + pyversion C:\Python36\python.exe + +#. Set up the environment. + + Open up the command prompt + + |cmd-image_2| + + Navigate to your VOLTTRON installation + + ``cd \Your\directory\path\to\volttron-develop`` + + Use pip to install and setup dependencies. + + ``pip install -r examples\StandAloneMatLab\requirements.txt`` + + ``pip install -e .`` + + .. note:: + + If you get the error doing the second step because of an already installed volttron from a different directory, + manually delete the `volttron-egg.` link file from your `\\Lib\\site-packages` directory (for + example: + + .. code-block:: bash + + del C:\\Python27\\lib\\site-packages\\volttron-egg.link + + and re-run the second command + +#. Configure the agent + + The configuration settings for the standalone agent are in setting.py (located in + `volttron-develop\\examples\\StandAloneMatLab\\`) + + **settings.py** + + * `volttron_to_matlab` needs to be set to the topic that will send your script and command line arguments to your + stand alone agent. This was defined in the :ref:`config. ` + + * `matlab_to_volttron` needs to be set to the topic that will send your script's + output back to your volttron platform. This was defined in :ref:`config. ` + + * `vip_address` needs to be set to the address of your volttron instance + + * `port` needs to be set to the port of your volttron instance + + * `server_key` needs to be set to the public server key of your primary volttron platform. This can be obtained + from the primary volttron platform using ``vctl auth serverkey`` (VOLTTRON must be running to use this command.) + + It is possible to have multiple standalone agents running. In this case, copy the `StandAloneMatLab` folder, and + make the necessary changes to the new `settings.py` file. Unless it is connecting to a separate VOLTTRON instance, + you should only need to change the `volttron_to_matlab` setting. + + .. note:: + + It is recommended that you generate a new "agent_public" and "agent_private" key for your standalone agent. + This can be done using the ``vctl auth keypair`` command on your primary VOLTTRON platform on Linux. If you + plan to use multiple standalone agents, they will each need their own keypair. + +6. Add standalone agent key to VOLTTRON platform + + * Copy the public key from `settings.py` in the StandAloneMatLab folder. + + * While the primary VOLTTRON platform is running on the linux machine, add the agent public key using the ``vctl + auth`` command on the Linux machine. This will make VOLTTRON platform allow connections from the standalone agent + + .. code-block:: bash + + vctl auth add --credentials + +7. Run standalone agent + + + At this point, the agent is ready to run. To use the agent, navigate to the example folder and use python to start + the agent. The agent will then wait for a message to be published to the selected topic by the MatLab agent. + + .. code-block:: bash + + cd examples\StandAloneMatLab\ + + python standalone_matlab.py + + The output should be similar to this: + + .. code-block:: console + + 2019-08-01 10:42:47,592 volttron.platform.vip.agent.core DEBUG: identity: standalone_matlab + 2019-08-01 10:42:47,592 volttron.platform.vip.agent.core DEBUG: agent_uuid: None + 2019-08-01 10:42:47,594 volttron.platform.vip.agent.core DEBUG: serverkey: None + 2019-08-01 10:42:47,596 volttron.platform.vip.agent.core DEBUG: AGENT RUNNING on ZMQ Core standalone_matlab + 2019-08-01 10:42:47,598 volttron.platform.vip.zmq_connection DEBUG: ZMQ connection standalone_matlab + 2019-08-01 10:42:47,634 volttron.platform.vip.agent.core INFO: Connected to platform: router: ebae9efa-5e8f-49e3-95a0-2020ddff9e8a version: 1.0 identity: standalone_matlab + 2019-08-01 10:42:47,634 volttron.platform.vip.agent.core DEBUG: Running onstart methods. + + + .. note:: + + If you have Python3 as your default Python run the command ``python -2 standalone_matlab.py`` + +8. On the Linux machine configure the Matlab Agent to publish commands to the topic standalone agent is listening to. +To load a new configuration or to change the current configuration enter + + .. code-block:: bash + + vctl config store config + + Whenever there is a change in the configuration in the config store, or whenever the agent starts, the MatLab Agent + sends the configured command to the topic configured. As long as the standalone agent has been started and is + listening to the appropriate topic, the output in the log should look similar to this: + + .. code:: + + 2019-08-01 10:43:18,925 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Configuring Agent + 2019-08-01 10:43:18,926 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Publishing on: matlab/to_matlab/1 + 2019-08-01 10:43:18,926 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Sending message: testScript2.py,20 + 2019-08-01 10:43:18,926 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Agent Configured! + 2019-08-01 10:43:18,979 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent INFO: Agent: matlab/to_volttron/1 + Message: + '20' + + Once the matlab agent publishes the message (in the above case, "testScript2.py,20") on the windows command prompt + running the standalone agent, you should see the message that was received by the standalone agent. + + .. code:: + + 2019-08-01 10:42:47,671 volttron.platform.vip.agent.subsystems.configstore DEBUG: Processing callbacks for affected files: {} + The Message is: testScript2.py,20 + + .. note:: + + If MatLabAgent_v2 has been installed and started, and you have not started the `standalone_matlab agent`, you + will need to either restart the matlab_agentV2, or make a change to the configuration in the config store to + send command to the topic standalone agent is actively listening to. + +.. |github-image| image:: files/github-image.png +.. |cmd-image| image:: files/cmd-image.png +.. |env-vars-image_1| image:: files/env-vars-image_1.png +.. |env-vars-image_2| image:: files/env-vars-image_2.png +.. |cmd-image_2| image:: files/cmd-image_2.png +.. |github-zip-image| image:: files/github-zip-image.png +.. |extract-image_1| image:: files/extract-image_1.png +.. |extract-image_2| image:: files/extract-image_2.png +.. |matlab-agent-diagram| image:: files/matlab-agent-diagram.png diff --git a/docs/source/devguides/supporting/examples/NodeRed.rst b/docs/source/developing-volttron/developing-agents/example-agents/node-red.rst similarity index 67% rename from docs/source/devguides/supporting/examples/NodeRed.rst rename to docs/source/developing-volttron/developing-agents/example-agents/node-red.rst index 132e7eee32..523c5718f5 100644 --- a/docs/source/devguides/supporting/examples/NodeRed.rst +++ b/docs/source/developing-volttron/developing-agents/example-agents/node-red.rst @@ -1,45 +1,44 @@ -.. _NodeRed: +.. _Node-Red: +================ Node Red Example ================ -Node Red is a visual programming wherein users connect small units of -functionality "nodes" to create "flows". +Node Red is a visual programming language wherein users connect small units of functionality "nodes" to create "flows". + +There are two example nodes that allow communication between Node-Red and VOLTTRON. One node reads subscribes to +messages on the VOLTTRON message bus and the other publishes to it. -There are two example nodes that allow communication between Node Red and -VOLTTRON. One node reads subscribes to messages on the VOLTTRON message bus -and the other publishes to it. Dependencies ------------ -The example nodes depend on `python-shell` to be installed and available to -the Node Red environment. +The example nodes depend on `python-shell` to be installed and available to the Node Red environment. + Installation ------------ -Copy all files from `volttron/examples/NodeRed` to your `~/.node-red/nodes` -directory. `~/.node-red` is the default directory for Node Red files. If you -have set a different directory use that instead. +Copy all files from `volttron/examples/NodeRed` to your `~/.node-red/nodes` directory. `~/.node-red` is the default +directory for Node Red files. If you have set a different directory use that instead. -Set the variables at the beginning of the `volttron.js` file to be a valid -VOLTTRON environment, VOLTTRON home, and python path. +Set the variables at the beginning of the `volttron.js` file to be a valid VOLTTRON environment, VOLTTRON home, and +Python PATH. -Valid CURVE keys need to be added to the `settings.py` file. If they are -generated with the `vctl auth keypair` command then the public key -should be added to VOLTTRON's authorization file with the following: +Valid CURVE keys need to be added to the `settings.py` file. If they are generated with the `vctl auth keypair` command +then the public key should be added to VOLTTRON's authorization file with the following: .. code-block:: console $ vctl auth add -The serverkey can be found with +The serverkey can be found with: .. code-block:: console $ vctl auth serverkey + Usage ----- @@ -67,8 +66,8 @@ Start VOLTTRON and Node Red. 11 Jan 15:26:49 - [info] Starting flows 11 Jan 15:26:49 - [info] Started flows -The output from the Node Red command indicates the address of its web -interface. Nodes available for use are in the left sidebar. +The output from the Node Red command indicates the address of its web interface. Nodes available for use are in the +left sidebar. |Node Red| diff --git a/docs/source/developing-volttron/developing-agents/example-agents/scheduler-example-agent.rst b/docs/source/developing-volttron/developing-agents/example-agents/scheduler-example-agent.rst new file mode 100644 index 0000000000..83817ae8e8 --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/example-agents/scheduler-example-agent.rst @@ -0,0 +1,57 @@ +.. _Scheduler-Example-Agent: + +======================= +Scheduler Example Agent +======================= + +The Scheduler Example Agent demonstrates how to use the scheduling feature of the :ref`Actuator Agent ` +as well as how to send a command. This agent publishes a request for a reservation on a (fake) device then takes an +action when it's scheduled time appears. The ActuatorAgent must be running to exercise this example. + +.. Note:: + + Since there is no actual device, an error is produced when the agent attempts to take its action. + +.. code-block:: python + + def publish_schedule(self): + '''Periodically publish a schedule request''' + headers = { + 'AgentID': agent_id, + 'type': 'NEW_SCHEDULE', + 'requesterID': agent_id, #The name of the requesting agent. + 'taskID': agent_id + "-ExampleTask", #The desired task ID for this task. It must be unique among all other scheduled tasks. + 'priority': 'LOW', #The desired task priority, must be 'HIGH', 'LOW', or 'LOW_PREEMPT' + } + + start = str(datetime.datetime.now()) + end = str(datetime.datetime.now() + datetime.timedelta(minutes=1)) + + + msg = [ + ['campus/building/unit',start,end] + ] + self.vip.pubsub.publish( + 'pubsub', topics.ACTUATOR_SCHEDULE_REQUEST, headers, msg) + +The agent listens to schedule announcements from the actuator and then issues a command: + +.. code-block:: python + + @PubSub.subscribe('pubsub', topics.ACTUATOR_SCHEDULE_ANNOUNCE(campus='campus', + building='building',unit='unit')) + def actuate(self, peer, sender, bus, topic, headers, message): + print ("response:",topic,headers,message) + if headers[headers_mod.REQUESTER_ID] != agent_id: + return + '''Match the announce for our fake device with our ID + Then take an action. Note, this command will fail since there is no + actual device''' + headers = { + 'requesterID': agent_id, + } + self.vip.pubsub.publish( + 'pubsub', topics.ACTUATOR_SET(campus='campus', + building='building',unit='unit', + point='point'), + headers, 0.0) diff --git a/docs/source/developing-volttron/developing-agents/example-agents/simple-web-agent-walk-through.rst b/docs/source/developing-volttron/developing-agents/example-agents/simple-web-agent-walk-through.rst new file mode 100644 index 0000000000..8626da335a --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/example-agents/simple-web-agent-walk-through.rst @@ -0,0 +1,76 @@ +.. _Simple-Web-Agent-Walk-through: + +============================= +Simple Web Agent Walk-through +============================= + +A simple web enabled agent that will hook up with a VOLTTRON message bus and allow interaction between it via HTTP. +This example agent shows a simple file serving agent, a JSON-RPC based call, and a websocket based connection mechanism. + + +Starting VOLTTRON Platform +-------------------------- + +.. note:: + + Starting the VOLTTRON platform requires an :term:`Activated Environment`. Run the following command from the root + directory: + + .. code-block:: bash + + . env/bin/activate + +In order to start the simple web agent, we need to bind the VOLTTRON instance to the a web server. We need to specify +the address and the port for the web server. For example, if we want to bind the `localhost:8080` as the web server +we start the VOLTTRON platform as follows: + +.. code-block:: bash + + ./start-volttron --bind-web-address http://127.0.0.1:8080 + +Once the platform is started, we are ready to run the Simple Web Agent. + + +Running Simple Web Agent +------------------------ + +.. note:: + + The following assumes the shell is located at the :term:`VOLTTRON_ROOT`. + +Copy the following into your shell (save it to a file for executing it again later): + +.. code-block:: console + + python scripts/install-agent.py \ + --agent-source examples/SimpleWebAgent \ + --tag simpleWebAgent \ + --vip-identity webagent \ + --force \ + --start + +This will create a web server on ``http://localhost:8080``. The `index.html` file under `simpleweb/webroot/simpleweb/` +can be any HTML page which binds to the VOLTTRON message bus .This provides a simple example of providing a web endpoint +in VOLTTRON. + + +Path based registration examples +-------------------------------- + +- Files will need to be in `webroot/simpleweb` in order for them to be browsed from + ``http://localhost:8080/simpleweb/index.html`` + +- Filename is required as we don't currently auto-redirect to any default pages as shown in + ``self.vip.web.register_path("/simpleweb", os.path.join(WEBROOT))`` + +The following two examples show the way to call either a JSON-RPC (default) endpoint and one that returns a different +content-type. With the JSON-RPC example from volttron central we only allow post requests, however this is not +required. + +- Endpoint will be available at `http://localhost:8080/simple/text` + ``self.vip.web.register_endpoint("/simple/text", self.text)`` + +- Endpoint will be available at `http://localhost:8080/simple/jsonrpc` + ``self.vip.web.register_endpoint("/simpleweb/jsonrpc", self.rpcendpoint)`` +- ``text/html`` content type specified so the browser can act appropriately like ``[("Content-Type", "text/html")]`` +- The default response is ``application/json so our`` endpoint returns appropriately with a JSON based response. diff --git a/docs/source/devguides/agent_development/files/1-eclipse-desktop.jpg b/docs/source/developing-volttron/developing-agents/files/1-eclipse-desktop.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/1-eclipse-desktop.jpg rename to docs/source/developing-volttron/developing-agents/files/1-eclipse-desktop.jpg diff --git a/docs/source/devguides/agent_development/files/10-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/10-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/10-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/10-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/11-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/11-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/11-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/11-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/12-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/12-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/12-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/12-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/13-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/13-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/13-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/13-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/14-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/14-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/14-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/14-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/15-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/15-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/15-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/15-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/16-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/16-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/16-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/16-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/17-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/17-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/17-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/17-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/18-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/18-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/18-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/18-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/19-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/19-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/19-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/19-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/2-egit-plugin.jpg b/docs/source/developing-volttron/developing-agents/files/2-egit-plugin.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/2-egit-plugin.jpg rename to docs/source/developing-volttron/developing-agents/files/2-egit-plugin.jpg diff --git a/docs/source/devguides/agent_development/files/20-check-volttron-from-github.jpg b/docs/source/developing-volttron/developing-agents/files/20-check-volttron-from-github.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/20-check-volttron-from-github.jpg rename to docs/source/developing-volttron/developing-agents/files/20-check-volttron-from-github.jpg diff --git a/docs/source/devguides/agent_development/files/21-configuring-pydev.jpg b/docs/source/developing-volttron/developing-agents/files/21-configuring-pydev.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/21-configuring-pydev.jpg rename to docs/source/developing-volttron/developing-agents/files/21-configuring-pydev.jpg diff --git a/docs/source/devguides/agent_development/files/22-configuring-pydev.jpg b/docs/source/developing-volttron/developing-agents/files/22-configuring-pydev.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/22-configuring-pydev.jpg rename to docs/source/developing-volttron/developing-agents/files/22-configuring-pydev.jpg diff --git a/docs/source/devguides/agent_development/files/23-configuring-pydev.jpg b/docs/source/developing-volttron/developing-agents/files/23-configuring-pydev.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/23-configuring-pydev.jpg rename to docs/source/developing-volttron/developing-agents/files/23-configuring-pydev.jpg diff --git a/docs/source/devguides/agent_development/files/24-setting-pydev-project.jpg b/docs/source/developing-volttron/developing-agents/files/24-setting-pydev-project.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/24-setting-pydev-project.jpg rename to docs/source/developing-volttron/developing-agents/files/24-setting-pydev-project.jpg diff --git a/docs/source/devguides/agent_development/files/25-setting-pydev-perspective.jpg b/docs/source/developing-volttron/developing-agents/files/25-setting-pydev-perspective.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/25-setting-pydev-perspective.jpg rename to docs/source/developing-volttron/developing-agents/files/25-setting-pydev-perspective.jpg diff --git a/docs/source/devguides/agent_development/files/26-running-volttron.jpg b/docs/source/developing-volttron/developing-agents/files/26-running-volttron.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/26-running-volttron.jpg rename to docs/source/developing-volttron/developing-agents/files/26-running-volttron.jpg diff --git a/docs/source/devguides/agent_development/files/27-running-volttron.jpg b/docs/source/developing-volttron/developing-agents/files/27-running-volttron.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/27-running-volttron.jpg rename to docs/source/developing-volttron/developing-agents/files/27-running-volttron.jpg diff --git a/docs/source/devguides/agent_development/files/28-running-volttron.jpg b/docs/source/developing-volttron/developing-agents/files/28-running-volttron.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/28-running-volttron.jpg rename to docs/source/developing-volttron/developing-agents/files/28-running-volttron.jpg diff --git a/docs/source/devguides/agent_development/files/29-running-volttron.jpg b/docs/source/developing-volttron/developing-agents/files/29-running-volttron.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/29-running-volttron.jpg rename to docs/source/developing-volttron/developing-agents/files/29-running-volttron.jpg diff --git a/docs/source/devguides/agent_development/files/3-egit-plugin.jpg b/docs/source/developing-volttron/developing-agents/files/3-egit-plugin.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/3-egit-plugin.jpg rename to docs/source/developing-volttron/developing-agents/files/3-egit-plugin.jpg diff --git a/docs/source/devguides/agent_development/files/30-running-listener-agent.jpg b/docs/source/developing-volttron/developing-agents/files/30-running-listener-agent.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/30-running-listener-agent.jpg rename to docs/source/developing-volttron/developing-agents/files/30-running-listener-agent.jpg diff --git a/docs/source/devguides/agent_development/files/31-running-listener-agent.jpg b/docs/source/developing-volttron/developing-agents/files/31-running-listener-agent.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/31-running-listener-agent.jpg rename to docs/source/developing-volttron/developing-agents/files/31-running-listener-agent.jpg diff --git a/docs/source/devguides/agent_development/files/32-running-listener-agent.jpg b/docs/source/developing-volttron/developing-agents/files/32-running-listener-agent.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/32-running-listener-agent.jpg rename to docs/source/developing-volttron/developing-agents/files/32-running-listener-agent.jpg diff --git a/docs/source/devguides/agent_development/files/33-running-listener-agent.jpg b/docs/source/developing-volttron/developing-agents/files/33-running-listener-agent.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/33-running-listener-agent.jpg rename to docs/source/developing-volttron/developing-agents/files/33-running-listener-agent.jpg diff --git a/docs/source/devguides/agent_development/files/34-running-listener-agent.jpg b/docs/source/developing-volttron/developing-agents/files/34-running-listener-agent.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/34-running-listener-agent.jpg rename to docs/source/developing-volttron/developing-agents/files/34-running-listener-agent.jpg diff --git a/docs/source/devguides/agent_development/files/35-listening_agent_output.jpg b/docs/source/developing-volttron/developing-agents/files/35-listening_agent_output.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/35-listening_agent_output.jpg rename to docs/source/developing-volttron/developing-agents/files/35-listening_agent_output.jpg diff --git a/docs/source/devguides/agent_development/files/36-agent-test-folder.jpg b/docs/source/developing-volttron/developing-agents/files/36-agent-test-folder.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/36-agent-test-folder.jpg rename to docs/source/developing-volttron/developing-agents/files/36-agent-test-folder.jpg diff --git a/docs/source/devguides/agent_development/files/37-testagent-output.jpg b/docs/source/developing-volttron/developing-agents/files/37-testagent-output.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/37-testagent-output.jpg rename to docs/source/developing-volttron/developing-agents/files/37-testagent-output.jpg diff --git a/docs/source/devguides/agent_development/files/38-console-output.jpg b/docs/source/developing-volttron/developing-agents/files/38-console-output.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/38-console-output.jpg rename to docs/source/developing-volttron/developing-agents/files/38-console-output.jpg diff --git a/docs/source/devguides/agent_development/files/39-testagent-output-weather-subscribed.jpg b/docs/source/developing-volttron/developing-agents/files/39-testagent-output-weather-subscribed.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/39-testagent-output-weather-subscribed.jpg rename to docs/source/developing-volttron/developing-agents/files/39-testagent-output-weather-subscribed.jpg diff --git a/docs/source/devguides/agent_development/files/4-egit-plugin.jpg b/docs/source/developing-volttron/developing-agents/files/4-egit-plugin.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/4-egit-plugin.jpg rename to docs/source/developing-volttron/developing-agents/files/4-egit-plugin.jpg diff --git a/docs/source/developing-volttron/developing-agents/files/40-message-debugger.jpg b/docs/source/developing-volttron/developing-agents/files/40-message-debugger.jpg new file mode 100644 index 0000000000..99cc3c8a85 Binary files /dev/null and b/docs/source/developing-volttron/developing-agents/files/40-message-debugger.jpg differ diff --git a/docs/source/devguides/agent_development/files/5-install-eclipse-pydev-plugin.jpg b/docs/source/developing-volttron/developing-agents/files/5-install-eclipse-pydev-plugin.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/5-install-eclipse-pydev-plugin.jpg rename to docs/source/developing-volttron/developing-agents/files/5-install-eclipse-pydev-plugin.jpg diff --git a/docs/source/devguides/agent_development/files/6-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/6-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/6-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/6-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/7-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/7-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/7-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/7-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/8-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/8-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/8-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/8-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/9-check-volttron-with-eclipse.jpg b/docs/source/developing-volttron/developing-agents/files/9-check-volttron-with-eclipse.jpg similarity index 100% rename from docs/source/devguides/agent_development/files/9-check-volttron-with-eclipse.jpg rename to docs/source/developing-volttron/developing-agents/files/9-check-volttron-with-eclipse.jpg diff --git a/docs/source/devguides/agent_development/files/run_configuration.png b/docs/source/developing-volttron/developing-agents/files/run_configuration.png similarity index 100% rename from docs/source/devguides/agent_development/files/run_configuration.png rename to docs/source/developing-volttron/developing-agents/files/run_configuration.png diff --git a/docs/source/specifications/aggregate.rst b/docs/source/developing-volttron/developing-agents/specifications/aggregate.rst similarity index 99% rename from docs/source/specifications/aggregate.rst rename to docs/source/developing-volttron/developing-agents/specifications/aggregate.rst index c6198112e4..567e92763c 100644 --- a/docs/source/specifications/aggregate.rst +++ b/docs/source/developing-volttron/developing-agents/specifications/aggregate.rst @@ -1,8 +1,8 @@ -.. _AggregateHistorianSpec: +.. _Aggregate-Historian-Specification: -======================================= -Aggregate Historian Agent Specification -======================================= +=================== +Aggregate Historian +=================== Description =========== diff --git a/docs/source/specifications/files/aggregate_historian.jpg b/docs/source/developing-volttron/developing-agents/specifications/files/aggregate_historian.jpg similarity index 100% rename from docs/source/specifications/files/aggregate_historian.jpg rename to docs/source/developing-volttron/developing-agents/specifications/files/aggregate_historian.jpg diff --git a/docs/source/developing-volttron/developing-agents/specifications/index.rst b/docs/source/developing-volttron/developing-agents/specifications/index.rst new file mode 100644 index 0000000000..c85089ba2b --- /dev/null +++ b/docs/source/developing-volttron/developing-agents/specifications/index.rst @@ -0,0 +1,14 @@ +.. _Agent-Specifications: + +==================== +Agent Specifications +==================== + +Documents included below are intended to provide a specification to classes of agents which include a base class in the +VOLTTRON repository and have a well defined set of functions and services. + +.. toctree:: + + aggregate + tagging-service + weather-service diff --git a/docs/source/specifications/tagging_service.rst b/docs/source/developing-volttron/developing-agents/specifications/tagging-service.rst similarity index 99% rename from docs/source/specifications/tagging_service.rst rename to docs/source/developing-volttron/developing-agents/specifications/tagging-service.rst index 172930b791..9b11ec7823 100644 --- a/docs/source/specifications/tagging_service.rst +++ b/docs/source/developing-volttron/developing-agents/specifications/tagging-service.rst @@ -1,8 +1,8 @@ -.. _TaggingServiceSpec: +.. _Tagging-Service-Specification: -=========================== -Tagging agent specification -=========================== +=============== +Tagging Service +=============== *********** Description diff --git a/docs/source/specifications/Weather_Service_Spec.rst b/docs/source/developing-volttron/developing-agents/specifications/weather-service.rst similarity index 98% rename from docs/source/specifications/Weather_Service_Spec.rst rename to docs/source/developing-volttron/developing-agents/specifications/weather-service.rst index 15fded9d91..fc52bcbe3b 100644 --- a/docs/source/specifications/Weather_Service_Spec.rst +++ b/docs/source/developing-volttron/developing-agents/specifications/weather-service.rst @@ -1,8 +1,8 @@ -.. _WeatherAgentSpec: +.. _Weather-Agent-Specification: -============================= -Weather service specification -============================= +=============== +Weather Service +=============== *********** Description diff --git a/docs/source/devguides/agent_development/Testing-Agents.rst b/docs/source/developing-volttron/developing-agents/writing-agent-tests.rst similarity index 71% rename from docs/source/devguides/agent_development/Testing-Agents.rst rename to docs/source/developing-volttron/developing-agents/writing-agent-tests.rst index 98d0d779a3..6eefe10096 100644 --- a/docs/source/devguides/agent_development/Testing-Agents.rst +++ b/docs/source/developing-volttron/developing-agents/writing-agent-tests.rst @@ -1,35 +1,37 @@ .. _Writing-Agent-Tests: -******************* +=================== Writing Agent Tests -******************* +=================== -The VOLTTRON team strongly encourages developing agents with a set of unit and integration tests. Test-driven +The VOLTTRON team strongly encourages developing agents with a set of unit and integration tests. Test-driven development can save developers significant time and effort by clearly defining behavioral expectations for agent code. -We recommend developing agent tests using Pytest. Agent code contributed to VOLTTRON is expected to include a set of -tests using Pytest in the agent module directory. Following are instructions for setting up Pytest, structuring your +We recommend developing agent tests using Pytest. Agent code contributed to VOLTTRON is expected to include a set of +tests using Pytest in the agent module directory. Following are instructions for setting up Pytest, structuring your tests, how to write unit and integration tests (including some helpful tools using Pytest and Mock) and how to run your tests. + Installation -############ +============ To get started with Pytest, install it in an activated environment: -:: +.. code-block:: bash pip install pytest -Or when running VOLTTRON's bootstrap process, specify the `--testing` optional argument. +Or when running VOLTTRON's bootstrap process, specify the ``--testing`` optional argument. -:: +.. code-block:: bash python bootstrap.py --testing `Pytest on PyPI `_ + Module Structure -################ +================ We suggest the following structure for your agent module: @@ -50,12 +52,13 @@ We suggest the following structure for your agent module: │ └── setup.py The test suite should be in a `tests` directory in the root agent directory, and should contain one or more -test code files (with the `test_` convention). Conftest.py can be used to give all agent tests -access to some portion of the VOLTTRON code - in many cases agents use conftest to import VOLTTRON testing +test code files (with the `test_` convention). `conftest.py` can be used to give all agent tests +access to some portion of the VOLTTRON code. In many cases, agents use `conftest.py` to import VOLTTRON testing fixtures for integration tests. + Naming Conventions -################## +------------------ Pytest tests are discovered and run using some conventions: @@ -64,7 +67,7 @@ Pytest tests are discovered and run using some conventions: * Pytest will search in those directories for files called test_.py or _test.py * In those files, Pytest will test: * functions and methods prefixed by "test" outside of any class - * functions and methonds prefixed by "test" inside of any class prefixed by "test" + * functions and methods prefixed by "test" inside of any class prefixed by "test" :: @@ -101,20 +104,31 @@ Pytest tests are discovered and run using some conventions: assert False In the above example, Pytest will run the tests `test_success` from the file test1.py and `test_success` and test_fail -from test2.py. No tests will be run from file.txt, even though it contains test code, nor will it try to run +from test2.py. No tests will be run from file.txt, even though it contains test code, nor will it try to run `helper_method` from test1.py as a test. + Writing Unit Tests -################## +================== These tests should test the various methods of the code base, checking for success and fail conditions. These tests -should capture how the components of the system should function - tests should describe all the possible output +should capture how the components of the system should function; and describe all the possible output conditions given the possible range of inputs including how they should fail if given improper input. `Pytest guide to Unit Testing `_ -VOLTTRON agents include code for many platform features, these features can be mocked to allow unit tests to test only -the features of the agent, without having to account for the behaviors of the core platform: +Mocking Dependencies +-------------------- + +VOLTTRON agents include code for many platform features; these features can be mocked to allow unit tests to test only +the features of the agent without having to account for the behaviors of the core platform. While there are many tools +that can mock dependencies of an agent, we recommend Volttron's AgentMock or Python's Mock testing library. + +AgentMock +^^^^^^^^^ +AgentMock was specifically created to run unit tests on agents. AgentMock takes an Agent class and mocks the attributes +and methods of that Agent's dependencies. AgentMock also allows you to customize the behavior of dependencies within +each individual test. Below is an example: .. code-block:: python @@ -138,24 +152,77 @@ the features of the agent, without having to account for the behaviors of the co assert isinstance(result.get("test2"), str) # ... + def test_success_case_custom_mocks(): + agent.some_dependency.some_method.return_value = "foobar" + agent.some_attribute = "custom, dummy value" + result = agent.do_function_that_relies_on_custom_mocks("valid input") + # ... + def test_failure_case() # pytests.raises can be useful for testing exceptions, more information about usage below with pytest.raises(ValueError, match=r'Invalid input string for do_function') result = agent.do_function("invalid input") +Mock +^^^^ + +Simliar to AgentMock, Python's Mock testing library allows a user to replace the behavior of dependencies with a +user-specified behavior. This is useful for replacing VOLTTRON platform behavior, remote API behavior, modules, +etc. where using them in unit or integration tests is impractical or impossible. +Below is an example that uses the patch decorator to mock an Agent's web request. + +`Mock documentation `_ + +.. code-block:: python + + class UserAgent() + + def __init__(): + # Code here + + def get_remote_data() + response = self._get_data_from_remote() + return "Remote response: {}".format(response) + + # it can be useful to create private functions for use with mock for things like making web requests + def _get_data_from_remote(): + url = "test.com/test1" + headers = {} + return requests.get(url, headers) + + # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + import pytest + import mock + + def get_mock_response(): + return "test response" + + # here we're mocking the UserAgent's _get_data_from_remote method and replacing it with our get_mock_response method + # to feed our test some fake remote data + @mock.patch.object(UserAgent, '_get_data_from_remote', get_mock_response) + def test_get_remote_data(): + assert UserAgent.get_remote_Data() == "Remote response: test response" + + + + + + Pytest Tools -************ +------------ + +Pytest includes many helpful tools for developing your tests. We'll highlight a few that have been useful for +VOLTTRON core tests, but checkout `the Pytest documentation `_ for additional information on +each tool as well as tools not covered in this guide. -Pytest includes many helpful tools for developing your tests, here we'll highlight a few that have been useful for -VOLTTRON core tests, but checkout `the Pytest documentation `_ for additional information on each tool as well as -tools not covered in this guide. Pytest Fixtures ---------------- +^^^^^^^^^^^^^^^ Pytest fixtures can be used to create reusable code for tests that can be accessed by every test in a module based on -scope. There are several kinds of scopes, but commonly used are "module" (the fixture is run once per module for all -the tests of that module) or "function" (the fixture is run once per test). For fixtures to be used by tests, they +scope. There are several kinds of scopes, but commonly used are "module" (the fixture is run once per module for all +the tests of that module) or "function" (the fixture is run once per test). For fixtures to be used by tests, they should be passed as parameters. `Pytest Fixture documentation `_ @@ -177,8 +244,7 @@ Here is an example of a fixture, along with using it in a test: cursor.commit() sqlite.conn.close() - # now when we pass the cleanup function, we should expect that the table will be dropped and rebuilt before the test - # runs + # when we pass the cleanup function, we expect that the table will be dropped and rebuilt before the test runs def test_store_data(cleanup_database): sqlite_conn = sqlite.connect("test.sqlite") cursor = sqlite_conn.cursor() @@ -192,21 +258,21 @@ Here is an example of a fixture, along with using it in a test: assert count == 1 - Pytest.mark ------------ +^^^^^^^^^^^ Pytest marks are used to set metadata for test functions. Defining your own custom marks can allow you to run -subsections of your tests. Parametrize can be used to pass a series of parameters to a test, so that it can be run -many times to cover the space of potential inputs. Marks also exist to specify expected behavior for tests. +subsections of your tests. Parametrize can be used to pass a series of parameters to a test, so that it can be run +many times to cover the space of potential inputs. Marks also exist to specify expected behavior for tests. `Mark documentation `_ + Custom Marks -~~~~~~~~~~~~ +"""""""""""" To add a custom mark, add the name of the mark followed by a colon then a description string to the 'markers' section -of Pytest.ini (an example of this exists in the core VOLTTRON repository). Then add the appropriate decorator: +of Pytest.ini (an example of this exists in the core VOLTTRON repository). Then add the appropriate decorator: .. code-block:: python @@ -225,8 +291,9 @@ The VOLTTRON team also has a `dev` mark for running individual (or a few) one-of # TODO unit test here pass + Parametrize -~~~~~~~~~~~ +""""""""""" Parametrize will allow tests to be run with a variety of parameters. Add the parametrize decorator, and for parameters include a list of parameter names matching the test parameter names as a comma-delimited string followed by a list of @@ -236,15 +303,16 @@ tuples containing parameters for each test. .. code-block:: python - @pytest.mark.parametrize("param1,param2,param3", [(1, 2, 3), (-1, 0, "")]) + @pytest.mark.parametrize("test_input1, test_input2, expected", [(1, 2, 3), (-1, 0, "")]) def test_user_agent(param1, param2, param3): # TODO unit test here pass + Skip, skipif, and xfail -~~~~~~~~~~~~~~~~~~~~~~~ +""""""""""""""""""""""" -The skip mark can be used to skip a test for any reason every time the test suite is run: +The `skip` mark can be used to skip a test for any reason every time the test suite is run: .. code-block:: python @@ -254,7 +322,7 @@ The skip mark can be used to skip a test for any reason every time the test suit # TODO unit test here pass -The skipif mark can be used to skip a test based on some condition: +The `skipif` mark can be used to skip a test based on some condition: .. code-block:: python @@ -264,7 +332,7 @@ The skipif mark can be used to skip a test based on some condition: # TODO unit test here pass -The xfail mark can be used to run a test, but to show that the test is currently expected to fail +The `xfail` mark can be used to run a test, but to show that the test is currently expected to fail .. code-block:: python @@ -276,58 +344,22 @@ The xfail mark can be used to run a test, but to show that the test is currently `Skip, skipif, and xfail docs `_ -Mock -**** - -Mock allows a user to replace the behavior of dependencies with a user specified behavior. This is useful for replacing -VOLTTRON platform behavior, remote API behavior, modules, etc. where using them in unit or integration tests is -impractical or impossible. - -`Mock documentation `_ - -.. code-block:: python - - class UserAgent() - - def __init__(): - # Code here - - def get_remote_data() - response = self._get_data_from_remote() - return "Remote response: {}".format(response) - - # it can be useful to create private functions for use with mock for things like making web requests - def _get_data_from_remote(): - url = "test.com/test1" - headers = {} - return requests.get(url, headers) - - # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - import pytest - import mock - - def get_mock_response(): - return "test response" - - # here we're mocking the UserAgent's _get_data_from_remote method and replacing it with our get_mock_response method - # to feed our test some fake remote data - @mock.patch.object(UserAgent, '_get_data_from_remote', get_mock_response) - def test_get_remote_data(): - assert UserAgent.get_remote_Data() == "Remote response: test response" Writing Integration Tests -######################### +========================= -Integration tests are useful for testing the faults that occur between integrated units. In the context of VOLTTRON +Integration tests are useful for testing the faults that occur between integrated units. In the context of VOLTTRON agents, integration tests should test the interactions between the agent, the platform, and other agents installed on -the platform that would interface with the agent. It is typical for integration tests to test configuration, behavior +the platform that would interface with the agent. It is typical for integration tests to test configuration, behavior and content of RPC calls and agent Pub/Sub, the agent subsystems, etc. `Pytest best practices for Integration Testing `_ -The Volttrontesting directory includes several helpful fixtures for your tests. Including the following line at the top -of your tests, or in conftest.py, will allow you to utilize the platform wrapper fixtures, and more. +Volttrontesting Directory +------------------------- + +The `Volttrontesting` directory includes several helpful fixtures for your tests. Including the following line at the +top of your tests, or in `conftest.py`, will allow you to utilize the platform wrapper fixtures, and more. .. code-block:: python @@ -391,22 +423,45 @@ Here is an example success case integration test: For more integration test examples, it is recommended to take a look at some of the VOLTTRON core agents, such as historian agents and weather service agents. +Using Docker for Limited-Integration Testing +-------------------------------------------- + +If you want to run limited-integration tests which do not require the setup of a volttron system, you can use Docker +containers to mimic dependencies of an agent. The `volttrontesting/fixtures/docker_wrapper.py` module provides a +convenient function to create docker containers for use in limited-integration tests. For example, suppose that you +had an agent with a dependency on a MySQL database. If you want to test the connection between the Agent and the MySQL +dependency, you can create a Docker container to act as a real MySQL database. Below is an example: + +.. code-block:: python + + from volttrontesting.fixtures.docker_wrapper import create_container + from UserAgent import UserAgentClass + + def test_docker_wrapper_example(): + ports_config = {'3306/tcp': 3306} + with create_container("mysql:5.7", ports=ports_config) as container: + init_database(container) + agent = UserAgent(ports_config) + + results = agent.some_method_that_talks_to_container() + + Running your Tests and Debugging -################################ +================================ Pytest can be run from the command line to run a test module. -:: +.. code-block:: bash pytest -If using marks, you can add "-m " to specify your testing subset, and -s can be used to surpress standard output. -For more information about optional arguments you can type `pytest --help` into your command line interface to see the -full list of options. +If using marks, you can add ``-m `` to specify your testing subset, and -s can be used to suppress standard +output. For more information about optional arguments you can type `pytest --help` into your command line interface to +see the full list of options. Testing output should look something like this: -:: +.. code-block:: console (volttron) @:~/volttron$ pytest services/core/SQLHistorian/ ======================================================== test session starts ========================================================= @@ -453,12 +508,12 @@ Testing output should look something like this: Running Tests Via PyCharm -************************* +------------------------- -To run our Pytests using PyCharm, we'll need to create a run configuration. To do so, select "edit configurations" from +To run our Pytests using PyCharm, we'll need to create a run configuration. To do so, select "edit configurations" from the "Run" menu (or if using the toolbar UI element you can click on the run configurations dropdown to select "edit -configurations"). Use the plus symbol at the top right of the pop-up menu, scroll to "Python Tests" and expand this -menu and select "pytest". This will create a run configuration, which will then need to be filled out. We recommend the +configurations"). Use the plus symbol at the top right of the pop-up menu, scroll to "Python Tests" and expand this +menu and select "pytest". This will create a run configuration, which will then need to be filled out. We recommend the following in general: * Set the "Script Path" radio and fill the form with the path to your module. Pytest will run any tests in that diff --git a/docs/source/devguides/walkthroughs/Driver-Creation-Walkthrough.rst b/docs/source/developing-volttron/developing-drivers/driver-development.rst similarity index 65% rename from docs/source/devguides/walkthroughs/Driver-Creation-Walkthrough.rst rename to docs/source/developing-volttron/developing-drivers/driver-development.rst index 862d2fc494..49377dfddb 100644 --- a/docs/source/devguides/walkthroughs/Driver-Creation-Walkthrough.rst +++ b/docs/source/developing-volttron/developing-drivers/driver-development.rst @@ -1,52 +1,83 @@ -.. _DriverCreationWalkthrough: +.. _Driver-Development: -Volttron Drivers Overview -========================= +================== +Driver Development +================== -In order for Volttron agents to gather data from a device or to set device values, agents send requests to the Master -Driver Agent to read or set points. The Master Driver Agent then sends these requests on to the appropriate driver for -interfacing with that device based on the topic specified in the request and the configuration of the Master Driver. -Drivers provide an interface between the device and the master driver by implementing portions of the devices' protocols +In order for VOLTTRON agents to gather data from a device or to set device values, agents send requests to the Master +Driver Agent to read or set points. The Platform Driver Agent then sends these requests on to the appropriate driver for +interfacing with that device based on the topic specified in the request and the configuration of the Platform Driver. +Drivers provide an interface between the device and the platform driver by implementing portions of the devices' protocols needed to serve the functions of setting and reading points. -As a demonstration of developing a driver, a driver can be made to read and set points in a CSV file. This driver will +As a demonstration of developing a driver a driver can be made to read and set points in a CSV file. This driver will only differ from a real device driver in terms of the specifics of the protocol. + Create a Driver and Register class -================================== +********************************** -When a new driver configuration is added to the Master Driver, the Master Driver will look for a file in its interfaces -directory (services/core/MasterDriverAgent/master_driver/interfaces) that shares the name of the value specified by -"driver_type" in the configuration file. For the CSV Driver, create a file named csvdriver.py in that directory. +When a new driver configuration is added to the Platform Driver, the Platform Driver will look for a file or directory in +its interfaces directory (services/core/PlatformDriverAgent/platform_driver/interfaces) that shares the name of the value +specified by "driver_type" in the configuration file. For the CSV Driver, create a file named csvdriver.py in that +directory. :: - ├── master_driver - │   ├── agent.py - │   ├── driver.py - │   ├── __init__.py - │   ├── interfaces - │   │   ├── __init__.py - │   │   ├── bacnet.py - | | ├── csvdriver.py - │   │   └── modbus.py - │   └── socket_lock.py - ├── master-driver.agent + ├── platform_driver + │ ├── agent.py + │ ├── driver.py + │ ├── __init__.py + │ ├── interfaces + │ │ ├── __init__.py + │ │ ├── bacnet.py + | | ├── csvdriver.py + │ │ └── modbus.py + │ └── socket_lock.py + ├── platform-driver.agent └── setup.py +Following is an example using the directory type structure: + +:: + + ├── platform_driver + │ ├── agent.py + │ ├── driver.py + │ ├── __init__.py + │ ├── interfaces + │ │ ├── __init__.py + │ │ ├── bacnet.py + | | ├── csvdriver.py + │ │ ├── modbus.py + │ │ ├── modbus_tk.py + │ │ | ├── __init__.py + │ │ | ├── tests + │ │ | ├── requirements.txt + │ │ | └── README.rst + +.. note:: + + Using this format, the directory must be the name specified by "driver_type" in the configuration file and the + `Interface` class must be in the `__init__.py` file in that directory. + +This format is ideal for including additional code files as well as requirements files, tests and documentation. + + Interface Basics ----------------- +================ + A complete interface consists of two parts: the interface class and one or more register classes. Interface Class Skeleton -~~~~~~~~~~~~~~~~~~~~~~~~ -When the Master Driver processes a driver configuration file, it creates an instance of the interface class found in the -interface file (such as the one we've just created). The interface class is responsible for managing the communication -between the Volttron Platform, and the device. Each device has many registers which hold the values Volttron agents are -interested in, so generally the interface manages reading and writing to and from a device's registers. At a minimum, +------------------------ +When the Platform Driver processes a driver configuration file it creates an instance of the interface class found in the +interface file (such as the one we've just created). The interface class is responsible for managing the communication +between the Volttron Platform, and the device. Each device has many registers which hold the values Volttron agents are +interested in so generally the interface manages reading and writing to and from a device's registers. At a minimum, the interface class should be configurable, be able to read and write registers, as well as read all registers with a -single request. First create the csv interface class boilerplate. +single request. First create the csv interface class boilerplate. .. code-block:: python @@ -66,18 +97,24 @@ single request. First create the csv interface class boilerplate. def _scrape_all(self): pass -This class should inherit from the BaseInterface, and at a minimum implement the configure, get_point, set_point, and +This class should inherit from the BaseInterface and at a minimum implement the configure, get_point, set_point, and scrape_all methods. -.. Note:: In some sense, drivers are sub-agents running under the same process as the Master Driver. They should be instantiated following the agent pattern, so a function to handle configuration and create the Driver object has been included. +.. Note:: + + In some sense, drivers are sub-agents running under the same process as the Platform Driver. They should be + instantiated following the agent pattern, so a function to handle configuration and create the Driver object has + been included. + Register Class Skeleton -~~~~~~~~~~~~~~~~~~~~~~~ -The interface needs some information specifying the communication for each register on the device. For each different -type of register, a register class should be defined, which will help identify individual registers, and determine how -to communicate with them. Our CSV driver will be fairly basic, with one kind of "register", which will be a column in -a CSV file, however other drivers may require many kinds of registers; For instance, the Modbus protocol driver has -registers which store data in byte sized chunks, and registers which store individual bits, therefore the Modbus driver +----------------------- + +The interface needs some information specifying the communication for each register on the device. For each different +type of register a register class should be defined which will help identify individual registers and determine how +to communicate with them. Our CSV driver will be fairly basic, with one kind of "register", which will be a column in +a CSV file. Other drivers may require many kinds of registers; for instance, the Modbus protocol driver has +registers which store data in byte sized chunks and registers which store individual bits, therefore the Modbus driver has bit and byte registers. For the CSV driver, create the register class boilerplate: @@ -89,24 +126,25 @@ For the CSV driver, create the register class boilerplate: default_value=None, description=''): super(CsvRegister, self).__init__("byte", read_only, pointName, units, description=description) -This class should inherit from the BaseRegister. The class should keep register metadata, and depending upon the +This class should inherit from the BaseRegister. The class should keep register metadata, and depending upon the requirements of the protocol/device, may perform the communication. -The BACNet and Modbus drivers may be used as examples of more specific implementations. For the purpose of this -demonstration, writing and reading points will be done in the register, however, this may not always be the case (as in +The BACNet and Modbus drivers may be used as examples of more specific implementations. For the purpose of this +demonstration writing and reading points will be done in the register, however, this may not always be the case (as in the case of the BACNet driver). + Filling out the Interface class -------------------------------- +=============================== + The CSV interface will be writing to and reading from a CSV file, so the device configuration should include a path -specifying a CSV file to use as the "device". The CSV "device: path value is set at the beginning of the agent loop -which runs the configure method when the Master Driver starts. Since this Driver is for demonstration, we'll create the -CSV with some default values if the configured path doesn't exist. The CSV device will consist of 2 columns, "Point +specifying a CSV file to use as the "device". The CSV "device: path value is set at the beginning of the agent loop +which runs the configure method when the Platform Driver starts. Since this Driver is for demonstration, we'll create the +CSV with some default values if the configured path doesn't exist. The CSV device will consist of 2 columns: "Point Name" specifying the name of the register, and "Point Value", the current value of the register. .. code-block:: python - _log = logging.getLogger(__name__) CSV_FIELDNAMES = ["Point Name", "Point Value"] @@ -146,11 +184,11 @@ Name" specifying the name of the register, and "Point Value", the current value writer.writerows(CSV_DEFAULT) self.parse_config(registry_config_str) -At the end of the configuration method, the Driver parses the registry configuration. The registry configuration is -a csv which is used to tell the Driver which register the user wishes to communicate with, and includes a few meta-data +At the end of the configuration method, the Driver parses the registry configuration. The registry configuration is +a csv which is used to tell the Driver which register the user wishes to communicate with and includes a few meta-data values about each register, such as whether the register can be written to, if the register value uses a specific -measurement unit, etc. After each register entry is parsed from the registry config, a register is added to the driver's -list of active registers. +measurement unit, etc. After each register entry is parsed from the registry config a register is added to the +driver's list of active registers. .. code-block:: python @@ -192,8 +230,8 @@ list of active registers. self.insert_register(register) -Since the driver's registers will be doing the work of parsing the registers, the interface only needs to select the -correct register to read from or write to, and instruct the register to perform the corresponding unit of work. +Since the driver's registers will be doing the work of parsing the registers the interface only needs to select the +correct register to read from or write to and instruct the register to perform the corresponding unit of work. .. code-block:: python @@ -216,11 +254,13 @@ correct register to read from or write to, and instruct the register to perform result[register.point_name] = register.get_state() return result + Writing the Register class --------------------------- +========================== + The CSV driver's register class is responsible for parsing the CSV, reading the corresponding rows to return the -register's current value and writing updated values into the CSV for the register. On a device which communicates via -a protocol such as Modbus, the same units of work would be done, but using pymodbus to perform the reads and writes. +register's current value and writing updated values into the CSV for the register. On a device which communicates via +a protocol such as Modbus the same units of work would be done, but using pymodbus to perform the reads and writes. Here, Python's CSV library will be used as our "protocol implementation". The Register class determines which file to read based on values passed from the Interface class. @@ -234,8 +274,8 @@ The Register class determines which file to read based on values passed from the description=description) self.csv_path = csv_path -To find its value, the register will read the CSV file, iterate over each row until a row with the point name the same -as the register name, at which point it extracts the point value, and returns it. The register should be written to +To find its value the register will read the CSV file, iterate over each row until a row with the point name the same +as the register name at which point it extracts the point value, and returns it. The register should be written to handle problems which may occur, such as no correspondingly named row being present in the CSV file. .. code-block:: python @@ -256,8 +296,8 @@ handle problems which may occur, such as no correspondingly named row being pres raise RuntimeError("CSV device at {} does not exist".format(self.csv_path)) Likewise to overwrite an existing value, the register will iterate over each row until the point name matches the -register name, saving the output as it goes. When it finds the correct row, it instead saves the output updated with the -new value, then continues on. Finally it writes the output back to the csv. +register name, saving the output as it goes. When it finds the correct row it instead saves the output updated with the +new value then continues on. Finally it writes the output back to the csv. .. code-block:: python @@ -287,13 +327,14 @@ new value, then continues on. Finally it writes the output back to the csv. writer.writerows([dict(row) for row in points]) return self.get_state() -At this point, we should be able to scrape the CSV device using the Master Driver, and set points using the actuator. +At this point we should be able to scrape the CSV device using the Platform Driver and set points using the actuator. Creating Driver Configurations ------------------------------- +============================== + The configuration files for the CSV driver are very simple, but in general, the device configuration should specify -the parameters which the interface requires to communicate with the device, and the registry configuration contains -rows which correspond to registers, and specifies their usage. +the parameters which the interface requires to communicate with the device and the registry configuration contains +rows which correspond to registers and specifies their usage. Here's the driver configuration for the CSV driver: @@ -307,7 +348,10 @@ Here's the driver configuration for the CSV driver: "timezone": "UTC" } -.. Note:: the "driver_type" value must match the name of the driver's python file, as this is what the Master Driver will look for when searching for the correct interface. +.. Note:: + + The "driver_type" value must match the name of the driver's python file as this is what the Platform Driver + will look for when searching for the correct interface. And here's the registry configuration: @@ -321,38 +365,47 @@ And here's the registry configuration: The BACNet and Modbus driver docs and example configurations can be used to compare these configurations to more complex configurations. + Testing your driver -=================== -To test the driver's scrape all functionality, one can install a ListenerAgent and Master Driver with the driver's -configurations, and run them. To do so for the CSV driver using the configurations above: activate the Volttron +******************* +To test the driver's scrape all functionality, one can install a ListenerAgent and Platform Driver with the driver's +configurations, and run them. To do so for the CSV driver using the configurations above: activate the Volttron environment start the platform, tail the platform's log file, then try the following: - | python scripts/install-agent.py -s examples/ListenerAgent - | python scripts/install-agent.py -s services/core/MasterDriverAgent -c - services/core/MasterDriverAgent/master-driver.agent - | vctl config store platform.driver devices///csv_driver - | vctl config store platform.driver +.. code-block:: bash + + python scripts/install-agent.py -s examples/ListenerAgent + python scripts/install-agent.py -s services/core/PlatformDriverAgent -c services/core/PlatformDriverAgent/platform-driver.agent + vctl config store platform.driver devices///csv_driver + vctl config store platform.driver -.. Note:: "vctl config list platform.driver" will list device and registry configurations stored for the master driver and "vctl config delete platform.driver " can be used to remove a configuration entry - these commands are very useful for debugging +.. Note:: -After the Master Driver starts, the driver's output should appear in the logs at regular intervals based on the Master + `vctl config list platform.driver` will list device and registry configurations stored for the platform driver and + `vctl config delete platform.driver ` can be used to remove a configuration entry - + these commands are very useful for debugging + +After the Platform Driver starts the driver's output should appear in the logs at regular intervals based on the Master Driver's configuration. + Here is some sample CSV driver output: - | 2019-11-15 10:32:00,010 (listeneragent-3.3 22996) listener.agent INFO: Peer: pubsub, Sender: platform.driver:, Bus: - | , Topic: devices/pnnl/isb1/csv_driver/all, Headers: {'Date': '2019-11-15T18:32:00.001360+00:00', 'TimeStamp': - | '2019-11-15T18:32:00.001360+00:00', 'SynchronizedTimeStamp': '2019-11-15T18:32:00.000000+00:00', - | 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: - | [{'test1': '0', 'test2': '1', 'test3': 'testpoint'}, - | {'test1': {'type': 'integer', 'tz': 'UTC', 'units': None}, - | 'test2': {'type': 'integer', 'tz': 'UTC', 'units': None}, - | 'test3': {'type': 'integer', 'tz': 'UTC', 'units': None}}] +.. code-block:: console + + 2019-11-15 10:32:00,010 (listeneragent-3.3 22996) listener.agent INFO: Peer: pubsub, Sender: platform.driver:, Bus: + , Topic: devices/pnnl/isb1/csv_driver/all, Headers: {'Date': '2019-11-15T18:32:00.001360+00:00', 'TimeStamp': + '2019-11-15T18:32:00.001360+00:00', 'SynchronizedTimeStamp': '2019-11-15T18:32:00.000000+00:00', + 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: + [{'test1': '0', 'test2': '1', 'test3': 'testpoint'}, + {'test1': {'type': 'integer', 'tz': 'UTC', 'units': None}, + 'test2': {'type': 'integer', 'tz': 'UTC', 'units': None}, + 'test3': {'type': 'integer', 'tz': 'UTC', 'units': None}}] This output is an indication of the basic scrape all functionality working in the Interface class - in our implementation this is also an indication of the basic functionality of the Interface class "get_point" method and Register class "get_state" methods working (although edge cases should still be tested!). -To test the Interface's "set_point" method and Register's "set_state" method, we'll need to use the Actuator agent. +To test the Interface's "set_point" method and Register's "set_state" method we'll need to use the Actuator agent. The following agent code can be used to alternate a point's value on a schedule using the actuator, as well as perform an action based on a pubsub subscription to a single point: diff --git a/docs/source/developing-volttron/development-environment/fork-repository.rst b/docs/source/developing-volttron/development-environment/fork-repository.rst new file mode 100644 index 0000000000..01bd869b4f --- /dev/null +++ b/docs/source/developing-volttron/development-environment/fork-repository.rst @@ -0,0 +1,82 @@ +.. _Fork-Repository: + +====================== +Forking the Repository +====================== + +The first step to editing the repository is to fork it into your own user space. Creating a fork makes a copy of the +repository in your GitHub for you to make any changes you may require for your use-case. This allows you to make +changes without impacting the core VOLTTRON repository. + +Forking is done by pointing your favorite web browser to http://github.com/VOLTTRON/volttron and then clicking "Fork" on +the upper right of the screen. (Note: You must have a GitHub account to fork the repository. If you don't have one, we +encourage you to `sign up `_.) + +.. note:: + + After making changes to your repository, you may wish to contribute your changes back to the Core VOLTTRON + repository. Instructions for contributing code may be found :ref:`here `. + + +Cloning 'YOUR' VOLTTRON forked repository +========================================= + +The next step in the process is to copy your forked repository onto your computer to work on. This will create an +identical copy of the GitHub repository on your local machine. To do this you need to know the address of your +repository. The URL to your repository address will be ``https://github.com//volttron.git``. From a +terminal execute the following commands: + +.. code-block:: bash + + # Here, we are assuming you are doing develop work in a folder called `git`. If you'd rather use something else, that's OK. + mkdir -p ~/git + cd ~/git + git clone -b develop https://github.com//volttron.git + cd volttron + +.. note:: + + VOLTTRON uses develop as its main development branch rather than the standard `main` branch (the default). + + +Adding and Committing files +=========================== + +Now that you have your repository cloned, it's time to start doing some modifications. Using a simple text editor +you can create or modify any file in the volttron directory. After making a modification or creating a file +it is time to move it to the stage for review before committing to the local repository. For this example let's assume +we have made a change to `README.md` in the root of the volttron directory and added a new file called `foo.py`. To get +those files in the staging area (preparing for committing to the local repository) we would execute the following +commands: + +.. code-block:: bash + + git add foo.py + git add README.md + + # Alternatively in one command + git add foo.py README.md + +After adding the files to the stage you can review the staged files by executing: + +.. code-block:: bash + + git status + +Finally, in order to commit to the local repository we need to think of what change we actually did and be able to +document it. We do that with a commit message (the -m parameter) such as the following. + +.. code-block:: bash + + git commit -m "Added new foo.py and updated copyright of README.md" + + +Pushing to the remote repository +================================ + +The next step is to share our changes with the world through GitHub. We can do this by pushing the commits +from your local repository out to your GitHub repository. This is done by the following command: + +.. code-block:: bash + + git push diff --git a/docs/source/developing-volttron/development-environment/index.rst b/docs/source/developing-volttron/development-environment/index.rst new file mode 100644 index 0000000000..3af938d6ee --- /dev/null +++ b/docs/source/developing-volttron/development-environment/index.rst @@ -0,0 +1,14 @@ +.. _Development-Environment-Setup: + +==================================== +Setting Up a Development Environment +==================================== + +An example development environment used by the VOLTTRON team would consist of a Linux VM running on the host development +machine on which an IDE would be running. The guides can be used to set up a development environment. + +.. toctree:: + + fork-repository + virtual_machine/install-vm + pycharm/index diff --git a/docs/source/devguides/files/00_open_pycharm.png b/docs/source/developing-volttron/development-environment/pycharm/files/00_open_pycharm.png similarity index 100% rename from docs/source/devguides/files/00_open_pycharm.png rename to docs/source/developing-volttron/development-environment/pycharm/files/00_open_pycharm.png diff --git a/docs/source/devguides/files/01_load_volttron.png b/docs/source/developing-volttron/development-environment/pycharm/files/01_load_volttron.png similarity index 100% rename from docs/source/devguides/files/01_load_volttron.png rename to docs/source/developing-volttron/development-environment/pycharm/files/01_load_volttron.png diff --git a/docs/source/devguides/files/02_set_project_interpreter.png b/docs/source/developing-volttron/development-environment/pycharm/files/02_set_project_interpreter.png similarity index 100% rename from docs/source/devguides/files/02_set_project_interpreter.png rename to docs/source/developing-volttron/development-environment/pycharm/files/02_set_project_interpreter.png diff --git a/docs/source/devguides/files/03_run_settings.png b/docs/source/developing-volttron/development-environment/pycharm/files/03_run_settings.png similarity index 100% rename from docs/source/devguides/files/03_run_settings.png rename to docs/source/developing-volttron/development-environment/pycharm/files/03_run_settings.png diff --git a/docs/source/devguides/files/04_listener_settings.png b/docs/source/developing-volttron/development-environment/pycharm/files/04_listener_settings.png similarity index 100% rename from docs/source/devguides/files/04_listener_settings.png rename to docs/source/developing-volttron/development-environment/pycharm/files/04_listener_settings.png diff --git a/docs/source/devguides/files/05_run_listener.png b/docs/source/developing-volttron/development-environment/pycharm/files/05_run_listener.png similarity index 100% rename from docs/source/devguides/files/05_run_listener.png rename to docs/source/developing-volttron/development-environment/pycharm/files/05_run_listener.png diff --git a/docs/source/devguides/files/06_run_tests.png b/docs/source/developing-volttron/development-environment/pycharm/files/06_run_tests.png similarity index 100% rename from docs/source/devguides/files/06_run_tests.png rename to docs/source/developing-volttron/development-environment/pycharm/files/06_run_tests.png diff --git a/docs/source/devguides/pycharm/files/07_run_forwarder.png b/docs/source/developing-volttron/development-environment/pycharm/files/07_run_forwarder.png similarity index 100% rename from docs/source/devguides/pycharm/files/07_run_forwarder.png rename to docs/source/developing-volttron/development-environment/pycharm/files/07_run_forwarder.png diff --git a/docs/source/devguides/pycharm/files/08_run_historian.png b/docs/source/developing-volttron/development-environment/pycharm/files/08_run_historian.png similarity index 100% rename from docs/source/devguides/pycharm/files/08_run_historian.png rename to docs/source/developing-volttron/development-environment/pycharm/files/08_run_historian.png diff --git a/docs/source/devguides/pycharm/index.rst b/docs/source/developing-volttron/development-environment/pycharm/index.rst similarity index 99% rename from docs/source/devguides/pycharm/index.rst rename to docs/source/developing-volttron/development-environment/pycharm/index.rst index 1cae39d848..c2044d04d0 100644 --- a/docs/source/devguides/pycharm/index.rst +++ b/docs/source/developing-volttron/development-environment/pycharm/index.rst @@ -1,5 +1,6 @@ .. _Pycharm-Dev-Environment: +=============================== Pycharm Development Environment =============================== diff --git a/docs/source/images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png similarity index 100% rename from docs/source/images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png diff --git a/docs/source/files/VOLTTRON_User_Guide.pdf b/docs/source/developing-volttron/development-environment/virtual_machine/files/VOLTTRON_User_Guide.pdf similarity index 100% rename from docs/source/files/VOLTTRON_User_Guide.pdf rename to docs/source/developing-volttron/development-environment/virtual_machine/files/VOLTTRON_User_Guide.pdf diff --git a/docs/source/images/add-chart.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/add-chart.png similarity index 100% rename from docs/source/images/add-chart.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/add-chart.png diff --git a/docs/source/devguides/eclipse/files/eclipse-marketplace.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/eclipse-marketplace.png similarity index 100% rename from docs/source/devguides/eclipse/files/eclipse-marketplace.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/eclipse-marketplace.png diff --git a/docs/source/images/edit-chart.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/edit-chart.png similarity index 100% rename from docs/source/images/edit-chart.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/edit-chart.png diff --git a/docs/source/images/example_market.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/example_market.png similarity index 100% rename from docs/source/images/example_market.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/example_market.png diff --git a/docs/source/devguides/eclipse/files/finish-import.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/finish-import.png similarity index 100% rename from docs/source/devguides/eclipse/files/finish-import.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/finish-import.png diff --git a/docs/source/devguides/eclipse/files/import-project.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/import-project.png similarity index 100% rename from docs/source/devguides/eclipse/files/import-project.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/import-project.png diff --git a/docs/source/images/install-volttron-restricted.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/install-volttron-restricted.png similarity index 100% rename from docs/source/images/install-volttron-restricted.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/install-volttron-restricted.png diff --git a/docs/source/images/linux-mint.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/linux-mint.png similarity index 100% rename from docs/source/images/linux-mint.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/linux-mint.png diff --git a/docs/source/images/login-screen.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/login-screen.png similarity index 100% rename from docs/source/images/login-screen.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/login-screen.png diff --git a/docs/source/devguides/eclipse/files/new-python-run.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/new-python-run.png similarity index 100% rename from docs/source/devguides/eclipse/files/new-python-run.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/new-python-run.png diff --git a/docs/source/images/overview.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/overview.png similarity index 100% rename from docs/source/images/overview.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/overview.png diff --git a/docs/source/files/pin-to-dashboard.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/pin-to-dashboard.png similarity index 100% rename from docs/source/files/pin-to-dashboard.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/pin-to-dashboard.png diff --git a/docs/source/images/platform-default.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/platform-default.png similarity index 100% rename from docs/source/images/platform-default.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/platform-default.png diff --git a/docs/source/devguides/eclipse/files/platform-run-config.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/platform-run-config.png similarity index 100% rename from docs/source/devguides/eclipse/files/platform-run-config.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/platform-run-config.png diff --git a/docs/source/images/platforms.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/platforms.png similarity index 100% rename from docs/source/images/platforms.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/platforms.png diff --git a/docs/source/devguides/eclipse/files/pydev-python.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/pydev-python.png similarity index 100% rename from docs/source/devguides/eclipse/files/pydev-python.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/pydev-python.png diff --git a/docs/source/files/register-new-platform-authorization.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/register-new-platform-authorization.png similarity index 100% rename from docs/source/files/register-new-platform-authorization.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/register-new-platform-authorization.png diff --git a/docs/source/images/register-new-platform.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/register-new-platform.png similarity index 100% rename from docs/source/images/register-new-platform.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/register-new-platform.png diff --git a/docs/source/images/select-branch.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/select-branch.png similarity index 100% rename from docs/source/images/select-branch.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/select-branch.png diff --git a/docs/source/devguides/eclipse/files/select-path.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/select-path.png similarity index 100% rename from docs/source/devguides/eclipse/files/select-path.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/select-path.png diff --git a/docs/source/devguides/eclipse/files/select-repo.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/select-repo.png similarity index 100% rename from docs/source/devguides/eclipse/files/select-repo.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/select-repo.png diff --git a/docs/source/devguides/eclipse/files/set-as-pydev.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/set-as-pydev.png similarity index 100% rename from docs/source/devguides/eclipse/files/set-as-pydev.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/set-as-pydev.png diff --git a/docs/source/devguides/eclipse/files/setup-python.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/setup-python.png similarity index 100% rename from docs/source/devguides/eclipse/files/setup-python.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/setup-python.png diff --git a/docs/source/images/transport-payload.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/transport-payload.png similarity index 100% rename from docs/source/images/transport-payload.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/transport-payload.png diff --git a/docs/source/images/vbox-bidirectional.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-bidirectional.png similarity index 100% rename from docs/source/images/vbox-bidirectional.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-bidirectional.png diff --git a/docs/source/images/vbox-controller.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-controller.png similarity index 100% rename from docs/source/images/vbox-controller.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-controller.png diff --git a/docs/source/images/vbox-credentials.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-credentials.png similarity index 100% rename from docs/source/images/vbox-credentials.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-credentials.png diff --git a/docs/source/images/vbox-download.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-download.png similarity index 100% rename from docs/source/images/vbox-download.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-download.png diff --git a/docs/source/images/vbox-hard-disk-xfce.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-hard-disk-xfce.png similarity index 100% rename from docs/source/images/vbox-hard-disk-xfce.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-hard-disk-xfce.png diff --git a/docs/source/images/vbox-memory-size.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-memory-size.png similarity index 100% rename from docs/source/images/vbox-memory-size.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-memory-size.png diff --git a/docs/source/images/vbox-naming.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-naming.png similarity index 100% rename from docs/source/images/vbox-naming.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-naming.png diff --git a/docs/source/images/vbox-proc-settings.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-proc-settings.png similarity index 100% rename from docs/source/images/vbox-proc-settings.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vbox-proc-settings.png diff --git a/docs/source/images/vc-run-demo.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/vc-run-demo.png similarity index 100% rename from docs/source/images/vc-run-demo.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/vc-run-demo.png diff --git a/docs/source/devguides/eclipse/files/volttron-console.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-console.png similarity index 100% rename from docs/source/devguides/eclipse/files/volttron-console.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-console.png diff --git a/docs/source/images/volttron-main-args.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-main-args.png similarity index 100% rename from docs/source/images/volttron-main-args.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-main-args.png diff --git a/docs/source/devguides/eclipse/files/volttron-main.png b/docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-main.png similarity index 100% rename from docs/source/devguides/eclipse/files/volttron-main.png rename to docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-main.png diff --git a/docs/source/images/volttron-webimage.jpg b/docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-webimage.jpg similarity index 100% rename from docs/source/images/volttron-webimage.jpg rename to docs/source/developing-volttron/development-environment/virtual_machine/files/volttron-webimage.jpg diff --git a/docs/source/setup/install-vm.rst b/docs/source/developing-volttron/development-environment/virtual_machine/install-vm.rst similarity index 76% rename from docs/source/setup/install-vm.rst rename to docs/source/developing-volttron/development-environment/virtual_machine/install-vm.rst index b8c2c6d002..23f844ea21 100644 --- a/docs/source/setup/install-vm.rst +++ b/docs/source/developing-volttron/development-environment/virtual_machine/install-vm.rst @@ -1,8 +1,8 @@ -.. _install-vm: +.. _Install-VM: -================================ -Installing Linux Virtual Machine -================================ +================================== +Installing a Linux Virtual Machine +================================== VOLTTRON requires a Linux system to run. For Windows users this will require a virtual machine (VM). @@ -12,7 +12,7 @@ https://www.virtualbox.org/wiki/Downloads. |VirtualBox Download| -.. |VirtualBox Download| image:: images/vbox-download.png +.. |VirtualBox Download| image:: files/vbox-download.png After installing VirtualBox download a virtual box appliance from https://www.osboxes.org/linux-mint/ extract the VDI from the downlaoded archive, **or** download a system installation disk. VOLTTRON version 7.0.x has been tested @@ -27,37 +27,37 @@ as an example, however platform setup in Ubuntu should be identical. Adding a VDI Image to VirtualBox Environment --------------------------------------------- +******************************************** |Linux Mint| -.. |Linux Mint| image:: images/linux-mint.png +.. |Linux Mint| image:: files/linux-mint.png The below info holds the VM's preset username and password. |Linux Mint Credentials| -.. |Linux Mint Credentials| image:: images/vbox-credentials.png +.. |Linux Mint Credentials| image:: files/vbox-credentials.png Create a new VirtualBox Image. |VirtualBox VM Naming| -.. |VirtualBox VM Naming| image:: images/vbox-naming.png +.. |VirtualBox VM Naming| image:: files/vbox-naming.png Select the amount of RAM for the VM. The recommended minimum is shown in the image below: |VirtualBox Memory Size Selection| -.. |VirtualBox Memory Size Selection| image:: images/vbox-memory-size.png +.. |VirtualBox Memory Size Selection| image:: files/vbox-memory-size.png Specify the hard drive image using the extracted VDI file. |VirtualBox Hard Disk| -.. |VirtualBox Hard Disk| image:: images/vbox-hard-disk-xfce.png +.. |VirtualBox Hard Disk| image:: files/vbox-hard-disk-xfce.png With the newly created VM selected, choose Machine from the VirtualBox menu in the top left corner of the VirtualBox window; from the drop down menu, choose Settings. @@ -67,7 +67,7 @@ Drag’n’Drop as Bidirectional. |VirtualBox Bidirectional| -.. |VirtualBox Bidirectional| image:: images/vbox-bidirectional.png +.. |VirtualBox Bidirectional| image:: files/vbox-bidirectional.png .. note:: Currently, this feature only works under certain circumstances (e.g. copying / pasting text). @@ -76,7 +76,7 @@ Go to System Settings. In the processor tab, set the number of processors to two |VirtualBox Processors| -.. |VirtualBox Processors| image:: images/vbox-proc-settings.png +.. |VirtualBox Processors| image:: files/vbox-proc-settings.png Go to Storage Settings. Confirm that the Linux Mint VDI is attached to Controller: SATA. @@ -87,9 +87,7 @@ Go to Storage Settings. Confirm that the Linux Mint VDI is attached to Controlle |VirtualBox Controller| -.. |VirtualBox Controller| image:: images/vbox-controller.png +.. |VirtualBox Controller| image:: files/vbox-controller.png Start the machine by saving these changes and clicking the “Start” arrow located on the upper left hand corner of the main VirtualBox window. - - diff --git a/docs/source/devguides/supporting/files/jupyter_notebooks.jpg b/docs/source/developing-volttron/jupyter/files/jupyter_notebooks.jpg similarity index 100% rename from docs/source/devguides/supporting/files/jupyter_notebooks.jpg rename to docs/source/developing-volttron/jupyter/files/jupyter_notebooks.jpg diff --git a/docs/source/developing-volttron/jupyter/jupyter-notebooks.rst b/docs/source/developing-volttron/jupyter/jupyter-notebooks.rst new file mode 100644 index 0000000000..5dae9152d3 --- /dev/null +++ b/docs/source/developing-volttron/jupyter/jupyter-notebooks.rst @@ -0,0 +1,234 @@ +.. _Jupyter-Notebooks: + +================= +Jupyter Notebooks +================= + +Jupyter is an open-source web application that lets you create and share “notebook” documents. A notebook displays +formatted text along with live code that can be executed from the browser, displaying the execution output and +preserving it in the document. Notebooks that execute Python code used to be called `iPython Notebooks`. The iPython +Notebook project has now merged into Project Jupyter. + + +Using Jupyter to Manage a Set of VOLTTRON Servers +------------------------------------------------- + +The following Jupyter notebooks for VOLTTRON have been provided as examples: + + - **Collector notebooks**. Each Collector notebook sets up a particular type of device driver + and forwards device data to another VOLTTRON instance, the Aggregator. + + - **SimulationCollector notebook**. This notebook sets up a group of Simulation device drivers + and forwards device data to another VOLTTRON instance, the Aggregator. + - **BacnetCollector notebook**. This notebook sets up a Bacnet (or Bacnet gateway) device driver + and forwards device data to another VOLTTRON instance, the Aggregator. + - **ChargePointCollector notebook**. This notebook sets up a ChargePoint device driver + and forwards device data to another VOLTTRON instance, the Aggregator. + - **SEP2Collector notebook**. This notebook sets up a SEP2.0 (IEEE 2030.5) device driver + and forwards device data to another VOLTTRON instance, the Aggregator. + The Smart Energy Profile 2.0 ("SEP2") protocol implements IEEE 2030.5, and is capable + of connecting a wide array of smart energy devices to the Smart Grid. The standard is + designed to run over TCP/IP and is physical layer agnostic. + + - **Aggregator notebook**. This notebook sets up and executes aggregation of forwarded data + from other VOLTTRON instances, using a historian to record the data. + - **Observer notebook**. This notebook sets up and executes a DataPuller that captures data from + another VOLTTRON instance, using a Historian to record the data. It also uses the + Message Debugger agent to monitor messages flowing across the VOLTTRON bus. + +Each notebook configures and runs a set of VOLTTRON Agents. When used as a set they implement a +multiple-VOLTTRON-instance architecture that captures remote device data, aggregates it, and reports on it, routing the +data as follows: + +.. image:: files/jupyter_notebooks.jpg + + +Install VOLTTRON and Jupyter on a Server +---------------------------------------- + +The remainder of this guide describes how to set up a host for VOLTTRON and Jupyter. Use this setup process on a server +in order to prepare it to run Jupyter notebook for VOLTTRON. + + +Set Up the Server and Install VOLTTRON +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The following is a complete, but terse, description of the steps for installing and running VOLTTRON on a server. For +more detailed, general instructions, see :ref:`Installing Volttron `. + +The VOLTTRON server should run on the same host as the Jupyter server. + +* Load third-party software: + +.. code-block:: bash + + $ sudo apt-get update + $ sudo apt-get install build-essential python-dev openssl libssl-dev libevent-dev git + $ sudo apt-get install sqlite3 + +* Clone the VOLTTRON repository from github: + +.. code-block:: bash + + $ cd ~ + $ mkdir repos + $ cd repos + $ git clone https://github.com/VOLTTRON/volttron/ + +* Check out the develop (or master) branch and bootstrap the development environment: + +.. code-block:: bash + + $ cd volttron + $ git checkout develop + $ python bootstrap.py + +* Activate and initialize the VOLTTRON virtual environment: + + Run the following each time you open a new command-line shell on the server: + + .. code-block:: bash + + $ export VOLTTRON_ROOT=~/repos/volttron + $ export VOLTTRON_HOME=~/.volttron + $ cd $VOLTTRON_ROOT + $ source env/bin/activate + + +Install Extra Libraries +^^^^^^^^^^^^^^^^^^^^^^^ + +* Add Python libraries to the VOLTTRON virtual environment: + +These notebooks use third-party software that's not included in VOLTTRON's standard distribution that was loaded by +`bootstrap.py`. The following additional packages are required: + +- Jupyter +- SQLAlchemy (for the Message Debugger) +- Suds (for the ChargePoint driver, if applicable) +- Numpy and MatPlotLib (for plotted output) + +.. Note:: + + A Jupyter installation also installs and/or upgrades many dependent libraries. Doing so could disrupt other work on + the OS, so it’s safest to load Jupyter (and any other library code) in a virtual environment. VOLTTRON runs in a + virtual environment during normal operation, so if you're using Jupyter in conjunction with VOLTTRON, it should be + installed in your VOLTTRON virtual environment (In other words, be sure to use `cd $VOLTTRON_ROOT` and + `source env/bin/activate` to activate the virtual environment before running ``pip install``.) + +* Install the third-party software: + +.. code-block:: bash + + $ pip install SQLAlchemy==1.1.4 + $ pip install suds-jurko==0.6 + $ pip install numpy + $ pip install matplotlib + $ pip install jupyter + +.. Note:: + + If `pip install` fails due to an untrusted cert, try using this command instead: + + .. code-block:: bash + + $ pip install --trusted-host pypi.python.org + + An InsecurePlatformWarning may be displayed, but it typically won't stop the installation from proceeding. + + +Configure VOLTTRON +------------------ + +Use the `vcfg` wizard to configure the VOLTTRON instance. By default, the wizard configures a VOLTTRON instance that +communicates with agents only on the local host (ip 127.0.0.1). This set of notebooks manages communications among +multiple VOLTTRON instances on different hosts. To enable this cross-host communication on VOLTTRON's web server, +replace 127.0.0.1 with the host's IP address, as follows: + +.. code-block:: bash + + $ vcfg + +Accept all defaults, except as follows: + +- If a prompt defaults to 127.0.0.1 as an IP address, substitute the `host's IP address` (this may happen multiple + times). +- When asked whether this is a volttron central, answer `Y`. +- When prompted for a username and password, use `admin` and `admin`. + + +Start VOLTTRON +-------------- + +Start the main VOLTTRON process, logging to $VOLTTRON_ROOT/volttron.log: + +.. code-block:: bash + + $ volttron -vv -l volttron.log --msgdebug + +This runs VOLTTRON as a foreground process. To run it in the background, use: + +.. code-block:: + + $ ./start-volttron --msgdebug + +This also enables the Message Debugger, a non-production VOLTTRON debugging aid that's used by some notebooks. To run +with the Message Debugger disabled (VOLTTRON's normal state), omit the ``--msgdebug`` flag. + +Now that VOLTTRON is running, it's ready for agent configuration and execution. Each Jupyter notebook contains detailed +instructions and executable code for doing that. + + +Configure Jupyter +----------------- + +More detailed information about installing, configuring and using Jupyter Notebooks is available on the Project Jupyter +site, http://jupyter.org/. + +* Create a Jupyter configuration file: + +.. code-block:: bash + + $ jupyter notebook --generate-config + +* Revise the Jupyter configuration: + +Open `~/.jupyter/jupyter_notebook_config.py` in your favorite text editor. Change the configuration to accept +connections from any IP address (not just from localhost) and use a specific, non-default port number: + +- Un-comment ``c.NotebookApp.ip`` and set it to: ``*`` instead of ``localhost`` +- Un-comment ``c.NotebookApp.port`` and set it to: ``8891`` instead of ``8888`` + +Save the config file. + +* Open ports for TCP connections: + +Make sure that your Jupyter server host's security rules allow inbound TCP connections on port `8891`. + +If the VOLTTRON instance needs to receive TCP requests, for example ForwardHistorian or DataPuller messages from other +VOLTTRON instances, make sure that the host's security rules also allow inbound TCP communications on VOLTTRON's port, +which is usually `22916`. + + +Launch Jupyter +-------------- + +* Start the Jupyter server: + +In a separate command-line shell, set up VOLTTRON's environment variables and virtual environment, and then launch the +Jupyter server: + +.. code-block:: bash + + $ export VOLTTRON_HOME=(your volttron home directory, e.g. ~/.volttron) + $ export VOLTTRON_ROOT=(where volttron was installed; e.g. ~/repos/volttron) + $ cd $VOLTTRON_ROOT + $ source env/bin/activate + $ cd examples/JupyterNotebooks + $ jupyter notebook --no-browser + +* Open a Jupyter client in a web browser: + +Look up the host's IP address (e.g., using ifconfig). Open a web browser and navigate to the URL that was displayed when +you started jupyter, replacing `localhost` with that IP address. A Jupyter web page should display, listing your +notebooks. diff --git a/docs/source/developing-volttron/python-for-matlab-users.rst b/docs/source/developing-volttron/python-for-matlab-users.rst new file mode 100644 index 0000000000..289c874be2 --- /dev/null +++ b/docs/source/developing-volttron/python-for-matlab-users.rst @@ -0,0 +1,194 @@ +.. _Python-for-Matlab-Users: + +======================= +Python for Matlab Users +======================= + +Matlab is a popular proprietary programming language and tool suite with built in support for matrix operations and +graphically plotting computation results. The purpose of this document is to introduce Python to those already familiar +Matlab so it will be easier for them to develop tools and agents in VOLTTRON. + + +A Simple Function +----------------- + +Python and Matlab are similar in many respects, syntactically and semantically. With the addition of the NumPy library +in Python, almost all numerical operations in Matlab can be emulated or directly translated. Here are functions in each +language that perform the same operation: + +.. code-block:: matlab + + % Matlab + function [result] = times_two(number) + result = number * 2; + end + +.. code-block:: python + + # Python + def times_two(number): + result = number * 2 + return result + +Some notes about the previous functions: + +#. Values are explicitly returned with the `return` statement. It is possible to return multiple values, as in Matlab, + but doing this without a good reason can lead to overcomplicated functions. + +#. Semicolons are not used to end statements in python, and white space is significant. After a block is started (if, + for, while, functions, classes) subsequent lines should be indented with four spaces. The block ends when the + programmer stops adding the extra level of indentation. + + +Translating +----------- + +The following may be helpful if you already have a Matlab file or function that will be translated into Python. Many of +the syntax differences between Matlab and Python can be rectified with your text editor's find and replace feature. + +Start by copying all of your Matlab code into a new file with a `.py` extension. It is recommended to start by +commenting everything out and uncommenting the Matlab code in chunks. This way it is possible to write valid Python and +verify it as you translate, instead of waiting till the whole file is "translated". Editors designed to work with +Python should be able to highlight syntax errors as well. + +#. Comments are created with a `%`. Find and replace these with `#`. + +.. code-block:: python + + def test_function(): + # single line Python comment + """ + Multi-line Python comment + """ + pass # inline Python comment + +#. Change `elseif` blocks to `elif` blocks. + +.. code-block:: python + + if thing == 0: + do_thing1() + elif thing ==1: + do_thing2() + else: + do_the_last_thing() + +#. Python indexes start at zero instead of one. Array slices and range operations don't include the upper bound, so + only the lower bound should decrease by one. The following examples are of Python code in the console: + +.. code-block:: console + + >>> test_array = [0, 1, 2, 3, 4] + >>> test_array[0] + 0 + >>> test_array[1] + 1 + >>> test_array[0:2] + [0, 1] + >>>>>> test_array[:2] + [0, 1] + >>> test_array[2:] + [2, 3, 4] + >>> + +#. Semicolons in Matlab are used to suppress output at the end of lines and for organizing array literals. After + arranging the arrays into nested lists, all semicolons can be removed. + +#. The `end` keyword in Matlab is used both to access the last element in an array and to close blocks. The array use + case can be replaced with `-1` and the others can be removed entirely. + +.. code-block:: console + + >>> test_array = [0, 1, 2, 3, 4] + >>> test_array[-1] + 4 + >>> + + +A More Concrete Example +^^^^^^^^^^^^^^^^^^^^^^^ + +In the `Building Economic Dispatch `_ project, a sibling project to VOLTTRON, +a number of components written in Matlab would create a matrix out of some collection of columns and perform least +squares regression using the `matrix division` operator. This is straightforward and very similar in both languages +assuming that all of the columns are defined and are the same length. + +.. code-block:: matlab + + % Matlab + XX = [U, xbp, xbp2, xbp3, xbp4, xbp5]; + AA = XX \ ybp; + +.. code-block:: python + + # Python + import numpy as np + + XX = np.column_stack((U, xbp, xbp2, xbp3, xbp4, xbp5)) + AA, resid, rank, s = np.linalg.lstsq(XX, ybp) + +This pattern also included the creation of the `U` column, a column of ones used as the bias term in the linear equation +. In order to make the Python version more readable and more robust, the pattern was removed from each component and +replaced with a single function call to `least_squares_regression`. + +This function does some validation on the input parameters, automatically creates the bias column, and returns the least +squares solution to the system. Now if we want to change how the solution is calculated we only have to change the one +function, instead of each instance where the pattern was written originally. + +.. code-block:: python + + def least_squares_regression(inputs=None, output=None): + if inputs is None: + raise ValueError("At least one input column is required") + if output is None: + raise ValueError("Output column is required") + + if type(inputs) != tuple: + inputs = (inputs,) + + ones = np.ones(len(inputs[0])) + x_columns = np.column_stack((ones,) + inputs) + + solution, resid, rank, s = np.linalg.lstsq(x_columns, output) + return solution + +Lessons Learned (sometimes the hard way) +---------------------------------------- + + +Variable Names +^^^^^^^^^^^^^^ + +Use descriptive function and variable names whenever possible. The most important things to consider here are reader +comprehension and searching. Consider a variable called `hdr`. Is it `header` without any vowels, or is it short for +`high-dynamic-range`? Spelling out full words in variable names can save someone else a lot of guesswork. + +Searching comes in when we're looking for instances of a string or variable. Single letter variable names are +impossible to search for. Variables names describing the value being stored in a concise but descriptive manner are +preferred. + + +Matlab load/save +^^^^^^^^^^^^^^^^ + +Matlab has built-in functions to automatically save and load variables from your programs to disk. Using these +functions can lead to poor program design and should be avoided if possible. It would be best to refactor as you +translate if they are being used. Few operations are so expensive that that cannot be redone every time the program is +run. For part of the program that saves variables, consider making a function that simply returns them instead. + +If your Matlab program is loading csv files then use the Pandas library when working in python. Pandas works well with +NumPy and is the go-to library when using csv files that contain numeric data. + + +More Resources +-------------- + +`NumPy for Matlab Users +`_ +Has a nice list of common operations in Matlab and NumPy. + +`NumPy Homepage +`_ + +`Pandas Homepage +`_ diff --git a/docs/source/devguides/agent_development/Agent-Development-Cheatsheet.rst b/docs/source/devguides/agent_development/Agent-Development-Cheatsheet.rst deleted file mode 100644 index 70411168cf..0000000000 --- a/docs/source/devguides/agent_development/Agent-Development-Cheatsheet.rst +++ /dev/null @@ -1,211 +0,0 @@ -.. _Agent-Development-Cheatsheet: - -Agent Development Cheat Sheet -============================= - -This is a catalogue of features available in volttron -that are frequently useful in agent development. - - -Utilities ---------- -These functions can be found in the *volttron.platform.agent.utils* module. -*logging* also needs to be imported to use the logger. - -setup_logging -~~~~~~~~~~~~~ -You'll probably see the following lines near the top of agent files: - -.. code-block:: python - - utils.setup_logging() - _log = logging.getLogger(__name__) - -This code sets up the logger for this module so it can provide more useful -output. In most cases it will be better to use the logger in lieu of simply -printing messages with print. - -load_config -~~~~~~~~~~~ -load_config does just that. Give it the path to your config file and it will -parse the json and return a dictionary. - -vip_main -~~~~~~~~ -This is the function that is called to start your agent. You'll likely -see it in the main methods at the bottom of agents' files. Whatever is -passed to it (a class name or a function that returns an instance of -your agent) should accept a file path that can be parsed with load_config. - - -Core Agent Functionality ------------------------- -These tools volttron.platform.vip.agent module. -Try importing - - -Agent Lifecycle Events -~~~~~~~~~~~~~~~~~~~~~~ -Each agent has four events that are triggered at different stages -of its life. These are onsetup, onstart, onstop, and onfinish. Registering -callbacks to these events are commonplace in agent development, with onstart -being the most frequently used. - -The easiest way to register a callback is with a function decorator: - -.. code-block:: python - - @Core.receiver('onstart') - def function(self, sender, **kwargs): - function_body - -Periodic and Scheduled Function Calls -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Functions and agent methods can be registered to be called periodically or scheduled -to run at a particular time using the Core.schedule decorator or by calling an agent's -core.schedule() method. The latter is especially useful if, for example, a decision -needs to be made in an agent's onstart method as to whether a call should be scheduled. - -.. code-block:: python - - from volttron.platform.scheduling import cron, periodic - - @Core.schedule(t) - def function(self): - ... - - @Core.schedule(periodic(t)) - def periodic_function(self): - ... - - @Core.schedule(cron('0 1 * * *')) - def cron_function(self): - ... - -or - -.. code-block:: python - - # inside some agent method - self.core.schedule(t, function) - self.core.schedule(periodic(t), periodic_function) - self.core.schedule(cron('0 1 * * *'), cron_function) - - -Subsystem ----------- -These features are available to all Agent subclasses. No extra imports are required. - -Remote Procedure Calls -~~~~~~~~~~~~~~~~~~~~~~ -Remote Procedure Calls, or RPCs are a powerful way to interact with other agents. -To make a function available to call by a remote agent just add the export decorator: - -.. code-block:: python - - @RPC.export - def function(self, ...): - function_body - -*function* can now be called by a remote agent *agent* with - -.. code-block:: python - - # vip identity is the identity (a string) of the agent - # where function() is defined - agent.vip.rpc.call(vip, 'function').get(timeout=t) - -Pubsub -~~~~~~ -Agents can publish and subscribe to topics. Like RPC, pubsub functions can be invoked -via decorators or inline through vip. The following function is called whenever -the agent sees a message starting with *topic_prefix*. - -.. code-block:: python - - @PubSub.subscribe('pubsub', topic_prefix) - def function(self, peer, sender, bus, topic, headers, message): - function_body - -An agent can publish to a topic *topic* with the *self.vip.pubsub.publish* method. - -An agent can remove a subscriptions with *self.vip.pubsub.unsubscribe*. Giving None as values -for the prefix and callback argument will unsubscribe from everything on that bus. This -is handy for subscriptions that must be updated base on a configuration setting. - -Configuration Store -~~~~~~~~~~~~~~~~~~~ - -Support for the configuration store is done by subscribing to configuration changes -with *self.vip.config.subscribe*. - -.. code-block:: python - - self.vip.config.subscribe(self.configure_main, actions=["NEW", "UPDATE"], pattern="config") - -See :doc:`Agent Configuration Store ` - -Heartbeat -~~~~~~~~~ -The heartbeat subsystem provides access to a periodic publish so that others -can observe the agent's status. Other agents can subscibe to the -*heartbeat* topic to see who is actively publishing to it. - -It it turned off by default. - -Health -~~~~~~ -The health subsystem adds extra status information to the an agent's heartbeat. -Setting the status will start the heartbeat if it wasn't already. - - -Agent Skeleton --------------- - -.. code-block:: python - - import logging - - from volttron.platform.vip.agent import Agent, Core, PubSub, RPC - from volttron.platform.agent import utils - - utils.setup_logging() - _log = logging.getLogger(__name__) - - - class MyAgent(Agent): - def __init__(self, config_path, **kwargs): - self.config = utils.load_config(config_path) - - @Core.receiver('onsetup') - def onsetup(self, sender, **kwargs): - pass - - @Core.receiver('onstart') - def onstart(self, sender, **kwargs): - self.vip.heartbeat.start() - - @Core.receiver('onstop') - def onstop(self, sender, **kwargs): - pass - - @Core.receiver('onfinish') - def onfinish(self, sender, **kwargs): - pass - - @PubSub.subscribe('pubsub', 'some/topic') - def on_match(self, peer, sender, bus, topic, headers, message): - pass - - @RPC.export - def my_method(self): - pass - - def main(): - utils.vip_main(MyAgent) - - if __name__ == '__main__': - try: - main() - except KeyboardInterrupt: - pass diff --git a/docs/source/devguides/agent_development/Agent-Development.rst b/docs/source/devguides/agent_development/Agent-Development.rst deleted file mode 100644 index e3bc8cf52d..0000000000 --- a/docs/source/devguides/agent_development/Agent-Development.rst +++ /dev/null @@ -1,422 +0,0 @@ -.. _Agent-Development: - -Agent Creation Walkthrough --------------------------- - -The VOLTTRON platform now has utilities to speed the creation and installation -of new agents. To use these utilities the VOLTTRON environment must be activated. - -From the project directory, activate the VOLTTRON environment with: - -``. env/bin/activate`` - -Create Agent Code -~~~~~~~~~~~~~~~~~ - -Run the following command to start the Agent Creation Wizard: - -``vpkg init TestAgent tester`` - -`TestAgent` is the directory that the agent code will be placed in. The directory must -not exist when the command is run. - -`tester` is the name of the agent module created by wizard. - -The Wizard will prompt for the following information: - -:: - - Agent version number: [0.1]: 0.5 - Agent author: []: VOLTTRON Team - Author's email address: []: volttron@pnnl.gov - Agent homepage: []: https://volttron.org/ - Short description of the agent: []: Agent development tutorial. - -Once the last question is answered the following will print to the console: - -:: - - 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent - 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/tester - 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/setup.py - 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/config - 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/tester/agent.py - 2018-08-02 12:20:56,604 () volttron.platform.packaging INFO: Creating TestAgent/tester/__init__.py - -The TestAgent directory is created with the new Agent inside. - -Agent Directory -~~~~~~~~~~~~~~~ - -At this point, the contents of the TestAgent directory should look like: - -:: - - TestAgent/ - ├── setup.py - ├── config - └── tester - ├── agent.py - └── __init__.py - - -Examine the Agent Code -~~~~~~~~~~~~~~~~~~~~~~ - -The resulting code is well documented with comments and documentation strings. It -gives examples of how to do common tasks in VOLTTRON Agents. - -The main agent code is found in `tester/agent.py` - -Here we will cover the highlights. - -Parse Packaged Configuration and Create Agent Instance -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The code to parse a configuration file packaged and installed with the agent -is found in the `tester` function: - -:: - - def tester(config_path, **kwargs): - """Parses the Agent configuration and returns an instance of - the agent created using that configuration. - - :param config_path: Path to a configuration file. - - :type config_path: str - :returns: Tester - :rtype: Tester - """ - try: - config = utils.load_config(config_path) - except StandardError: - config = {} - - if not config: - _log.info("Using Agent defaults for starting configuration.") - - setting1 = int(config.get('setting1', 1)) - setting2 = config.get('setting2', "some/random/topic") - - return Tester(setting1, - setting2, - **kwargs) - -The configuration is parsed with the `utils.load_config` function and the results -are stored in the `config` variable. - -An instance of the Agent is created from the parsed values and is returned. - -Initialization and Configuration Store Support -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The configuration store is a powerful feature introduced in VOLTTRON 4. -The agent template provides a simple example of setting up default configuration -store values and setting up a configuration handler. - -:: - - class Tester(Agent): - """ - Document agent constructor here. - """ - - def __init__(self, setting1=1, setting2="some/random/topic", - **kwargs): - super(Tester, self).__init__(**kwargs) - _log.debug("vip_identity: " + self.core.identity) - - self.setting1 = setting1 - self.setting2 = setting2 - - self.default_config = {"setting1": setting1, - "setting2": setting2} - - - #Set a default configuration to ensure that self.configure is called immediately to setup - #the agent. - self.vip.config.set_default("config", self.default_config) - #Hook self.configure up to changes to the configuration file "config". - self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") - - def configure(self, config_name, action, contents): - """ - Called after the Agent has connected to the message bus. If a configuration exists at startup - this will be called before onstart. - - Is called every time the configuration in the store changes. - """ - config = self.default_config.copy() - config.update(contents) - - _log.debug("Configuring Agent") - - try: - setting1 = int(config["setting1"]) - setting2 = str(config["setting2"]) - except ValueError as e: - _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) - return - - self.setting1 = setting1 - self.setting2 = setting2 - - self._create_subscriptions(self.setting2) - -Values in the default config can be built into the agent or come from the -packaged configuration file. The subscribe method tells our agent which function -to call whenever there is a new or updated config file. For more information -on using the configuration store see :doc:`Agent Configuration Store `. - -`_create_subscriptions` (covered in the next section) will use the value in `self.setting2` -to create a new subscription. - -Setting up a Subscription -^^^^^^^^^^^^^^^^^^^^^^^^^ - -The Agent creates a subscription using the value of `self.setting2` in the method -`_create_subscription`. The messages for this subscription are handled with -the `_handle_publish` method: - -:: - - def _create_subscriptions(self, topic): - #Unsubscribe from everything. - self.vip.pubsub.unsubscribe("pubsub", None, None) - - self.vip.pubsub.subscribe(peer='pubsub', - prefix=topic, - callback=self._handle_publish) - - def _handle_publish(self, peer, sender, bus, topic, headers, - message): - #By default no action is taken. - pass - -Agent Lifecycle Events -^^^^^^^^^^^^^^^^^^^^^^ - -Methods may be setup to be called at agent startup and shutdown: - -:: - - @Core.receiver("onstart") - def onstart(self, sender, **kwargs): - """ - This method is called once the Agent has successfully connected to the platform. - This is a good place to setup subscriptions if they are not dynamic or to - do any other startup activities that require a connection to the message bus. - Called after any configurations methods that are called at startup. - - Usually not needed if using the configuration store. - """ - #Example publish to pubsub - #self.vip.pubsub.publish('pubsub', "some/random/topic", message="HI!") - - #Exmaple RPC call - #self.vip.rpc.call("some_agent", "some_method", arg1, arg2) - - @Core.receiver("onstop") - def onstop(self, sender, **kwargs): - """ - This method is called when the Agent is about to shutdown, but before it disconnects from - the message bus. - """ - pass - -As the comment mentions, with the new configuration store feature `onstart` methods -are mostly unneeded. However this code does include an example of how to do a Remote -Procedure Call to another agent. - -Agent Remote Procedure Calls -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -An agent may receive commands from other agents via a Remote Procedure Call (RPC). -This is done with the `@RPC.export` decorator: - -:: - - @RPC.export - def rpc_method(self, arg1, arg2, kwarg1=None, kwarg2=None): - """ - RPC method - - May be called from another agent via self.core.rpc.call """ - return self.setting1 + arg1 - arg2 - - -Packaging Configuration -~~~~~~~~~~~~~~~~~~~~~~~ - -The wizard will automatically create a `setup.py` file. This file sets up the -name, version, required packages, method to execute, etc. for the agent based on -your answers to the wizard. The packaging process will also use this -information to name the resulting file. - -:: - - from setuptools import setup, find_packages - - MAIN_MODULE = 'agent' - - # Find the agent package that contains the main module - packages = find_packages('.') - agent_package = 'tester' - - # Find the version number from the main module - agent_module = agent_package + '.' + MAIN_MODULE - _temp = __import__(agent_module, globals(), locals(), ['__version__'], -1) - __version__ = _temp.__version__ - - # Setup - setup( - name=agent_package + 'agent', - version=__version__, - author_email="volttron@pnnl.gov", - url="https://volttron.org/", - description="Agent development tutorial.", - author="VOLTTRON Team", - install_requires=['volttron'], - packages=packages, - entry_points={ - 'setuptools.installation': [ - 'eggsecutable = ' + agent_module + ':main', - ] - } - ) - -Launch Configuration -~~~~~~~~~~~~~~~~~~~~ - -In TestAgent, the wizard will automatically create a JSON file called "config". -It contains configuration information for the agent. This file contains -examples of every datatype supported by the configuration system: - -:: - - { - # VOLTTRON config files are JSON with support for python style comments. - "setting1": 2, #Integers - "setting2": "some/random/topic2", #Strings - "setting3": true, #Booleans: remember that in JSON true and false are not capitalized. - "setting4": false, - "setting5": 5.1, #Floating point numbers. - "setting6": [1,2,3,4], #Lists - "setting7": {"setting7a": "a", "setting7b": "b"} #Objects - } - - - - -Packaging and Installing the Agent -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -To install the agent the platform must be running. Start the platform with the command: - -``./start-volttron`` - -.. note:: If you are not in an activated environment, this script will start - the platform running in the background in the correct environment. However - the environment will not be activated for you; you must activate it yourself. - -Now we must install it into the platform. Use the following command to install it and add a tag for easily referring to -the agent. From the project directory, run the following command: - -``python scripts/install-agent.py -s TestAgent/ -c TestAgent/config -t testagent`` - -To verify it has been installed, use the following command: -``vctl list`` - -This will result in output similar to the following: - -.. code-block:: bash - - AGENT IDENTITY TAG PRI - df testeragent-0.5 testeragent-0.5_1 testagent - -The initial number or letter is a unique portion of the full UUID for the agent. AGENT is -the "name" of the agent based on the contents of its class name and the version in its setup.py. IDENTITY is the -agent's identity in the platform. This is automatically assigned based on class name and instance number. This agent's -ID is _1 because it is the first instance. TAG is the name we assigned in the command above. PRI is the priority for -agents which have been "enabled" using the ``vctl enable`` command. - -When using lifecycle commands on agents, they can be referred to by the UUID (default) or AGENT (name) or TAG. - - -Testing the Agent -~~~~~~~~~~~~~~~~~ - -From the Command Line -^^^^^^^^^^^^^^^^^^^^^ - -To test the agent, we will start the platform (if not already running), launch the agent, and -check the log file. - -With the VOLTTRON environment activated, start the platform by running (if needed): - -``./start-volttron`` - -You can launch the agent in three ways, all of which you can find by using the -``vctl list`` command: - -- By using the : - -``vctl start `` - -- By name: - -``vctl start --name testeragent-0.1`` - -- By tag: - -``vctl start --tag testagent`` - -Check that it is :ref:`running `: - -``vctl status`` - -- Start the ListenerAgent as in :ref:`Building VOLTTRON `. -- Check the log file for messages indicating the TestAgent is receiving - the ListenerAgents messages: - -Automated Test cases and documentation -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Before contributing a new agent to the VOLTTRON source code repository, please consider adding two other essential -elements. - -1. Integration and unit test cases -2. README file that includes details of pre-requisite software, agent setup details (such as setting up databases, - permissions, etc.)and sample configuration - -VOLTTRON uses py.test as a framework for executing tests. All unit tests should be based on py.test framework. -py.test is not installed with the distribution by default. To install py.test and it's dependencies execute the -following: - -.. code-block:: bash - - python bootstrap.py --testing - -.. note:: - - There are other options for different agent requirements. To see all of the options use: - - .. code-block:: bash - - python bootstrap.py --help - - in the Extra Package Options section. - -To run a single test module, use the command - -.. code-block:: bash - - pytest - -To run all of the tests in the volttron repository execute the following in the -root directory using an activated command prompt: - -.. code-block:: bash - - ./ci-integration/run-tests.sh diff --git a/docs/source/devguides/agent_development/Develop-Driver-Agent.rst b/docs/source/devguides/agent_development/Develop-Driver-Agent.rst deleted file mode 100644 index 24da1aa0b5..0000000000 --- a/docs/source/devguides/agent_development/Develop-Driver-Agent.rst +++ /dev/null @@ -1,260 +0,0 @@ -.. _Develop-Driver-Agent: - -Driver Development -================== - -Introduction ------------- - -All Voltton drivers are implemented through the :doc:`Master Driver -Agent <../../core_services/drivers/Driver-Configuration>` and are technically sub-agents running in -the same process as the :doc:`Master Driver -Agent <../../core_services/drivers/Driver-Configuration>`. -Each of these driver sub-agents is responsible for creating an interface -to a single device. Creating that interface is facilitated by an -instance of an interface class. Currently there are two interface -classes included: :ref:`Modbus ` and -:ref:`BACnet `. - -Existing Drivers ----------------- - -In the directory for the Master Driver Agent you'll see a directory -called interfaces: - -:: - - ├── master_driver - │   ├── agent.py - │   ├── driver.py - │   ├── __init__.py - │   ├── interfaces - │   │   ├── __init__.py - │   │   ├── bacnet.py - │   │   └── modbus.py - │   └── socket_lock.py - ├── master-driver.agent - └── setup.py - -The files bacnet.py and modbus.py implement the interface class for each -respective protocol. (The BACnet interface is mostly just a pass-though -to the :ref:`BACnet Proxy Agent `, but the Modbus -interface is self contained.) - -Looking at those two files is a good introduction into how they work. - -The file name is used when configuring a driver to determine which -interface to use. The name of the interface class in the file must be -called Interface. - -.. note:: - - Developing a new driver does not require that your code live with the - MasterDriverAgent code. You may create the interface file anywhere that - you would like and then create a symbolic link to the interface file - in the interfaces directory. When the MasterDriverAgent is packed for - distribution the a copy of the file represented by the symbolic link - is packed into the agent wheel. - See :ref:`Using Third Party Drivers ` - -Interface Basics ----------------- - -A complete interface consists of two parts: One or more register classes -and the interface class. - -Register Class -~~~~~~~~~~~~~~ - -The Base Interface class uses a Register class to describe the registers -of a device to the driver sub-agent. This class is commonly sub-classed -to store protocol specific information for the interface class to use. -For example, the BACnet interface uses a sub-classed base register to -store the instance number, object type, and property name of the point -on the device represented by the register class. The Modbus interface -uses several different Register classes to deal with the different types -of registers on Modbus devices and their different needs. - -The register class contains the following attributes: - -- **read\_only** - True or False -- **register\_type** - "bit" or "byte", used by the driver sub-agent to - help deduce some meta data about the point. -- **point\_name** - Name of the point on the device. Used by the base - interface for reference. -- **units** - units of the value, meta data for the driver -- **description** - meta data for the driver -- **python\_type** - python type of the point, used to produce meta - data. This must be set explicitly otherwise it default to int. - -Here is an example of a Registry Class for the BACnet driver: - -:: - - class Register(BaseRegister): - def __init__(self, instance_number, object_type, property_name, read_only, pointName, units, description = ''): - super(Register, self).__init__("byte", read_only, pointName, units, description = '') - self.instance_number = int(instance_number) - self.object_type = object_type - self.property = property_name - -Note that this implementation is incomplete. It does not properly set -the register\_type or python\_type. - -Interface Class -~~~~~~~~~~~~~~~ - -The Interface Class is what is instantiated by the driver sub-agent to -do it's work. - -configure(self, config\_dict, registry\_config\_str) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This method must be implemented by an Interface implementation. - -- **config\_dict** is a dictionary of key values pairs from the - configuration file's "driver\_config" section. -- **registry\_config\_str** is the contents of the "registry\_config" - entry in the driver configuration file. It is up to the Interface - class to parse this file according to the needs of the driver. - -Here is an example taken from the :ref:`BACnet ` driver: - -:: - - def configure(self, config_dict, registry_config_str): - self.parse_config(registry_config_str) #Parse the configuration string. - self.target_address = config_dict["device_address"] - self.proxy_address = config_dict.get("proxy_address", "platform.bacnet_proxy") - self.ping_target(self.target_address) #Establish routing to the device if needed. - -And here is the parse\_config method (See :ref:`BACnet Registry -Configuration `: - -:: - - def parse_config(self, config_string): - if config_string is None: - return - - f = StringIO(config_string) #Python's CSV file parser wants a file like object. - - configDict = DictReader(f) #Parse the CVS file contents. - - for regDef in configDict: - #Skip lines that have no address yet. - if not regDef['Point Name']: - continue - - io_type = regDef['BACnet Object Type'] - read_only = regDef['Writable'].lower() != 'true' - point_name = regDef['Volttron Point Name'] - index = int(regDef['Index']) - description = regDef['Notes'] - units = regDef['Units'] - property_name = regDef['Property'] - - register = Register(index, - io_type, - property_name, - read_only, - point_name, - units, - description = description) - - self.insert_register(register) - -Once a register is created it must be added with the insert\_register -method. - -get\_point(self, point\_name) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This method must be implemented by an Interface implementation. - -Gets the value of a point from a device and returns it. - -Here is a simple example from the BACnet driver. In this case it only -has to pass the work on to the BACnet Proxy Agent for handling. - -:: - - def get_point(self, point_name): - register = self.get_register_by_name(point_name) - point_map = {point_name:[register.object_type, - register.instance_number, - register.property]} - result = self.vip.rpc.call(self.proxy_address, 'read_properties', - self.target_address, point_map).get() - return result[point_name] - -Failure should be indicated by a useful exception being raised. (In this -case the we just leave the Exception raised by the BACnet proxy -un-handled. This could be improved with better handling when register -that does not exist is requested.) - -The Register instance for the point can be retrieved with -self.get\_register\_by\_name(point\_name) - -set\_point(self, point\_name, value) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -This method must be implemented by an Interface implementation. - -Sets the value of a point on a device and ideally returns the actual -value set if different. - -Here is a simple example from the BACnet driver. In this case it only -has to pass the work on to the BACnet Proxy Agent for handling. - -:: - - def set_point(self, point_name, value): - register = self.get_register_by_name(point_name) - if register.read_only: - raise IOError("Trying to write to a point configured read only: "+point_name) - args = [self.target_address, value, - register.object_type, - register.instance_number, - register.property] - result = self.vip.rpc.call(self.proxy_address, 'write_property', *args).get() - return result - -Failure to raise a useful exception being raised. (In this case the we -just leave the Exception raised by the BACnet proxy un-handled unless -the point is read only.) - -scrape\_all(self) -^^^^^^^^^^^^^^^^^ - -This method must be implemented by an Interface implementation. - -This must return a dictionary mapping point names to values for ALL -registers. - -Here is a simple example from the BACnet driver. In this case it only -has to pass the work on to the BACnet Proxy Agent for handling. - -:: - - def scrape_all(self): - point_map = {} - read_registers = self.get_registers_by_type("byte", True) - write_registers = self.get_registers_by_type("byte", False) - for register in read_registers + write_registers: - point_map[register.point_name] = [register.object_type, - register.instance_number, - register.property] - - result = self.vip.rpc.call(self.proxy_address, 'read_properties', - self.target_address, point_map).get() - return result - -self.get\_registers\_by\_type allows you to get lists of registers by -their type and if they are read only. (As BACnet currently only uses -"byte", "bit" is ignored.) As the procedure for handling all the -different types in BACnet is the same we can bundle them all up into a -single request from the proxy. - -In the Modbus protocol the distinction is important and so each category -must be handled differently. diff --git a/docs/source/devguides/agent_development/Developing-Historian-Agents.rst b/docs/source/devguides/agent_development/Developing-Historian-Agents.rst deleted file mode 100644 index 178c645eff..0000000000 --- a/docs/source/devguides/agent_development/Developing-Historian-Agents.rst +++ /dev/null @@ -1,112 +0,0 @@ -.. _Developing-Historian-Agents: - -Developing Historian Agents -=========================== - -VOLTTRON provides a convenient base class for developing new historian -agents. The base class automatically subscribes to all pertinent topics, -cache published data to disk until it is successfully recorded to a -historian, create the public facing interface for querying results, and -spells out a simple interface for concrete implementation to meet to -make a working Historian Agent. The VOLTTRON provides support for -several :ref:`historians ` without modification. -Please use one of these if it fits your project criteria, otherwise -continue reading. - -The base class also breaks data to publish into reasonably sized chunks -before handing it off to the concrete implementation for publication. -The size of the chunk is configurable. - -The base class sets up a separate thread for publication. This way if -publication code needs to block for a long period of time (up to 10s of -seconds) this will no disrupt the collection of data from the bus or the -functioning of the agent itself. - -BaseHistorian -------------- - -All Historians must inherit from the BaseHistorian class in -volttron.platform.agent.base\_historian and implement the following -methods: - -publish\_to\_historian(self, to\_publish\_list) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This method is called by the BaseHistorian class when it has received -data from the message bus to be published. to\_publish\_list is a list -of records to publish in the form - -:: - - [ - { - '_id': 1, - 'timestamp': timstamp, - 'source': 'scrape', - 'topic': 'campus/building/unit/point', - 'value': 90, - 'meta': {'units':'F'} - } - { - ... - } - ] - -- \_id - ID of record. All IDs in the list are unique. This is used for - internal record tracking. -- timestamp - Python datetime object of the time data was published at - timezone UTC -- source - Source of the data. Can be scrape, analysis, log, or - actuator. -- topic - Topic data was published on. Prefix's such as "device" are - dropped. -- value - Value of the data. Can be any type. -- meta - Metadata for the value. Some sources will omit this entirely. - -For each item in the list the concrete implementation should attempt to -publish (or discard if non-publishable) every item in the list. -Publication should be batched if possible. For every successfully -published record and every record that is to be discarded because it is -non-publishable the agent must call report\_handled on those records. -Records that should be published but were not for whatever reason -require no action. Future calls to publish\_to\_historian will include -these unpublished records. publish\_to\_historian is always called with -the oldest unhandled records. This allows the historian to no lose data -due to lost connections or other problems. - -As a convenience report\_all\_handled can be called if all of the items -in published\_list were successfully handled. - -query\_topic\_list(self) -~~~~~~~~~~~~~~~~~~~~~~~~ - -Must return a list of all unique topics published. - -query\_historian(self, topic, start=None, end=None, skip=0, count=None, order=None) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -This function must return the results of a query in the form: - -:: - - {"values": [(timestamp1: value1), (timestamp2: value2), ...], - "metadata": {"key1": value1, "key2": value2, ...}} - -metadata is not required (The caller will normalize this to {} for you -if you leave it out) - -- topic - the topic the user is querying for. -- start - datetime of the start of the query. None for the beginning of - time. -- end - datetime of the end of of the query. None for the end of time. -- skip - skip this number of results (for pagination) -- count - return at maximum this number of results (for pagination) -- order - "FIRST\_TO\_LAST" for ascending time stamps, - "LAST\_TO\_FIRST" for descending time stamps. - -historian\_setup(self) -~~~~~~~~~~~~~~~~~~~~~~ - -Implementing this is optional. This function is run on the same thread -as the rest of the concrete implementation at startup. It is meant for -connection setup. diff --git a/docs/source/devguides/agent_development/Eclipse.rst b/docs/source/devguides/agent_development/Eclipse.rst deleted file mode 100644 index cb383bdda4..0000000000 --- a/docs/source/devguides/agent_development/Eclipse.rst +++ /dev/null @@ -1,701 +0,0 @@ -.. _Eclipse: - -Agent Development in Eclipse -============================ - -The Eclipse IDE (integrated development environment), while not required for agent development, can be a powerful developmental tool. Download the IDE from the following links. Choose a download mirror closest to your location. - -`For 32-bit machines `_ - -`For 64-bit machines `_ - -To go to the main Eclipse webpage, go to `http://eclipse.org/ `_ - -Installing Eclipse ------------------- - -To install Eclipse, enter the following commands in a terminal: - -1. Install Eclipse dependency: - -:: - - # apt-get install openjdk-7-jdk - -2. After downloading the eclipse archive file, move the package to the opt directory (enter this command from a terminal in the directory where eclipse was downloaded): - -:: - - $ tar -xvf eclipse-java-mars-R-linux-gtk-x86_64.tar.gz - # mv eclipse /opt/ - -- For 32-bit machines, replace “gtk-x86_64” with “linux-gtk” in the previous command. - -3. Create desktop shortcut: - -:: - - # touch /usr/share/applications/eclipse.desktop - # nano /usr/share/applications/eclipse.desktop - -Enter the following text, as shown in Figure 1, and save the file. To avoid typos, copy and paste the following: - -:: - - [Desktop Entry] - Name=Eclipse - Type=Application - Exec=/opt/eclipse/eclipse - Terminal=false - Icon=/opt/eclipse/icon.xpm - Comment=Integrated Development Environment - NoDisplay=false - Categories=Development;IDE - Name[en]=eclipse - -.. image:: files/1-eclipse-desktop.jpg - -Figure 1. Eclipse Desktop File - -4. Copy the shortcut to the desktop: - -:: - - $ cp /usr/share/applications/eclipse.desktop ~/Desktop/ - -Eclipse is now installed and ready to use. - -Installing Pydev and EGit Eclipse Plug-ins ------------------------------------------- -The transactional network code is stored in a Git repository. A plug-in is available for Eclipse that makes development more convenient (note: you must have Git installed on the system and have built the project). - -1. Select Help. Select Install New Software (Figure 2). - -.. image:: files/2-egit-plugin.jpg - -Figure 2. Installing Eclipse EGit Plugin - -2. Click the Add button (Figure 3). - -.. image:: files/3-egit-plugin.jpg - -Figure 3. Installing Eclipse EGit Plugin (continued) - -3. As shown in Figure 4, enter the following information: - - For name use: EGit - - For location: http://download.eclipse.org/egit/updates - -.. image:: files/4-egit-plugin.jpg - -Figure 4. Installing Eclipse Egit Plugin (continued) - -4. After clicking OK, check the Select All button. -5. Click through Next > Agree to Terms > Finish. Allow Eclipse to restart. - -6. After installing Eclipse, you must add the PyDev plug-in to the environment. - -In Eclipse: - -- Select Help and select Install New Software. -- Click the Add button. -- As shown in Figure 5, enter the following information: - - For name use: PyDev - - For location: http://pydev.org/updates - - Click OK. - -.. image:: files/5-install-eclipse-pydev-plugin.jpg - -Figure 5. Installing Eclipse PyDev Plugin - -7. Check the box for PyDev. -8. Click through Next > Agree to Terms > Finish. Allow Eclipse to restart. - -Checkout VOLTTRON Project -------------------------- -VOLTTRON can be imported into Eclipse from an existing VOLTTRON project (VOLTTRON was previously checked out from GitHub) or a new download from GitHub. - -Import VOLTTRON into Eclipse from an Existing Local Repository (Previously Downloaded VOLTTRON Project) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To import an existing VOLTTRON project into Eclipse, complete the following steps: - -1. Select File and select Import (Figure 6). - -.. image:: files/6-check-volttron-with-eclipse.jpg - -Figure 6. Checking VOLTTRON with Eclipse from Local Source - -2. Select Git. Select Projects from Git. Click the Next button (Figure 7). - -.. image:: files/7-check-volttron-with-eclipse.jpg - -Figure 7. Checking VOLTTRON with Eclipse from Local Source (continued) - -3. Select Existing local repository and click the Next button (Figure 8). - -.. image:: files/8-check-volttron-with-eclipse.jpg - -Figure 8. Checking VOLTTRON with Eclipse from Local Source (continued) - -4. Select Add (Figure 9). - -.. image:: files/9-check-volttron-with-eclipse.jpg - -Figure 9. Checking VOLTTRON with Eclipse from Local Source (continued) - -5. Select Browse. Navigate to the top-level base VOLTTRON directory. Select OK (Figure 10). - -.. image:: files/10-check-volttron-with-eclipse.jpg - -Figure 10. Checking Out VOLTTRON with Eclipse from Local Source (continued) - -6. Click Finish (Figure 11). - -.. image:: files/11-check-volttron-with-eclipse.jpg - -Figure 11. Checking Out VOLTTRON with Eclipse from Local Source (continued) - -7. Click Next (Figure 12). - -.. image:: files/12-check-volttron-with-eclipse.jpg - -Figure 12. Checking Out VOLTTRON with Eclipse from Local Source (continued) - -8. Select Import as general project. Click Next. Click Finish (Figure 13). The project will be imported into the workspace. - -.. image:: files/13-check-volttron-with-eclipse.jpg - -Figure 13. Checking Out VOLTTRON with Eclipse from Local Source (continued) - -Import New VOLTTRON Project from GitHub -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -To import a new VOLTTRON project directly from GitHub into Eclipse, complete the following steps: - -1. Select File and select Import (Figure 14). - -.. image:: files/14-check-volttron-from-github.jpg - -Figure 14. Checking Out VOLTTRON with Eclipse from GitHub - -2. Select Git. Select Projects from Git. Click the Next button (Figure 15). - -.. image:: files/15-check-volttron-from-github.jpg - -Figure 15. Checking Out VOLTTRON with Eclipse from GitHub (continued) - -3. Select Clone URI and select Next (Figure 16). - -.. image:: files/16-check-volttron-from-github.jpg - -Figure 16. Checking Out VOLTTRON with Eclipse GitHub (continued) - -4. Fill in https://github.com/VOLTTRON/volttron.git for the URI. If you have a GitHub account, enter a username and password in the User and Password sections. This is not required but will allow you to receive notifications from GitHub for VOLTTRON related news. (Figure 17) - -.. image:: files/17-check-volttron-from-github.jpg - -Figure 17. Checking Out VOLTTRON with Eclipse from GitHub (continued) - -5. Select the master branch (Figure 18). - -.. image:: files/18-check-volttron-from-github.jpg - -Figure 18. Checking Out VOLTTRON with Eclipse from GitHub (continued) - -6. Select a location to save the local repository (Figure 19). - -.. image:: files/19-check-volttron-from-github.jpg - -Figure 19. Checking Out VOLTTRON with Eclipse from GitHub (continued) - -7. Select Import as general project. Select Next. Select Finish (Figure 20). The project will now be imported into the workspace. - -.. image:: files/20-check-volttron-from-github.jpg - -Figure 20. Checking Out VOLTTRON with Eclipse from GitHub (continued) - -If the VOLTTRON project has not been built (/bootstrap.py file has not been run), proceed to ##Section 2.4 Building the VOLTTRON Platform## and follow the instruction for running the bootstrap.py script before proceeding to the following sections. - -Linking Eclipses ----------------- -PyDev must now be configured to use the Python interpreter packaged with VOLTTRON. - -1. Select Window and select Preferences. -2. Expand the PyDev tree. -3. Select Interpreters and select Python interpreter. -4. Select New (Figure 21). - -.. image:: files/21-configuring-pydev.jpg - -Figure 21. Configuring PyDev - -5. Select Browse and navigate to the pydev-python file located at (``/scripts/pydev-python``) (Figure 22). - -6. Select OK (Figure 22). - -.. image:: files/22-configuring-pydev.jpg - -Figure 22. Configuring PyDev (continued) - -7. Select All and uncheck the VOLTTRON base directory (Figure 23). - -.. image:: files/23-configuring-pydev.jpg - -Figure 23. Configuring PyDev (continued) - -8. In the Project/PackageExplorer view on the left, right-click on the project, PyDev, and set as PyDev Project (Figure 24). - -.. image:: files/24-setting-pydev-project.jpg - -Figure 24. Setting as PyDev Project - -9. Switch to the PyDev perspective: Select Window. Select Perspective. Select Open Perspective. Select Other. Select PyDev (Figure 25). Eclipse should now be configured to use the project's environment. - -.. image:: files/25-setting-pydev-perspective.jpg - -Figure 25. Setting PyDev Perspective in Eclipse - -Running the VOLTTRON Platform and Agents ----------------------------------------- - -VOLTTRON and agents within VOLTTRON can now be run within Eclipse. This section will describe the process to run VOLTTRON and an agent within Eclipse. - -Setup a Run Configuration for the Platform -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The following steps describe the process for running VOLTTRON within Eclipse: - -1. Select Run and select Run Configurations (Figure 26). - -.. image:: files/26-running-volttron.jpg - -Figure 26. Running VOLTTRON Platform, Setting Up a Run Configuration - -2. Select Python Run from the menu on left. Click the New launch configuration button (Figure 27). - -.. image:: files/27-running-volttron.jpg - -Figure 27. Running VOLTTRON Platform, Setting Up a Run Configuration (continued) - -3. Change the name (any name may be used but for this example the name VOLTTRON was chosen) and select the main module (``/volttron/platform/main.py``). - -4. Select the Arguments tab and enter '-vv' in the Program arguments field (Figure 28) then select the Run button. - -.. image:: files/28-running-volttron.jpg - -Figure 28. Running VOLTTRON Platform, Setting Up a Run Configuration (continued) - -5. If the run is successful, the console should appear similar to Figure 29. If the run does not succeed (red text describing why the run failed will populate the console), click the all stop icon (two red boxes overlaid) on the console and then retry. - -.. image:: files/29-running-volttron.jpg - -Figure 29. Running VOLTTRON Platform, Console View on Successful Run - -Configure a Run Configuration for the Listener Agent -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -The following steps describe the process for configuring an agent within Eclipse: - -1. Select Run and select Run Configurations (Figure 30). - -.. image:: files/30-running-listener-agent.jpg - -Figure 30. Running the Listener Agent, Setting Up a Run Configuration - -2. Select Python Run from the menu on left and click the New launch configuration button (Figure 31). - -.. image:: files/31-running-listener-agent.jpg - -Figure 31. Running the Listener Agent, Setting Up a Run Configuration (continued) - -3. Change the name (for this example Listener is used) and select the main module (``/examples/ListenerAgent/listener/agent.py``) (Figure 32). - -.. image:: files/32-running-listener-agent.jpg - -Figure 32. Running the Listener Agent, Setting Up a Run Configuration (continued) - -4. Click the Arguments tab and change Working directory to Default (Figure 33). - -.. image:: files/33-running-listener-agent.jpg - -Figure 33. Running the Listener Agent, Setting Up a Run Configuration (continued) - -5. In the Environment tab, select New and add the following environment variables (bulleted list below), as shown in Figure 34: - -- AGENT_CONFIG = /home//examples /ListenerAgent/config - -AGENT_CONFIG is the absolute path the agent’s configuration file. To access a remote message bus, use the VIP address as described in ##Section 3.5 Platform Management:VOLTTRON Management Central.## - -.. image:: files/34-running-listener-agent.jpg - -Figure 34. Running the Listener Agent, Setting Up a Run Configuration - -6. Click Run. This launches the agent. You should see the agent start to publish and receive its own heartbeat message (Figure 35). - -.. image:: files/35-listening_agent_output.jpg - -Figure 35. Listener Agent Output on Eclipse Console - -The process for running other agents in Eclipse is identical to that of the Listener agent. Several useful development tools are available within Eclipse and PyDev that make development, debugging, and testing of agents much simpler. - -Agent Creation Walkthrough --------------------------- -Developers should look at the Listener agent before developing their own agent. The Listener agent illustrates the basic functionality of an agent. The following example demonstrates the steps for creating an agent. - -Agent Folder Setup -^^^^^^^^^^^^^^^^^^ - -Create a folder within the workspace to help consolidate the code your agent will utilize. - -1. In the VOLTTRON base directory, create a new folder TestAgent. - -2. In TestAgent, create a new folder tester. This is the package where the Python code will be created (Figure 36). - -.. image:: files/36-agent-test-folder.jpg - -Figure 36. Creating an Agent Test Folder - -Create Agent Code -^^^^^^^^^^^^^^^^^ - -The following steps describe the necessary agent files and modules. - -1. In tester, create a file called *__init__.py*, which tells Python to treat this folder as a package. - -2. In the tester package folder, create the file *testagent.py* - -3. Create a class called TestAgent. - -4. Import the packages and classes needed: - -:: - - from __future__ import absolute_import - - from datetime import datetime - import logging - import sys - - from volttron.platform.vip.agent import Agent, Core - from volttron.platform.agent import utils - -5. Set up a logger. The ``utils`` module from ``volttron.platform.agent`` builds on Python’s already robust logging module and is easy to use. Add the following lines after the import statements: - -:: - - utils.setup_logging() - _log = logging.getLogger(__name__) - -This agent will inherit features from the Agent class (base class) extending the agent’s default functionality. The class definition for the TestAgent will be configured as shown below (with ``__init__``). - -:: - - class TestAgent(Agent): - def __init__(self, config_path, **kwargs): - super(TestAgent, self).__init__(**kwargs) - -Setting up a Subscription -^^^^^^^^^^^^^^^^^^^^^^^^^ -1. Create a startup method. This method is tagged with the decorator ``@Core.receiver("onstart")``. The startup method will run after the agent is initialized. The TestAgent’s startup method will contain a subscription to the Listener agent’s heartbeat (heartbeat/listeneragent). The TestAgent will detect when a message with this topic is published on the message bus and will run the method specified with the callback keyword argument passed to ``self.vip.pubsub.subscribe``. - -:: - - @Core.receiver("onstart") - def starting(self, sender, **kwargs): - ''' - Subscribes to the platform message bus on - the heatbeat/listeneragent topic - ''' - print('TestAgent example agent start-up function') - self.vip.pubsub.subscribe('pubsub', 'heartbeat/listeneragent', - callback=self.on_heartbeat) - -2. Create the callback method. Typically, the callback is the response to a message (or event). In this simple example, the TestAgent will do a print statement and publish a message to the bus: - -:: - - def on_heartbeat(self, peer, sender, bus, topic, headers, message): - '''TestAgent callback method''' - print('Matched topic: {}, for bus: {}'.format(topic, bus)) - self.vip.pubsub.publish('pubsub', - 'testagent/publish', - headers=headers, - message='test publishing').get(timeout=30) - -Argument Parsing Main Method -^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The test agent will need to be able to parse arguments being passed on the command line by the agent launcher. Use the **utils.default_main** method to handle argument parsing and other default behavior. - -1. Create a main method that can be called by the launcher: - -:: - - def main(argv=sys.argv): - '''Main method called by the eggsecutable.''' - try: - utils.vip_main(TestAgent) - except Exception as e: - _log.exception(e) - - if __name__ == '__main__': - # Entry point for script - sys.exit(main()) - -Create Support Files for Test Agent -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -VOLTTRON agents need configuration files for packaging, configuration, and launching. The “setup.py” file details the naming and Python package information. The launch configuration file is a JSON-formatted text file used by the platform to launch instances of the agent. - -Packaging Configuration -^^^^^^^^^^^^^^^^^^^^^^^ -In the TestAgent folder, create a file called "setup.py". This file sets up the name, version, required packages, method to execute, etc. for the agent. The packaging process will also use this information to name the resulting file. - -:: - - from setuptools import setup, find_packages - - #get environ for agent name/identifier - packages = find_packages('.') - package = packages[0] - - setup( - name = package + 'agent', - version = "0.1", - install_requires = ['volttron'], - packages = packages, - entry_points = { - 'setuptools.installation': [ - 'eggsecutable = ' + package + '.testagent:main', - ] - } - ) - -Launch Configuration -^^^^^^^^^^^^^^^^^^^^ -In TestAgent, create a file called "testagent.launch.json". This is the file the platform will use to launch the agent. It can also contain configuration parameters for the agent: - -:: - - { - "agentid": "Test1" - } - -Testing the Agent -^^^^^^^^^^^^^^^^^ -From a terminal, in the base VOLTTRON directory, enter the following commands (with the platform activated and VOLTTRON running): - -1. Run `pack_install` script on TestAgent: - -:: - - $ ./scripts/core/pack_install.sh TestAgent TestAgent/config test-agent - -- Upon successful completion of this command, the terminal output will show the install directory, the agent UUID (unique identifier for an agent; the UUID shown in red is only an example and each instance of an agent will have a different UUID) and the agent name (blue text): - -:: - - Installed /home/volttron-user/.volttron/packaged/testeragent-0.1-py2-none-any.whl - as d4ca557a-496c-4f02-8ad9-42f5d435868a testeragent-0.1 - -2. Start the agent: - -:: - - $ vctl start --tag test-agent - -3. Verify that the agent is running: - -:: - - $ vctl status - $ tail -f volttron.log - -If changes are made to the Passive AFDD agent’s configuration file after the agent is launched, stop and reload the agent. In a terminal, enter the following commands: - -:: - - $ vctl stop --tag test-agent - $ vctl remove --tag test-agent - -Re-build and start the updated agent (Figure 37). - -.. image:: files/37-testagent-output.jpg - -Figure 37. TestAgent Output In VOLTTRON Log - -Running the TestAgent in Eclipse -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -.. warning:: - Before attempting to run an agent in Eclipse, please see the note in: :ref:`AgentDevelopment ` - -If you are working in Eclipse, create a run configuration for the TestAgent based on the Listener agent configuration in the Eclipse development environment ##(Section 5.5.5 Running the VOLTTRON Platform and Agents)##. - -1. Launch the platform (##Section 5.5.5.1 Setup a Run Configuration for the Platform##) - -2. Launch the TestAgent by following the steps outlined in `Launching the Listener ` for launching the Listener agent. - -3. Launch the Listener agent. TestAgent should start receiving the heartbeats from Listener agent and the following should be displayed in the console (Figure 38). - -.. image:: files/38-console-output.jpg - -Figure 38. Console Output for TestAgent - -Adding Additional Features to the TestAgent -------------------------------------------- -Additional code can be added to the TestAgent to utilize additional services in the platform. The following sections show how to use the weather and device scheduling service within the TestAgent. - -Subscribing to Weather Data -^^^^^^^^^^^^^^^^^^^^^^^^^^^ -This agent can be modified to listen to weather data from the Weather agent by adding the following line at the end of the TestAgent startup method. This will subscribe the agent to the temperature subtopic. For the full list of topics available, please see: - -https://github.com/VOLTTRON/volttron/wiki/WeatherAgentTopics - -:: - - self.vip.pubsub.subscribe('pubsub', 'weather/temperature/temp_f', - callback=self.on_weather) - -Add the callback method ``on_weather``: - -:: - - def on_weather(self, peer, sender, bus, topic, headers, message): - print("TestAgent got weather\nTopic: {}, Message: {}".format(topic, message)) - -The platform log file should appear similar to Figure 39. - -.. image:: files/39-testagent-output-weather-subscribed.jpg - -Figure 39. TestAgent Output when Subscribing to Weather Topic - -Utilizing the Scheduler Agent -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The TestAgent can be modified to publish a schedule to the Actuator agent by reserving time on virtual devices. Modify the following code to include current time ranges and include a call to the publish schedule method in setup. The following example posts a simple schedule. For more detailed information on device scheduling, please see: - -https://github.com/VOLTTRON/volttron/wiki/ActuatorAgent - -Ensure the Actuator agent is running as per ##Section 3.3 Device Control: Configuring and Launching the Actuator Agent##. Add the following line to the TestAgent’s import statements: - -:: - - from volttron.platform.messaging import topics - -Add the following lines to the TestAgent’s starting method. This sets up a subscription to the **ACTUATOR_RESPONSE** topic and calls the **publish_schedule** method. - -:: - - self.vip.pubsub.subscribe('pubsub', topics.ACTUATOR_RESPONSE, - callback=self.on_schedule_result) - self.publish_schedule() - -The **publish_schedule** method sends a schedule request message to the Actuator agent (Update the schedule with appropriate times): - -:: - - def publish_schedule(self): - headers = { - 'AgentID': self._agent_id, - 'type': 'NEW_SCHEDULE', - 'requesterID': self._agent_id, # Name of requesting agent - 'taskID': self._agent_id + "-TASK", # Unique task ID - 'priority': 'LOW' # Task Priority (HIGH, LOW, LOW_PREEMPT) - } - msg = [ - ["campus/building/device1", # First time slot. - "2014-1-31 12:27:00", # Start of time slot. - "2014-1-31 12:29:00"], # End of time slot. - ["campus/building/device1", # Second time slot. - "2014-1-31 12:26:00", # Start of time slot. - "2014-1-31 12:30:00"], # End of time slot. - ["campus/building/device2", # Third time slot. - "2014-1-31 12:30:00", # Start of time slot. - "2014-1-31 12:32:00"], # End of time slot. - #etc... - ] - self.vip.rpc.call('platform.actuator', # Target agent - 'request_new_schedule', # Method to call - agent_id, # Requestor - "some task", # TaskID - "LOW", # Priority - msg).get(timeout=10) # Request message - -Add the call back method for the schedule request: - -:: - - def on_schedule_result(self, topic, headers, message, match): - print (("TestAgent schedule result \nTopic: {topic}, " - "{headers}, Message: {message}") - .format(topic=topic, headers=headers, message=message)) - -Full TestAgent Code -^^^^^^^^^^^^^^^^^^^ -The following is the full TestAgent code built in the previous steps: - -:: - - from __future__ import absolute_import - - from datetime import datetime - import logging - import sys - - from volttron.platform.vip.agent import Agent, Core - from volttron.platform.agent import utils - from volttron.platform.messaging import headers as headers_mod - - utils.setup_logging() - _log = logging.getLogger(__name__) - - class TestAgent(Agent): - def __init__(self, config_path, **kwargs): - super(TestAgent, self).__init__(**kwargs) - - @Core.receiver("onstart") - def starting(self, sender, **kwargs): - ''' - Subscribes to the platform message bus on - the heatbeat/listeneragent topic - ''' - _log.info('TestAgent example agent start-up function') - self.vip.pubsub.subscribe(peer='pubsub', topic='heartbeat/listeneragent', - callback=self.on_heartbeat) - self.vip.pubsub.subscribe('pubsub', topics.ACTUATOR_RESPONSE, - callback=self.on_schedule_result) - self.vip.pubsub.subscribe('pubsub', 'weather/temperature/temp_f', - callback=self.on_weather) - - self.publish_schedule() - - def on_heartbeat(self, peer, sender, bus, topic, headers, message): - '''TestAgent callback method''' - _log.info('Matched topic: {}, for bus: {}'.format(topic, bus)) - self.vip.pubsub.publish(peer='pubsub', - topic='testagent/publish', - headers=headers, - message='test publishing').get(timeout=30) - - def on_weather(self, peer, sender, bus, topic, headers, message): - _log.info( - "TestAgent got weather\nTopic: {}, Message: {}".format(topic, message)) - - def on_schedule_result(self, topic, headers, message, match): - print (("TestAgent schedule result \nTopic: {topic}, " - "{headers}, Message: {message}") - .format(topic=topic, headers=headers, message=message)) - - def main(argv=sys.argv): - '''Main method called by the eggsecutable.''' - try: - utils.vip_main(TestAgent) - except Exception as e: - _log.info(e) - - if __name__ == '__main__': - # Entry point for script - sys.exit(main()) - - - - - - - - diff --git a/docs/source/devguides/agent_development/TestAgent.rst b/docs/source/devguides/agent_development/TestAgent.rst deleted file mode 100644 index f0d9fee37e..0000000000 --- a/docs/source/devguides/agent_development/TestAgent.rst +++ /dev/null @@ -1,204 +0,0 @@ -.. _Test-Agent: - -TestAgent Source Code -===================== - -Full code of agent detailed in AgentDevelopment: - -:: - - """ - Agent documentation goes here. - """ - - __docformat__ = 'reStructuredText' - - import logging - import sys - from volttron.platform.agent import utils - from volttron.platform.vip.agent import Agent, Core, RPC - - _log = logging.getLogger(__name__) - utils.setup_logging() - __version__ = "0.5" - - - def tester(config_path, **kwargs): - """Parses the Agent configuration and returns an instance of - the agent created using that configuration. - - :param config_path: Path to a configuration file. - - :type config_path: str - :returns: Tester - :rtype: Tester - """ - try: - config = utils.load_config(config_path) - except StandardError: - config = {} - - if not config: - _log.info("Using Agent defaults for starting configuration.") - - setting1 = int(config.get('setting1', 1)) - setting2 = config.get('setting2', "some/random/topic") - - return Tester(setting1, - setting2, - **kwargs) - - - class Tester(Agent): - """ - Document agent constructor here. - """ - - def __init__(self, setting1=1, setting2="some/random/topic", - **kwargs): - super(Tester, self).__init__(**kwargs) - _log.debug("vip_identity: " + self.core.identity) - - self.setting1 = setting1 - self.setting2 = setting2 - - self.default_config = {"setting1": setting1, - "setting2": setting2} - - - #Set a default configuration to ensure that self.configure is called immediately to setup - #the agent. - self.vip.config.set_default("config", self.default_config) - #Hook self.configure up to changes to the configuration file "config". - self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") - - def configure(self, config_name, action, contents): - """ - Called after the Agent has connected to the message bus. If a configuration exists at startup - this will be called before onstart. - - Is called every time the configuration in the store changes. - """ - config = self.default_config.copy() - config.update(contents) - - _log.debug("Configuring Agent") - - try: - setting1 = int(config["setting1"]) - setting2 = str(config["setting2"]) - except ValueError as e: - _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) - return - - self.setting1 = setting1 - self.setting2 = setting2 - - self._create_subscriptions(self.setting2) - - def _create_subscriptions(self, topic): - #Unsubscribe from everything. - self.vip.pubsub.unsubscribe("pubsub", None, None) - - self.vip.pubsub.subscribe(peer='pubsub', - prefix=topic, - callback=self._handle_publish) - - def _handle_publish(self, peer, sender, bus, topic, headers, - message): - pass - - @Core.receiver("onstart") - def onstart(self, sender, **kwargs): - """ - This is method is called once the Agent has successfully connected to the platform. - This is a good place to setup subscriptions if they are not dynamic or - do any other startup activities that require a connection to the message bus. - Called after any configurations methods that are called at startup. - - Usually not needed if using the configuration store. - """ - #Example publish to pubsub - #self.vip.pubsub.publish('pubsub', "some/random/topic", message="HI!") - - #Exmaple RPC call - #self.vip.rpc.call("some_agent", "some_method", arg1, arg2) - - @Core.receiver("onstop") - def onstop(self, sender, **kwargs): - """ - This method is called when the Agent is about to shutdown, but before it disconnects from - the message bus. - """ - pass - - @RPC.export - def rpc_method(self, arg1, arg2, kwarg1=None, kwarg2=None): - """ - RPC method - - May be called from another agent via self.core.rpc.call """ - return self.setting1 + arg1 - arg2 - - def main(): - """Main method called to start the agent.""" - utils.vip_main(tester, - version=__version__) - - - if __name__ == '__main__': - # Entry point for script - try: - sys.exit(main()) - except KeyboardInterrupt: - pass - - -Contents of setup.py for TestAgent: - -:: - - from setuptools import setup, find_packages - - MAIN_MODULE = 'agent' - - # Find the agent package that contains the main module - packages = find_packages('.') - agent_package = 'tester' - - # Find the version number from the main module - agent_module = agent_package + '.' + MAIN_MODULE - _temp = __import__(agent_module, globals(), locals(), ['__version__'], -1) - __version__ = _temp.__version__ - - # Setup - setup( - name=agent_package + 'agent', - version=__version__, - author_email="volttron@pnnl.gov", - url="https://volttron.org/", - description="Agent development tutorial.", - author="VOLTTRON Team", - install_requires=['volttron'], - packages=packages, - entry_points={ - 'setuptools.installation': [ - 'eggsecutable = ' + agent_module + ':main', - ] - } - ) - -Contents of config: - -:: - - { - # VOLTTRON config files are JSON with support for python style comments. - "setting1": 2, #Integers - "setting2": "some/random/topic2", #Strings - "setting3": true, #Booleans: remember that in JSON true and false are not capitalized. - "setting4": false, - "setting5": 5.1, #Floating point numbers. - "setting6": [1,2,3,4], #Lists - "setting7": {"setting7a": "a", "setting7b": "b"} #Objects - } diff --git a/docs/source/devguides/agent_development/index.rst b/docs/source/devguides/agent_development/index.rst deleted file mode 100644 index 623a6f81de..0000000000 --- a/docs/source/devguides/agent_development/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _Agent_Development: - -================= -Agent Development -================= - -.. toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/docs/source/devguides/deployment/Deployment-Options.rst b/docs/source/devguides/deployment/Deployment-Options.rst deleted file mode 100644 index 770038dd42..0000000000 --- a/docs/source/devguides/deployment/Deployment-Options.rst +++ /dev/null @@ -1,172 +0,0 @@ -================== -Deployment Options -================== - -There are several ways to deploy the VOLTTRON platform in a Linux environment. It is up to the user to determine which -is right for them. The following assumes that the platform has already been bootstrapped and is ready to run. - -Simple Command Line -******************* - -With the VOLTTRON environment activated the platform can be started simply by running VOLTTRON on the command -line. - -:: - - $volttron -vv - -This will start the platform in the current terminal with very verbose logging turned on. This -is most appropriate for testing Agents or testing a deployment for problems before switching to a -more long term solution. This will print all log messages to the console in real time. - -This should not be used for long term deployment. As soon as an SSH session is terminated for whatever reason -the processes attached to that session will be killed. This also will not capture log message to a file. - -Running VOLTTRON as a Background Process -**************************************** - -A simple, more long term solution, is to run volttron in the background and disown it from the current terminal. - -.. warning:: - If you plan on running VOLTTRON in the background and detaching it from the - terminal with the ``disown`` command be sure to redirect stderr and stdout to ``/dev/null``. - Even if logging to a file is used some libraries which VOLTTRON relies on output - directly to stdout and stderr. This will cause problems if those file descriptors - are not redirected to ``/dev/null``. - -:: - - $volttron -vv -l volttron.log > /dev/null 2>&1& - -Alternatively: - -:: - - ``./start-volttron`` - -.. note:: If you are not in an activated environment, this script will start - the platform running in the background in the correct environment, however - the environment will not be activated for you, you must activate it yourself. - -**If there are other jobs running in your terminal be sure to disown the correct one.** - -:: - - $jobs - [1]+ Running something else - [2]+ Running ./start-volttron - - #Disown VOLTTRON - $disown %2 - -This will run the VOLTTRON platform in the background and turn it into a daemon. The log output will be directed -to a file called ``volttron.log`` in the current directory. - -To keep the size of the log under control for more longer term deployments us the rotating log configuration file -``examples/rotatinglog.py``. - -:: - - $volttron -vv --log-config examples/rotatinglog.py > /dev/null 2>&1& - -This will start a rotate the log file at midnight and limit the total log data to seven days worth. - -The main downside to this approach is that the VOLTTRON platform will not automatically -resume if the system is restarted. It will need to be restarted manually after reboot. - -Setting up VOLTTRON as a System Service -*************************************** - -Systemd -------- - -An example service file ``scripts/admin/volttron.service`` for systemd cas be used as a starting point -for setting up VOLTTRON as a service. Note that as this will redirect all the output that would -be going to stdout - to the syslog. This can be accessed using journalctl. For systems that run -all the time or have a high level of debugging turned on, we recommend checking the system's -logrotate settings. - -:: - - [Unit] - Description=VOLTTRON Platform Service - After=network.target - - [Service] - Type=simple - - #Change this to the user that VOLTTRON will run as. - User=volttron - Group=volttron - - #Uncomment and change this to specify a different VOLTTRON_HOME - #Environment="VOLTTRON_HOME=/home/volttron/.volttron" - - #Change these to settings to reflect the install location of VOLTTRON - WorkingDirectory=/var/lib/volttron - ExecStart=/var/lib/volttron/env/bin/volttron -vv - ExecStop=/var/lib/volttron/env/bin/volttron-ctl shutdown --platform - - - [Install] - WantedBy=multi-user.target - -After the file has been modified to reflect the setup of the platform you can install it with the -following commands. These need to be run as root or with sudo as appropriate. - -:: - - #Copy the service file into place - cp scripts/admin/volttron.service /etc/systemd/system/ - - #Set the correct permissions if needed - chmod 644 /etc/systemd/system/volttron.service - - #Notify systemd that a new service file exists (this is crucial!) - systemctl daemon-reload - - #Start the service - systemctl start volttron.service - -Init.d ------- - -An example init script ``scripts/admin/volttron`` can be used as a starting point for -setting up VOLTTRON as a service on init.d based systems. - -Minor changes may be needed for the file to work on the target system. Specifically -the ``USER``, ``VLHOME``, and ``VOLTTRON_HOME`` variables may need to be changed. - -:: - - ... - #Change this to the user VOLTTRON will run as. - USER=volttron - #Change this to the install location of VOLTTRON - VLHOME=/var/lib/volttron - - ... - - #Uncomment and change this to specify a different VOLTTRON_HOME - #export VOLTTRON_HOME=/home/volttron/.volttron - - -The script can be installed with the following commands. These need to be run as root or -with sudo as appropriate. - -:: - - #Copy the script into place - cp scripts/admin/volttron /etc/init.d/ - - #Make the file executable - chmod 755 /etc/init.d/volttron - - #Change the owner to root - chown root:root /etc/init.d/volttron - - #These will set it to startup automatically at boot - update-rc.d volttron defaults - - #Start the service - /etc/init.d/volttron start diff --git a/docs/source/devguides/deployment/Linux-Platform-Hardening-Recommendations-for-VOLTTRON-users.rst b/docs/source/devguides/deployment/Linux-Platform-Hardening-Recommendations-for-VOLTTRON-users.rst deleted file mode 100644 index 6129b0803e..0000000000 --- a/docs/source/devguides/deployment/Linux-Platform-Hardening-Recommendations-for-VOLTTRON-users.rst +++ /dev/null @@ -1,234 +0,0 @@ -.. _Platform-Hardening-for-VOLTTRON: - -Platform Hardening for VOLTTRON -=============================== - -Rev. 0 \| 1/29/2015 \| Initial Document Development - -Rev. 1 \| 2/5/2015 \| Integrate comments from extended VOLTTRON team. - -Introduction -============ - -VOLTTRON is an agent-based application development platform for -distributed control systems. VOLTTRON itself is built with modern -security principles in mind [security-wp] and implements many security -features for hosted agents. However, VOLTTRON is built on top of Linux -and the underlying Linux platform also needs to be secured in order to -declare the resulting control system as "secure." Any system is only -as secure as its weakest link. The rest of this note is dedicated to -making recommendations for hardening of the underlying Linux platform -that VOLTTRON uses. Note that no system can be 100% secure and the -cyber security strategy that is recommended in this document is based on -risk management. - -Linux System Hardening -====================== - -Here are the non-exhaustive recommendations for Linux -hardening from the VOLTTRON team: - -- Physical Security: Keep the system in locked cabinets or a locked room. - Limit physical access to systems and to the networks - to which they are attached. The goal should be to avoid physical access by - untrusted personnel. This could be extended to blocking or locking USB - ports, removable media drives, etc. Drive encryption could be - used to avoid access via alternate-media booting (off USB stick or DVD) if - physical access can't be guaranteed. Downside of drive encryption would be - needing to enter a passphrase to start system. Alternately, the - Trusted Platform Module (TPM) may be used, but the drive might still - be accessible to those with physical access. Enable chassis - intrusion detection and reporting if supported. If available, use a - physical tamper seal along with or in place of an interior switch. - -- Low level device Security: Keep firmware of all devices (including - BIOS) up-to-date. Password-protect the BIOS. Disable - unneeded/unnecessary devices including serial, parallel, USB, - Firewire, etc. ports; optical drives; wireless devices, such as - Wi-Fi and Bluetooth. Leaving a USB port enabled may be helpful if a - breach occurs to allow saving forensic data to an external drive. - -- Boot security: Disable automounting of external devices. Restrict - the boot device. Disable PXE and other network boot options (unless - that is the primary boot method). Disable booting from USB and other - removable drives. Secure the boot loader. Require an administrator - password to do anything but start the default kernel. Do not allow - editing of kernel parameters. Disable, remove, or password-protect - emergency/recovery boot entries. - -- Security Updates: First and foremost, configure the system to - automatically download security updates. Most security updates can - be installed without rebooting the system, but some updated - (e.g. shared libraries, kernel, etc) require the system to be - rebooted. If possible, configure the system to install the security - updates automatically and reboot at a particular time. We also - recommend reserving the reboot time (e.g. 1:30AM on a Saturday - morning) using the Actuator Agent so that no control actions can - happen during that time. - -- System Access only via Secured Protocols: Disallow all clear text - access to VOLTTRON systems. No telnet, no rsh, no ftp and no - exceptions. Use ssh to gain console access, and scp/sftp to get files in - and out of the system. Disconnect excessively idle SSH Sessions. - -- Disable remote login for "root" users. Do not allow a user to - directly access the system as the "root" user from a remote network - location. Root access to privileged operations can be accomplished - using “sudo” This adds an extra level of security by restricting - access to privileged operations and tracking those operations - through the system log. - -- Manage users and usernames. Limit the number of user accounts. Use - complex usernames rather than first names. - -- Authentication. If possible, use two factor authentication to allow - access to the system. Informally, two factor authentication uses - a combination of "something you know" and "something you have" - to allow access to the - system. RSA SecurID tokens are commonly used for two factor - authentication but other tools are available. When not using - two-factor authentication, use strong passwords and do not share - accounts. - -- Scan for weak passwords. Use password cracking tools such as John - the Ripper (http://www.openwall.com/john/) or nmap with password - cracking modules (http://nmap.org) to look for weak passwords. - -- Utilize Pluggable Authentication Modules (PAM) to strengthen - passwords and the login process. We recommend: - - - pam\_abl: Automated blacklisting on repeated failed - authentication attempts - - pam\_captcha: A visual text-based CAPTCHA challenge module - for PAM - - pam\_passwdqc: A password strength checking module for PAM-aware - password changing programs - - pam\_cracklib: PAM module to check the password against dictionary - words - - pam\_pwhistory: PAM module to remember last passwords - -- Disable unwanted services. Most desktop and server Linux - distributions come with many unnecessary services enabled. Disable - all unnecessary services. Refer to your distribution's documentation - to discover how to check and disable these services. - -- Just as scanning for weak passwords is a step to more secure systems, - regular network scans using Nmap (www.nmap.org) to find what network - services are being offered is another step towards a more secure - system. Note, use nmap or similar tools very carefully on BACnet and modbus - environments. These scanning tools are known to crash/reset BACnet and modbus - devices. - -- Control incoming and outgoing network traffic. Use the built-in - host-based firewall to control who/what can connect to this - system. Many iptables frontends offer a set of predefined rules that - provide a default deny policy for incoming connections and provide - rules to prevent or limit other well known attacks (i.e. rules that - limit certain responses that might amplify a DDoS attack). ufw - (uncomplicated firewall) is a good example. - For example, if the system administrators for the VOLTTRON - device are all located in 10.10.10.0/24 subnetwork, then allow SSH - and SCP logins from only that IP address range. If VOLTTRON system - exports data to a historian at 10.20.20.1 using TCP port 443, allow - outgoing traffic to that port on that server. The idea here is to - limit the attack surface of the system. The smaller the surface, the - better we can analyze the communication patterns of the system and - detect anomalies. One word of caution. While some system - administrators disable network-based diagnostic tools such as ICMP - ECHO responses, VOLTTRON team believes that this hampers - usability. As an example, monitoring which incoming and outgoing - firewall rules are triggering can be accomplished with this command: - ``watch --interval=5 'iptables -nvL | grep -v "0 0"'`` . - -- Rate limit incoming connections to discourage brute force hacking - attempts. Use a tool such as fail2ban - (http://www.fail2ban.org/wiki/index.php/Main_Page) to dynamically - manage firewall rules to rate limit incoming connections and - discourage brute force hacking attempts. sshguard - (http://www.sshguard.net/) is similar to - fail2ban but only used for ssh connections. Further rate limiting - can be accomplished at the firewall level. As an example, you can - restrict the number of connections used by a single IP address to - your server using iptables. Only allow 4 ssh connections per client - system: - ``iptables -A INPUT -p tcp --syn --dport 22 -m connlimit --connlimit-above 4 - –j DROP`` - You can limit the number of connections per minute. The following - example will drop incoming connections if an IP address makes more - than 10 connection attempts to port 22 within 60 seconds: - ``iptables -A INPUT -p tcp –dport 22 -i eth0 -m state --state NEW -m recent - --set`` - ``iptables -A INPUT -p tcp –dport 22 -i eth0 -m state --state NEW -m recent - --update –-seconds 60 -–hitcount 10 –j DROP`` - -- Use a file system integrity tool to monitor for unexpected file - changes. Tools such as tripwire - (http://sourceforge.net/projects/tripwire/) to monitor filesystem - for changed files. Another file integrity checking tool to consider - is AIDE (Advanced Intrusion Detect Environment) - (http://aide.sourceforge.net/). - -- Use filesystem scanning tools periodically to check for - exploits. Available tools such as checkrootkit - (http://www.chkrootkit.org), rkhunter - (http://rkhunter.sourceforge.net) and others should be used to check - for known exploits on a periodic basis and report their results. - -- VOLTTRON does not use apache or require it. If Apache is being used, - e recommend using mod\_security and mod\_evasive modules. - -System Monitoring -================= - -- Monitor system state and resources. Use a monitoring tool such as - Xymon (http://xymon.sourceforge.net) or big brother - (http://www.bb4.org/features.html) to remotely monitor the system - resources and state. Set the monitoring tools to alert the system - administrators if anomalous use of resources (e.g. connections, - memory, etc) are detected. An administrator can also use unix - commands such as netstat to look for open connections periodically. - -- Watch system logs and get logs off the system. Use a utility such as - logwatch (http://sourceforge.net/projects/logwatch/files/) or - logcheck (http://logcheck.org) to get - daily summary of system activity via email. For Linux distributions - that use systemd, use journalwatch - (http://git.the-compiler.org/journalwatch/) - to accomplish the same task. - Additionally, use a remote syslog server to collect logs from all - VOLTTRON systems in - the field at a centralized location for analysis. A tool such as - splunk is ideal for this task and comes with many built-in analysis - applications. Another benefit of sending logs remotely off the platform - is the ability to inspect the logs even when the platform may be - compromised. - -- An active intrusion sensor such as PSAD - (http://cipherdyne.org/psad/) can be used to look for intrusions as well. - -Security Testing -================ - -Every security control discussed in the previous sections must be -tested to determine correct operation and impact. -For example, if we inserted a firewall rule to ban connections -from an IP address such as 10.10.10.2, then we need to test that the -connections actually fail. - -In addition to functional correctness testing, common security testing -tools such as Nessus (http://www.tenable.com/products/nessus) and nmap -(http://nmap.org) should be used to perform cyber security testing. - -Conclusion -========== - -No system is 100% secure unless it is disconnected from the network and -is in a physically secure location. VOLTTRON team recommends a -risk-based cyber security approach that considers each risk, and the -impact of an exploit. Mitigating technologies can then be used to -mitigate the most impactful risks first. VOLTTRON is built with security -in mind from the ground up. But it is only as secure as the operating -system that it runs on top of. This document is intended to help -VOLTTRON users to secure the underlying Linux operating system to -further improve the robustness of the VOLTTRON platform. Any security -questions should be directed to volttron@pnnl.gov. diff --git a/docs/source/devguides/deployment/Multiple-Address-Configuration.rst b/docs/source/devguides/deployment/Multiple-Address-Configuration.rst deleted file mode 100644 index 1a38cb0062..0000000000 --- a/docs/source/devguides/deployment/Multiple-Address-Configuration.rst +++ /dev/null @@ -1,16 +0,0 @@ -.. _External-Address-Configuration: - -Platform External Address Configuration -======================================= - -In the configuration file located in $VOLTTRON\_HOME/config add -vip-address=tcp://ip:port for each address you want to listen on - -:: - - Example - vip-address=tcp://127.0.0.102:8182 - vip-address=tcp://127.0.0.103:8083 - vip-address=tcp://127.0.0.103:8183 - -.. note:: The config file is generated after running the vcfg command. The vip-address is for the local platform, NOT the remote platform. diff --git a/docs/source/devguides/deployment/index.rst b/docs/source/devguides/deployment/index.rst deleted file mode 100644 index bdb203ba06..0000000000 --- a/docs/source/devguides/deployment/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -================= -Deployment Advice -================= - -.. toctree:: - :glob: - :maxdepth: 2 - - Deployment-Options - Linux-Platform-Hardening-Recommendations-for-VOLTTRON-users - Multiple-Address-Configuration diff --git a/docs/source/devguides/eclipse/Manual-Plugin-Install.rst b/docs/source/devguides/eclipse/Manual-Plugin-Install.rst deleted file mode 100644 index d9361ab872..0000000000 --- a/docs/source/devguides/eclipse/Manual-Plugin-Install.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. _Manual-Plugin-Install: - -Manual Python Plugin Installation -================================= - -If Eclipse doesn't have the Marketplace, follow these steps to get the -plugins: - -- Help -> Install New Software -- Click on the "Add" button -- For name use: EGit -- For location: \\ -- After hitting OK, check the box for: Check Select All -- Click through Next, Agree to Terms, then Finish -- Allow Eclipse to restart - -After installing Eclipse, you must add the PyDev plugin to the -environment. In Eclipse: - -- Help -> Install New Software -- Click on the "Add" button -- For name use: PyDev -- For location: ``__ -- Check the box for PyDev -- Click through Next, Agree to Terms, Finish -- Allow Eclipse to restart - diff --git a/docs/source/devguides/eclipse/files/clone-existing.png b/docs/source/devguides/eclipse/files/clone-existing.png deleted file mode 100755 index 465f351bf0..0000000000 Binary files a/docs/source/devguides/eclipse/files/clone-existing.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/eclipse-marketplace2.png b/docs/source/devguides/eclipse/files/eclipse-marketplace2.png deleted file mode 100755 index 91b4b5f706..0000000000 Binary files a/docs/source/devguides/eclipse/files/eclipse-marketplace2.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/general-project.png b/docs/source/devguides/eclipse/files/general-project.png deleted file mode 100755 index 106228a41d..0000000000 Binary files a/docs/source/devguides/eclipse/files/general-project.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/git-view.png b/docs/source/devguides/eclipse/files/git-view.png deleted file mode 100755 index 0b59e8fcc3..0000000000 Binary files a/docs/source/devguides/eclipse/files/git-view.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/listener-all-vars.png b/docs/source/devguides/eclipse/files/listener-all-vars.png deleted file mode 100755 index f4fdde5296..0000000000 Binary files a/docs/source/devguides/eclipse/files/listener-all-vars.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/pick-python.png b/docs/source/devguides/eclipse/files/pick-python.png deleted file mode 100755 index 2d3e8eafaf..0000000000 Binary files a/docs/source/devguides/eclipse/files/pick-python.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/run-results.png b/docs/source/devguides/eclipse/files/run-results.png deleted file mode 100755 index 5568a59585..0000000000 Binary files a/docs/source/devguides/eclipse/files/run-results.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/select-branch.png b/docs/source/devguides/eclipse/files/select-branch.png deleted file mode 100755 index 4333c0df90..0000000000 Binary files a/docs/source/devguides/eclipse/files/select-branch.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/volttron-main-args.png b/docs/source/devguides/eclipse/files/volttron-main-args.png deleted file mode 100755 index 241e1c35c7..0000000000 Binary files a/docs/source/devguides/eclipse/files/volttron-main-args.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/files/volttron-pick-main.png b/docs/source/devguides/eclipse/files/volttron-pick-main.png deleted file mode 100755 index 13168ba066..0000000000 Binary files a/docs/source/devguides/eclipse/files/volttron-pick-main.png and /dev/null differ diff --git a/docs/source/devguides/eclipse/index.rst b/docs/source/devguides/eclipse/index.rst deleted file mode 100644 index 4cef99041e..0000000000 --- a/docs/source/devguides/eclipse/index.rst +++ /dev/null @@ -1,210 +0,0 @@ -================= -Eclipse IDE Setup -================= - -The only thing that is necessary to create a VOLTTRON agent is a text -editor and the shell. However, we have found the Eclipse Development -Environment (IDE) to be a valuable tool for helping to develop VOLTTRON -agents. You can obtain the latest (MARS as fo 10/7/15) from -http://www.eclipse.org/. Once downloaded the `PyDev Plugin <#pydev-plugin>`__ -is a valuable tool for executing the platform as well as debugging agent code. - -- `Install PyDev Plugin <#pydev-plugin>`__ -- `Clone the VOLTTRON Source <#cloning-the-source-code>`__ -- `Build VOLTTRON <#build-volttron>`__ -- `Link Eclipse to VOLTTRON Python Environment <#linking-eclipse-and-the-volttron-python-environment>`__ -- `Make Project a PyDev Project <#make-project-a-pydev-project>`__ -- `Testing the Installation <#testing-the-installation>`__ -- `Execute VOLTTRON Through Shell <#execute-volttron-through-shell>`__ -- `Execute VOLTTRON Through Eclipse <#execute-volttron-through-eclipse>`__ -- `Start a ListenerAgent <#start-a-listeneragent>`__ - -PyDev Plugin ------------- - -Installing the PyDev plugin from the Eclipse Market place There is a -python plugin for eclipse that makes development much easier. Install it -from the eclipse marketplace. - -|Help -> Eclipse Marketplace...| - -|Click Install| - -Cloning the Source Code ------------------------ - -The VOLTTRON code is stored in a git repository. Eclipse (Luna and Mars) -come with a git plugin out of the box. For other versions the plugin is -available for Eclipse that makes development more convenient (note: you -must have Git :ref:`already installed ` on the -system and have :ref:`built VOLTTRON `): - -If your version of Eclipse does not have the marketplace follow these -:ref:`instructions `. - -The project can now be checked out from the repository into Eclipse. - -#. Open the Git view - - |Select Git view| - -#. Clone a Git Repository - - |Clone existing repo| - -#. Fill out the URI: https://github.com/VOLTTRON/volttron - - |Select repo| - -#. Select master for latest stable version - - |Select branch repo| - -#. Import the cloned repository as a general project - - |Import project| - -#. Pick a project name (default volttron) and hit Finish - - |Finish import| - -#. Switch to the PyDev perspective - -Build VOLTTRON --------------- - -Continue the setup process by opening a command shell. Make the current -directory the root of your cloned VOLTTRON directory. Follow the -instructions in our `Building VOLTTRON `__ section of -the wiki and then continue below. - -Linking Eclipse and the VOLTTRON Python Environment ---------------------------------------------------- - -From the Eclipse IDE right click on the project name and select Refresh -so eclipse will be aware of the file system changes. The next step will -define the python version that PyDev will use for VOLTTRON - -#. Choose Window - > Preferences -#. Expand the PyDev tree -#. Select Interpreters - > Python Interpreter -#. Click New -#. Click Browse and browse to the pydev-python file located in scripts - directory off of the volttron source -#. Click Ok - - |Pick Python| - -#. Select All, then uncheck the VOLTTRON root like the picture below - - |Select path| - -#. Click Ok - -.. note:: - - You may need redo this stage after platform updates - -Make Project a PyDev Project ----------------------------- - -#. In the Project/PackageExplorer view on the left, right-click on the - project, PyDev-> Set as PyDev Project -#. Switch to the PyDev perspective (if it has not already switched), - Window -> Open Perspective -> PyDev - |Set as Pydev| - -Eclipse should now be configured to use the project's environment. - -Testing the Installation ------------------------- - -In order to test the installation the VOLTTRON platform must be running. -You can do this either through `the shell <#execute-volttron-through-shell>`__ or -`through Eclipse <#execute-volttron-through-eclipse>`__. - -.. _Execute-Volttron-From-Shell: - -Execute VOLTTRON Through Shell -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Open a console and cd into the root of the volttron repository. -#. Execute `source env/bin/activate` -#. Execute `volttron -vv` - - |Execute VOLTTRON in Shell| - -You now have a running VOLTTRON logging to standard out. The next step -to verifying the installation is to `start a listeneragent <#start-a-listeneragent>`__. - -Execute VOLTTRON Through Eclipse -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -#. Click Run -> Run Configuration from the Eclipse Main Menu -#. Click the New Launch Configuration button - - |New Launch Configuration| - -#. Change the name and select the main module `volttron/platform/main.py` - - |Main Module| - -#. Click the Arguments Tab add '-vv' to the arguments and change the working directory to default - - |Arguments| - -#. Click Run. The following image displays the output of a successfully started platform - - |Successful Start| - -:ref: _Start-Listener-Eclipse: - -Start a ListenerAgent -~~~~~~~~~~~~~~~~~~~~~ - -.. warning:: - Before attempting to run an agent in Eclipse, please see the note in: :ref:`AgentDevelopment ` - - - -The listener agent will listen to the message bus for any published -messages. It will also publish a heartbeat message ever 10 seconds (by -default). - -Create a new run configuration entry for the listener agent. - -#. In the Package Explorer view, open examples -> ListenerAgent --> - listener -#. Righ-click on agent.py and select Run As -> Python Run (this will - create a run configuration but fail) -#. On the menu bar, pick Run -> Run Configurations... -#. Under Python Run pick "volttron agent.py" -#. Click on the Arguments tab and Change Working Directory to Default - -#. In the Environment tab, click new set the variable to AGENT\_CONFIG - with the value of /home/git/volttron/examples/ListenerAgent/config - - |Listener Vars| - -#. Click Run, this launches the agent - -You should see the agent start to publish and receive its own heartbeat -message in the console. - -.. |Help -> Eclipse Marketplace...| image:: files/eclipse-marketplace.png -.. |Click Install| image:: files/eclipse-marketplace2.png -.. |Select Git view| image:: files/git-view.png -.. |Clone existing repo| image:: files/clone-existing.png -.. |Select repo| image:: files/select-repo.png -.. |Select branch repo| image:: files/select-branch.png -.. |Import project| image:: files/import-project.png -.. |Finish import| image:: files/finish-import.png -.. |Pick Python| image:: files/pick-python.png -.. |Select path| image:: files/select-path.png -.. |Set as Pydev| image:: files/set-as-pydev.png -.. |Execute VOLTTRON in Shell| image:: files/volttron-console.png -.. |New Launch Configuration| image:: files/new-python-run.png -.. |Main Module| image:: files/volttron-pick-main.png -.. |Arguments| image:: files/volttron-main-args.png -.. |Successful Start| image:: files/run-results.png -.. |Listener Vars| image:: files/listener-all-vars.png diff --git a/docs/source/devguides/files/clone-existing.png b/docs/source/devguides/files/clone-existing.png deleted file mode 100755 index 465f351bf0..0000000000 Binary files a/docs/source/devguides/files/clone-existing.png and /dev/null differ diff --git a/docs/source/devguides/files/eclipse-marketplace.png b/docs/source/devguides/files/eclipse-marketplace.png deleted file mode 100755 index cf6a0136b2..0000000000 Binary files a/docs/source/devguides/files/eclipse-marketplace.png and /dev/null differ diff --git a/docs/source/devguides/files/eclipse-marketplace2.png b/docs/source/devguides/files/eclipse-marketplace2.png deleted file mode 100755 index 91b4b5f706..0000000000 Binary files a/docs/source/devguides/files/eclipse-marketplace2.png and /dev/null differ diff --git a/docs/source/devguides/files/finish-import.png b/docs/source/devguides/files/finish-import.png deleted file mode 100755 index 5a32e44151..0000000000 Binary files a/docs/source/devguides/files/finish-import.png and /dev/null differ diff --git a/docs/source/devguides/files/general-project.png b/docs/source/devguides/files/general-project.png deleted file mode 100755 index 106228a41d..0000000000 Binary files a/docs/source/devguides/files/general-project.png and /dev/null differ diff --git a/docs/source/devguides/files/git-view.png b/docs/source/devguides/files/git-view.png deleted file mode 100755 index 0b59e8fcc3..0000000000 Binary files a/docs/source/devguides/files/git-view.png and /dev/null differ diff --git a/docs/source/devguides/files/import-project.png b/docs/source/devguides/files/import-project.png deleted file mode 100755 index 5ac524f41d..0000000000 Binary files a/docs/source/devguides/files/import-project.png and /dev/null differ diff --git a/docs/source/devguides/files/listener-all-vars.png b/docs/source/devguides/files/listener-all-vars.png deleted file mode 100755 index f4fdde5296..0000000000 Binary files a/docs/source/devguides/files/listener-all-vars.png and /dev/null differ diff --git a/docs/source/devguides/files/new-python-run.png b/docs/source/devguides/files/new-python-run.png deleted file mode 100755 index 580b462c8f..0000000000 Binary files a/docs/source/devguides/files/new-python-run.png and /dev/null differ diff --git a/docs/source/devguides/files/pick-python.png b/docs/source/devguides/files/pick-python.png deleted file mode 100755 index 2d3e8eafaf..0000000000 Binary files a/docs/source/devguides/files/pick-python.png and /dev/null differ diff --git a/docs/source/devguides/files/platform-run-config.png b/docs/source/devguides/files/platform-run-config.png deleted file mode 100755 index 083e157d83..0000000000 Binary files a/docs/source/devguides/files/platform-run-config.png and /dev/null differ diff --git a/docs/source/devguides/files/pydev-python.png b/docs/source/devguides/files/pydev-python.png deleted file mode 100755 index e20f45bdb2..0000000000 Binary files a/docs/source/devguides/files/pydev-python.png and /dev/null differ diff --git a/docs/source/devguides/files/run-results.png b/docs/source/devguides/files/run-results.png deleted file mode 100755 index 5568a59585..0000000000 Binary files a/docs/source/devguides/files/run-results.png and /dev/null differ diff --git a/docs/source/devguides/files/select-branch.png b/docs/source/devguides/files/select-branch.png deleted file mode 100755 index 4333c0df90..0000000000 Binary files a/docs/source/devguides/files/select-branch.png and /dev/null differ diff --git a/docs/source/devguides/files/select-path.png b/docs/source/devguides/files/select-path.png deleted file mode 100755 index 7f690f5696..0000000000 Binary files a/docs/source/devguides/files/select-path.png and /dev/null differ diff --git a/docs/source/devguides/files/select-repo.png b/docs/source/devguides/files/select-repo.png deleted file mode 100755 index d74fd20b45..0000000000 Binary files a/docs/source/devguides/files/select-repo.png and /dev/null differ diff --git a/docs/source/devguides/files/set-as-pydev.png b/docs/source/devguides/files/set-as-pydev.png deleted file mode 100755 index 681eeefd60..0000000000 Binary files a/docs/source/devguides/files/set-as-pydev.png and /dev/null differ diff --git a/docs/source/devguides/files/setup-python.png b/docs/source/devguides/files/setup-python.png deleted file mode 100755 index 6e90e4537c..0000000000 Binary files a/docs/source/devguides/files/setup-python.png and /dev/null differ diff --git a/docs/source/devguides/files/volttron-console.png b/docs/source/devguides/files/volttron-console.png deleted file mode 100755 index 1a63411b9a..0000000000 Binary files a/docs/source/devguides/files/volttron-console.png and /dev/null differ diff --git a/docs/source/devguides/files/volttron-main-args.png b/docs/source/devguides/files/volttron-main-args.png deleted file mode 100755 index 241e1c35c7..0000000000 Binary files a/docs/source/devguides/files/volttron-main-args.png and /dev/null differ diff --git a/docs/source/devguides/files/volttron-main.png b/docs/source/devguides/files/volttron-main.png deleted file mode 100755 index 2443671f96..0000000000 Binary files a/docs/source/devguides/files/volttron-main.png and /dev/null differ diff --git a/docs/source/devguides/files/volttron-pick-main.png b/docs/source/devguides/files/volttron-pick-main.png deleted file mode 100755 index 13168ba066..0000000000 Binary files a/docs/source/devguides/files/volttron-pick-main.png and /dev/null differ diff --git a/docs/source/devguides/index.rst b/docs/source/devguides/index.rst deleted file mode 100644 index 0d736ceb29..0000000000 --- a/docs/source/devguides/index.rst +++ /dev/null @@ -1,21 +0,0 @@ -.. _devguides_index: - -=================== -Developing VOLTTRON -=================== - -.. toctree:: - :glob: - :maxdepth: 1 - - agent_development/index - deployment/index - walkthroughs/index - eclipse/index - pycharm/index - scalability/index - supporting/index - roadmap/index - - - * diff --git a/docs/source/devguides/pycharm/files/00_open_pycharm.png b/docs/source/devguides/pycharm/files/00_open_pycharm.png deleted file mode 100644 index e785320dd7..0000000000 Binary files a/docs/source/devguides/pycharm/files/00_open_pycharm.png and /dev/null differ diff --git a/docs/source/devguides/pycharm/files/01_load_volttron.png b/docs/source/devguides/pycharm/files/01_load_volttron.png deleted file mode 100644 index f46d9ab762..0000000000 Binary files a/docs/source/devguides/pycharm/files/01_load_volttron.png and /dev/null differ diff --git a/docs/source/devguides/pycharm/files/02_set_project_interpreter.png b/docs/source/devguides/pycharm/files/02_set_project_interpreter.png deleted file mode 100644 index d37151ef7d..0000000000 Binary files a/docs/source/devguides/pycharm/files/02_set_project_interpreter.png and /dev/null differ diff --git a/docs/source/devguides/pycharm/files/03_run_settings.png b/docs/source/devguides/pycharm/files/03_run_settings.png deleted file mode 100644 index ad87c45697..0000000000 Binary files a/docs/source/devguides/pycharm/files/03_run_settings.png and /dev/null differ diff --git a/docs/source/devguides/pycharm/files/04_listener_settings.png b/docs/source/devguides/pycharm/files/04_listener_settings.png deleted file mode 100644 index 9e0659ae68..0000000000 Binary files a/docs/source/devguides/pycharm/files/04_listener_settings.png and /dev/null differ diff --git a/docs/source/devguides/pycharm/files/05_run_listener.png b/docs/source/devguides/pycharm/files/05_run_listener.png deleted file mode 100644 index dc007e0ef2..0000000000 Binary files a/docs/source/devguides/pycharm/files/05_run_listener.png and /dev/null differ diff --git a/docs/source/devguides/pycharm/files/06_run_tests.png b/docs/source/devguides/pycharm/files/06_run_tests.png deleted file mode 100644 index 63f31d1106..0000000000 Binary files a/docs/source/devguides/pycharm/files/06_run_tests.png and /dev/null differ diff --git a/docs/source/devguides/roadmap/3.0-Drivers.rst b/docs/source/devguides/roadmap/3.0-Drivers.rst deleted file mode 100644 index 6384778a21..0000000000 --- a/docs/source/devguides/roadmap/3.0-Drivers.rst +++ /dev/null @@ -1,59 +0,0 @@ -3.X drivers -=========== - -Changes from v2.X ------------------ - -- PNNL Point Name is now: Volttron Point Name -- Drivers are now agents -- No more smap config file, now it is an Agent config file. -- MODBUS, add port argument to driver\_config dictionary -- BACnet Change of Value services are supported by the Master - Driver Agent starting with version 3.2. -- Agent config file has links to driver config files which have links - to driver register file. - -Edit the master driver config. This points to the configuration files -for specific drivers. Each of these drivers uses a CSV file to specify -their points (registry file). - -Master Driver Config --------------------- - -- agentid - name of agent -- driver\_config\_list - list of configuration files for drivers under - this master - - | { - | "agentid": "master\_driver", - | "driver\_config\_list": [ - | "/home/user/git/volttron/services/core/MasterDriverAgent/master\_driver/test\_modbus1.config" - | ] - | } - -Device Driver Config --------------------- - -- driver\_config - driver specific information, modbus just needs the - ip for the device being controlled -- campus/building/unit - path to the device -- driver\_type - specify the type of driver (modbus, bacnet, custom) -- registry\_config - the registry file specifying points to collect -- interval - how often to grab/publish data -- timezone - TZ of data being collected -- heart\_beat\_point - registry point to use as a hearbeat to indicate - that VOLTTRON is still controlling device - - | { - | "driver\_config": {"device\_address": "", - | "proxy\_address": "9f18c8d7-ec4b-4674-ad49-e7d0d3328f99"}, - | "campus": "campus", - | "building": "building", - | "unit": "bacnet1", - | "driver\_type": "bacnet", - | "registry\_config":"/home/user/git/volttron/volttron/drivers/bacnet\_lab.csv", - | "interval": 5, - | "timezone": "UTC" - | } - - diff --git a/docs/source/devguides/roadmap/3.0-to-3.5-Migration.rst b/docs/source/devguides/roadmap/3.0-to-3.5-Migration.rst deleted file mode 100644 index 0a918ac940..0000000000 --- a/docs/source/devguides/roadmap/3.0-to-3.5-Migration.rst +++ /dev/null @@ -1,48 +0,0 @@ -Migration from 3.0 to 3.5 -~~~~~~~~~~~~~~~~~~~~~~~~~ - -Drivers -======= - -The BACnet driver configurations now require device ids. - -3.0 configurations had the line: - -:: - - "driver_config": {"device_address": address}, - -3.5 configs needs the following addition to the the *driver_config* dictionary: - -:: - - "driver_config": {"device_address": address, - "device_id": id}, - - -Historian -========= - -The 3.5 MySQL historian will try adding rows to a metadata table but will -not create the table automatically. It can be added to the database with - -.. code-block:: sql - - CREATE TABLE meta(topic_id INTEGER NOT NULL, - metadata TEXT NOT NULL, - PRIMARY KEY(topic_id)); - - -ActuatorAgent -============= - -The Heartbeat agent has been removed in version 3.5, its job now being -done from within the actuator. The period of the heartbeat toggle function -can be set by adding - -:: - - "heartbeat_period": 20 - -to the actuator's config file. This period defaults to 60 seconds if it -is not specified. diff --git a/docs/source/devguides/roadmap/4.1-5.0_Migration.rst b/docs/source/devguides/roadmap/4.1-5.0_Migration.rst deleted file mode 100644 index fa085119d6..0000000000 --- a/docs/source/devguides/roadmap/4.1-5.0_Migration.rst +++ /dev/null @@ -1,114 +0,0 @@ -.. _4.1_to_5.0: - -Migration from 4.1 to 5.0 -========================= - -5.0 includes numerous changes (Tagging Service, Message Bus performance increase, Multi-platform pub/sub, etc.), but -the majority of these should be invisible to most users. - -Key issues to note are: - - -Operations Agents ------------------ - -Several agents have been moved from "services/core" to "services/ops" to highlight their use in monitoring a -deployment. They are not necessary when developing against a single instance, but are essential for VOLTTRON(tm) in a -deployed environment. - -Agents affected: - -- services/ops/AgentWatcher -- services/ops/AlertAgent,0.4 -- services/ops/AlertMonitor -- services/ops/Alerter -- services/ops/EmailerAgent -- services/ops/FailoverAgent -- services/ops/FileWatchPublisher -- services/ops/LogStatisticsAgent -- services/ops/MessageDebuggerAgent -- services/ops/SysMonAgent -- services/ops/ThresholdDetectionAgent - - -Rebuild Agents --------------- - -Rebuilding agents is :underline:`required` when upgrading to a new VOLTTRON(tm) version to ensure that agents are -operating with the latest code. Errors will occur if agents built in a previous version attempt to run with the -latest version of the platform. - -ForwardHistorian ----------------- -The ForwardHistorian configuration has been changed. -Please see: https://github.com/VOLTTRON/volttron/blob/develop/services/core/ForwardHistorian/README.rst for the -new options. - -.. note:: NOTE If you have no entry for service_topic_list in your configuration, the new default will cause - ALL data to be forwarded. Please update your configuration if you are forwarding a subset of data. - - -VOLTTRON Central Management UI ------------------------------- - -The url for VOLTTRON Central Management is now http://IP:port/vc/index.html - -Agent Versions --------------- - -To get the versions of agents in the VOLTTRON project, run "python scripts/get_versions.py". - - -========================== ====== ======= -Agent Name 4.1 5.0 -========================== ====== ======= -CAgent 1.0 1.0 -CSVHistorian N/A 1.0.1 -ConfigActuation 0.1 0.1 -DataPublisher 3.0.1 3.0.1 -DataPuller N/A 3.5 -ExampleDrivenControlAgent 0.1 0.1 -ExampleSubscriber 3.0 3.0 -ListenerAgent 3.2 3.2 -ProcessAgent 0.1 0.1 -SchedulerExample 0.1 0.1 -SimpleForwarder 3.0 3.0 -SimpleWebAgent 0.1 0.1 -WeatherForecastCSV_UW -WebRPC -WebSocketAgent 0.0.1 0.0.1 -PrometheusScrapeAgent N/A 0.0.1 -WeatherAgent -ActuatorAgent 1.0 1.0 -BACnetProxy 0.2 0.3 -CrateHistorian 1.0.1 1.0.2 -DataMover 0.1 0.1 -ExternalData 1.0 1.0 -ForwardHistorian 3.7 4.0 -MQTTHistorian 0.1 0.2 -MasterDriverAgent 3.1.1 3.1.1 -MongodbAggregateHistorian 1.0 1.0 -MongodbHistorian 2.1 2.1 -MongodbTaggingService N/A 1.0 -OpenEISHistorian 3.1 3.1 -SEP2Agent N/A 1.0 -SEP2DriverTestAgent N/A 1.0 -SQLAggregateHistorian 1.0 1.0 -SQLHistorian 3.6.1 3.6.1 -SQLiteTaggingService N/A 1.0 -VolttronCentral 4.0.3 4.2 -VolttronCentralPlatform 4.0 4.5.2 -WeatherAgent 3.0 3.0 -AgentWatcher 0.1 0.1 -AlertAgent 0.4 0.4 -AlertMonitor 0.1 0.1 -Alerter 0.1 0.1 -EmailerAgent 1.3 1.3.1 -FailoverAgent 0.2 0.2 -FileWatchPublisher 3.6 3.6 -LogStatisticsAgent 1.0 1.0 -MessageDebuggerAgent N/A 1.0 -SysMonAgent 3.6 3.6 -ThresholdDetectionAgent 3.7 3.7 -========================== ====== ======= - diff --git a/docs/source/devguides/roadmap/index.rst b/docs/source/devguides/roadmap/index.rst deleted file mode 100644 index 7215a50ad3..0000000000 --- a/docs/source/devguides/roadmap/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _Roadmap: - -=============================== -Development History and Roadmap -=============================== - -For information on updating to the latest version of the platform see :ref:`4.0 to 5.0 migration <4.1_to_5.0>`. - - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/devguides/scalability/index.rst b/docs/source/devguides/scalability/index.rst deleted file mode 100644 index ed09005d88..0000000000 --- a/docs/source/devguides/scalability/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -======================= -Scalability Experiments -======================= - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/devguides/supporting/JupyterNotebooks.rst b/docs/source/devguides/supporting/JupyterNotebooks.rst deleted file mode 100644 index f3005cc120..0000000000 --- a/docs/source/devguides/supporting/JupyterNotebooks.rst +++ /dev/null @@ -1,210 +0,0 @@ -.. _Jupyter-Notebooks: - -Jupyter Notebooks -================= - -Jupyter is an open-source web application that lets you create and share “notebook” documents. -A notebook displays formatted text along with live code that can be executed from the browser, -displaying the execution output and preserving it in the document. -Notebooks that execute Python code used to be called `iPython Notebooks`. -The iPython Notebook project has now merged into Project Jupyter. - -Using Jupyter to Manage a Set of VOLTTRON Servers -------------------------------------------------- - -The following Jupyter notebooks for VOLTTRON have been provided as examples: - - - **Collector notebooks**. Each Collector notebook sets up a particular type of device driver - and forwards device data to another VOLTTRON instance, the Aggregator. - - - **SimulationCollector notebook**. This notebook sets up a group of Simulation device drivers - and forwards device data to another VOLTTRON instance, the Aggregator. - - **BacnetCollector notebook**. This notebook sets up a Bacnet (or Bacnet gateway) device driver - and forwards device data to another VOLTTRON instance, the Aggregator. - - **ChargePointCollector notebook**. This notebook sets up a ChargePoint device driver - and forwards device data to another VOLTTRON instance, the Aggregator. - - **SEP2Collector notebook**. This notebook sets up a SEP2.0 (IEEE 2030.5) device driver - and forwards device data to another VOLTTRON instance, the Aggregator. - The Smart Energy Profile 2.0 ("SEP2") protocol implements IEEE 2030.5, and is capable - of connecting a wide array of smart energy devices to the Smart Grid. The standard is - designed to run over TCP/IP and is physical layer agnostic. - - - **Aggregator notebook**. This notebook sets up and executes aggregation of forwarded data - from other VOLTTRON instances, using a historian to record the data. - - **Observer notebook**. This notebook sets up and executes a DataPuller that captures data from - another VOLTTRON instance, using a Historian to record the data. It also uses the - Message Debugger agent to monitor messages flowing across the VOLTTRON bus. - -Each notebook configures and runs a set of VOLTTRON Agents. When used as a set, they -implement a multiple-VOLTTRON-instance architecture that catures remote device data, aggregates it, -and reports on it, routing the data as follows: - -.. image:: files/jupyter_notebooks.jpg - - -Install VOLTTRON and Jupyter on a Server ----------------------------------------- - -The remainder of this guide describes how to set up a host for VOLTTRON and Jupyter. -Use this setup process on a server in order to prepare it to run Jupyter notebook for VOLLTTRON. - -**Set Up the Server and Install VOLTTRON** - -The following is a complete, but terse, description of the steps for installing -and running VOLTTRON on a server. For more detailed, general instructions, -see :ref:`Installing Volttron `. - -The VOLTTRON server should run on the same host as the Jupyter server. - -*Load third-party software:* -:: - - $ sudo apt-get update - $ sudo apt-get install build-essential python-dev openssl libssl-dev libevent-dev git - $ sudo apt-get install sqlite3 - -*Clone the VOLTTRON repository from github:* -:: - - $ cd ~ - $ mkdir repos - $ cd repos - $ git clone https://github.com/VOLTTRON/volttron/ - -*Check out the develop (or master) branch and bootstrap the development environment:* -:: - - $ cd volttron - $ git checkout develop - $ python bootstrap.py - -*Activate and initialize the VOLTTRON virtual environment:* - -Run the following each time you open a new command-line shell on the server: -:: - - $ export VOLTTRON_ROOT=~/repos/volttron - $ export VOLTTRON_HOME=~/.volttron - $ cd $VOLTTRON_ROOT - $ source env/bin/activate - -**Install Extra Libraries** - -*Add Python libraries to the VOLTTRON virtual environment:* - -These notebooks use third-party software that's not included in VOLTTRON's standard distribution -that was loaded by ``bootstrap.py``. The following additional packages are required: - -- Jupyter -- SQLAlchemy (for the Message Debugger) -- Suds (for the ChargePoint driver) -- Numpy and MatPlotLib (for plotted output) - -*Note*: A Jupyter installation also installs and/or upgrades many dependent libraries. -Doing so could disrupt other work on the OS, so it’s safest to load Jupyter (and any other -library code) in a virtual environment. VOLTTRON runs in a virtual environment anyway, -so if you're using Jupyter in conjunction with VOLTTRON, it should be installed in your -VOLTTRON virtual environment. -(In other words, be sure to use ``cd $VOLTTRON_ROOT`` and ``source env/bin/activate`` -to activate the virtual environment before running ``pip install``.) - -*Install the third-party software:* -:: - - $ pip install SQLAlchemy==1.1.4 - $ pip install suds-jurko==0.6 - $ pip install numpy - $ pip install matplotlib - $ pip install jupyter - -Note: If ``pip install`` fails due to an untrusted cert, try using this command instead: -:: - - $ pip install --trusted-host pypi.python.org - -(An InsecurePlatformWarning may be displayed, but it typically won't stop the installation from proceeding.) - -**Configure VOLTTRON** - -Use the ``vcfg`` wizard to configure the VOLTTRON instance. By default, the wizard -configures a VOLTTRON instance that communicates with agents only on the local host (ip 127.0.0.1). -This set of notebooks manages communications among multiple VOLTTRON instances on different hosts. -To enable this cross-host communication on VOLTTRON's web server, replace 127.0.0.1 with the -host's IP address, as follows: -:: - - $ vcfg - -- Accept all defaults, except as follows. -- If a prompt defaults to 127.0.0.1 as an IP address, substitute the ``host's IP address`` (this may happen multiple times). -- When asked whether this is a volttron central, answer ``Y``. -- When prompted for a username and password, use ``admin`` and ``admin``. - -**Start VOLTTRON** - -Start the main VOLTTRON process, logging to $VOLTTRON_ROOT/volttron.log: -:: - - $ volttron -vv -l volttron.log --msgdebug - -This runs VOLTTRON as a foreground process. To run it in the background, use: -:: - - $ ./start-volttron --msgdebug - -This also enables the Message Debugger, a non-production VOLTTRON debugging aid -that's used by some notebooks. To run with the Message Debugger disabled (VOLTTRON's normal state), -omit the ``--msgdebug`` flag. - -Now that VOLTTRON is running, it's ready for agent configuration and execution. -Each Jupyter notebook contains detailed instructions and executable code for doing that. - -**Configure Jupyter** - -More detailed information about installing, configuring and using Jupyter Notebooks is available -on the Project Jupyter site, http://jupyter.org/. - -*Create a Jupyter configuration file:* -:: - - $ jupyter notebook --generate-config - -*Revise the Jupyter configuration:* - -Open ``~/.jupyter/jupyter_notebook_config.py`` in your favorite text editor. -Change the configuration to accept connections from any IP address (not just from localhost) -and use a specific, non-default port number: - -- Un-comment ``c.NotebookApp.ip`` and set it to: ``'*'`` instead of ``'localhost'`` -- Un-comment ``c.NotebookApp.port`` and set it to: ``'8891'`` instead of ``'8888'`` - -Save the config file. - -*Open ports for TCP connections:* - -Make sure that your Jupyter server host's security rules allow inbound TCP connections on port ``8891``. - -If the VOLTTRON instance needs to receive TCP requests, for example ForwardHistorian or DataPuller -messages from other VOLTTRON instances, make sure that the host's security rules also allow inbound TCP -communications on VOLTTRON's port, which is usually ``22916``. - -**Launch Jupyter** - -*Start the Jupyter server:* - -In a separate command-line shell, set up VOLTTRON's environment variables and virtual environment, -and then launch the Jupyter server: -:: - - $ export VOLTTRON_HOME=(your volttron home directory, e.g. ~/.volttron) - $ export VOLTTRON_ROOT=(where volttron was installed; e.g. ~/repos/volttron) - $ cd $VOLTTRON_ROOT - $ source env/bin/activate - $ cd examples/JupyterNotebooks - $ jupyter notebook --no-browser - -*Open a Jupyter client in a web browser:* - -Look up the host's IP address (e.g., using ifconfig). Open a web browser and navigate to -the URL that was displayed when you started jupyter, replacing ``localhost`` with that -IP address. A Jupyter web page should display, listing your notebooks. diff --git a/docs/source/devguides/supporting/Python-for-Matlab-Users.rst b/docs/source/devguides/supporting/Python-for-Matlab-Users.rst deleted file mode 100644 index 78af2f0c41..0000000000 --- a/docs/source/devguides/supporting/Python-for-Matlab-Users.rst +++ /dev/null @@ -1,170 +0,0 @@ -.. _Python-for-Matlab-Users: - -Python for Matlab Users -======================= - -Matlab is a popular, proprietary programming language and tool suite with built -in support for matrix operations and graphically plotting computation results. -The purpose of this document is to introduce Python to those already familiar -Matlab so it will be easier for them to develop tools and agents in VOLTTRON. - -A Simple Function ------------------ - -Python and Matlab are similar in many respects, syntactically and semantically. -With the addition of the NumPy library in Python, almost all numerical -operations in Matlab can be emulated or directly translated. Here are functions -in each language that perform the same operation: - -.. code-block:: matlab - - % Matlab - function [result] = times_two(number) - result = number * 2; - end - -.. code-block:: python - - # Python - def times_two(number): - result = number * 2 - return result - -Some notes about the previous functions: - -#. Values are explicitly returned with the `return` statement. It is possible - to return multiple values, as in Matlab, but doing this without a good reason - can lead to overcomplicated functions. - -#. Semicolons are not used to end statements in python, and white space is - significant. After a block is started (if, for, while, functions, classes) - subsequent lines should be indented with four spaces. The block ends when the - programmer stops adding the extra level of indentation. - -Translating ------------ - -The following may be helpful if you already have a Matlab file or function -that will be translated into Python. Many of the syntactic differences -between Matlab and Python can be rectified with your text editor's find and -replace feature. - -Start by copying all of your Matlab code into a new file with a `.py` -extension. I recommend commenting everything out and uncommenting the -Matlab code in chunks. This way you can write valid Python and verify -it as you translate, instead of waiting till the whole file is "translated". -Editors designed to work with Python should be able to highlight syntax errors -for you as well. - -#. Comments are created with a `%`. Find and replace these with `#`. - -#. Change `elseif` blocks to `elif` blocks. - -#. Python indexes start at zero instead of one. Array slices - and range operations, however, don't include the upper bound, so only the - lower bound should decrease by one. - -#. Semicolons in Matlab are used to suppress output at the - end of lines and for organizing array literals. After arranging the arrays - into nested lists, all semicolons can be removed. - -#. The `end` keyword in Matlab is used both to access the last element - in an array and to close blocks. The array use case can be replaced with `-1` - and the others can be removed entirely. - - -A More Concrete Example ------------------------ - -In the `Building Economic Dispatch `_ -project, a sibling project to VOLTTRON, a number of components written in Matlab -would create a matrix out of some collection of columns and perform least -squares regression using the `matrix division` operator. This is straightforward -and very similar in both languages so long as all of the columns are defined and -are the same length. - -.. code-block:: matlab - - % Matlab - XX = [U, xbp, xbp2, xbp3, xbp4, xbp5]; - AA = XX \ ybp; - -.. code-block:: python - - # Python - import numpy as np - - XX = np.column_stack((U, xbp, xbp2, xbp3, xbp4, xbp5)) - AA, resid, rank, s = np.linalg.lstsq(XX, ybp) - -This pattern also included the creation of the `U` column, a column of -ones used as the bias term in the linear equation. In order to make the Python -version more readable and more robust, the pattern was removed from each -component and replaced with a single function call to -`least_squares_regression`. - -This function does some validation on the input -parameters, automatically creates the bias column, and returns the least squares -solution to the system. Now if we want to change how the solution is calculated -we only have to change the one function, instead of each instance where the -pattern was written originally. - -.. code-block:: python - - def least_squares_regression(inputs=None, output=None): - if inputs is None: - raise ValueError("At least one input column is required") - if output is None: - raise ValueError("Output column is required") - - if type(inputs) != tuple: - inputs = (inputs,) - - ones = np.ones(len(inputs[0])) - x_columns = np.column_stack((ones,) + inputs) - - solution, resid, rank, s = np.linalg.lstsq(x_columns, output) - return solution - -Lessons Learned (sometimes the hard way) ----------------------------------------- - -Variable Names -~~~~~~~~~~~~~~ - -Use descriptive function and variable names whenever possible. The most -important things to consider here are reader comprehension and searching. -Consider a variable called `hdr`. Is it `header` without any vowels, or is it -short for `high-dynamic-range`? Spelling out full words in variable names can -save someone else a lot of guesswork. - -Searching comes in when we're looking for instances of a string or variable. -Single letter variable names are impossible to search for. Variables with two -or three characters are often not much better. - -Matlab load/save -~~~~~~~~~~~~~~~~ - -Matlab has built-in functions to automatically save and load variables from your -programs to disk. Using these functions can lead to poor program design and -should be avoided if possible. It would be best to refactor as you translate if -they are being used. Few operations are so expensive that that cannot be -redone every time the program is run. For part of the program that saves -variables, consider making a function that simply returns them instead. - -If your Matlab program is loading csv files then use the Pandas library when -working in python. Pandas works well with NumPy and is the go-to library when -using csv files that contain numeric data. - -More Resources --------------- - -`NumPy for Matlab Users -`_ -Has a nice list of common operations in Matlab and NumPy. - -`NumPy Homepage -`_ - -`Pandas Homepage -`_ diff --git a/docs/source/devguides/supporting/applications/index.rst b/docs/source/devguides/supporting/applications/index.rst deleted file mode 100755 index 83e4d7f302..0000000000 --- a/docs/source/devguides/supporting/applications/index.rst +++ /dev/null @@ -1,14 +0,0 @@ -============ -Applications -============ -These resources summarize the use of the sample applications that are pre-packaged with VOLTTRON. For detailed -information on these applications, refer to the report Transactional Network Platform: Applications available -at http://www.pnl.gov/main/publications/external/technical_reports/PNNL-22941.pdf. - -Note, as of VOLTTRON 4.0, applications are now in their own repository at: https://github.com/VOLTTRON/volttron-applications - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/devguides/supporting/examples/CAgent.rst b/docs/source/devguides/supporting/examples/CAgent.rst deleted file mode 100644 index 578716a16b..0000000000 --- a/docs/source/devguides/supporting/examples/CAgent.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _CAgent: - -CAgent -====== - -The C Agent uses the ctypes module to load a shared object into memory -so its functions can be called from python. - -There are two versions of the C Agent. The first is a standard agent that can -be installed with the make agent script. The other is a driver interface for -the master driver. - -Building the Shared Object --------------------------- - -The shared object library must be built before installing C Agent examples. -Running *make* in the C Agent source directory will compile the provided C code -using the position independent flag; a requirement for creating shared objects. - -Files created by make can be removed by running *make clean*. - -Agent Installation ------------------- - -After building the shared object library the standard agent can be installed -with the scripts/install-agent.py script. - -The driver interface example must be copied or moved to the master driver's -interface directory. The C Driver configuration tells the interface where to -find the shared object. An example is available in the C Agent's *driver* -directory. diff --git a/docs/source/devguides/supporting/examples/Example-Agents.rst b/docs/source/devguides/supporting/examples/Example-Agents.rst deleted file mode 100644 index 5f330ee570..0000000000 --- a/docs/source/devguides/supporting/examples/Example-Agents.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. _Example-Agents: - -Example Agents Overview -======================= - -Some example agents are included with the platform to help explore its -features. - -- :ref:`DataPublisher` -- :ref:`ListenerAgent` -- :ref:`ProcessAgent` -- :ref:`SchedulerExampleAgent` -- :ref:`CAgent` -- :ref:`DDSAgent` -- :ref:`CSVHistorian` - -More complex agents contributed by other researchers can also be found -in the examples directory. It is recommended that developers new to -VOLTTRON understand the example agents first before diving into the -other agents. - -Example Agent Conventions -------------------------- - -Some of the example agent classes are defined inside a method, for -instance: - -:: - - def ScheduleExampleAgent(config_path, **kwargs): - config = utils.load_config(config_path) - campus= config['campus'] - -This allows configuration information to be extracted from an agent -config file for use in topics. - -:: - - @Pubsub.subscribe('pubsub', DEVICES_VALUE(campus=campus)) - def actuate(self, peer, sender, bus, topic, headers, message): - diff --git a/docs/source/devguides/supporting/examples/FakeDriver.rst b/docs/source/devguides/supporting/examples/FakeDriver.rst deleted file mode 100644 index 1777ce244b..0000000000 --- a/docs/source/devguides/supporting/examples/FakeDriver.rst +++ /dev/null @@ -1,67 +0,0 @@ -.. _FakeDriver: - -.. role:: bash(code) - :language: bash - -Fake Driver -=========== - -The FakeDriver is included as a way to quickly see data published to the message bus in a format -that mimics what a true Driver would produce. This is an extremely simple implementation of the -:ref:`VOLTTRON driver framework`. - -Here, we make a script to build and deploy the fake driver. - - -- Create a config directory (if one doesn't already exist) inside your Volttron repository: :code:`mkdir config`. All local config files will be worked on here. -- Copy over the example config file and registry config file: - -.. code-block:: bash - - `cp examples/configurations/drivers/fake.config config/` - `cp examples/configurations/drivers/fake.csv config/` - -- Edit :code:`registry_config` for the paths on your system: - -fake.config:: - - { - "driver_config": {}, - "registry_config": "config://fake.csv", - "interval": 5, - "timezone": "US/Pacific", - "heart_beat_point": "Heartbeat", - "driver_type": "fakedriver", - "publish_breadth_first_all": false, - "publish_depth_first": false, - "publish_breadth_first": false - } - -- Create a copy of the Master Driver config: - -.. code-block:: bash - - cp examples/configurations/drivers/master-driver.agent config/fake-master-driver.config - -- Add fake.csv and fake.config to the :ref:`configuration store`: - -.. code-block:: bash - - vctl config store platform.driver devices/campus/building/fake config/fake.config - vcfl config store platform.driver fake.csv config/fake.csv --csv - -- Edit fake-master-driver.config to reflect paths on your system - -fake-master-driver.config:: - - { - "driver_scrape_interval": 0.05 - } - -- Use the scripts/install-agent.py script to install the Master Driver agent: - -.. code-block:: bash - - python scripts/install-agent.py -s services/core/MasterDriverAgent -c config/fake-master-driver.config - -- If you have a :ref:`Listener Agent` already installed, you should start seeing data being published to the bus. diff --git a/docs/source/devguides/supporting/examples/MatLabAgent.rst b/docs/source/devguides/supporting/examples/MatLabAgent.rst deleted file mode 100644 index cb6a7e2b77..0000000000 --- a/docs/source/devguides/supporting/examples/MatLabAgent.rst +++ /dev/null @@ -1,610 +0,0 @@ -.. _MatlabAgent: - -MatLab Agent -============ - -The MatLab agent and Matlab standalone agent together are -example agents that allow for matlab scripts to be run in a -Windows environment and interact with the VOLTTRON platform running in a Linux environment. -The MatLab agent takes advantage of the config store to -dynamically send scripts and commandline arguments across -the message bus to one or more standalone agents in -Windows. The standalone agent then executes the requested script -and arguments, and sends back the results to the MatLab agent. - - -Overview of Matlab Agents -------------------------- - -There are multiple components that are used for the matlab agent. -This diagram is to represent the components that are connected to -the matlab agents. In this example, the scripts involved are -based on the default settings in the matlab agent. - -|matlab-agent-diagram| - -MatLabAgentV2 -~~~~~~~~~~~~~ - -MatLabAgentV2 publishes the name of a python script along with any command -line arguments that are needed for the script to the appropriate topic. -The agent then listens on another topic, and whenever anything is published -on this topic, it stores the message in the log file chosen when the volttron -instance is started. If there are multiple standalone agents, the agent can -send a a script to each of them, along with their own set of command line -arguments. In this case, each script name and set of command line arguments -should be sent to separate subtopics. This is done so that no matter how many -standalone agents are in use, MatLabAgentV2 will record all of their responses. - -.. code:: - - class MatlabAgentV2(Agent): - - def __init__(self,script_names=[], script_args=[], topics_to_matlab=[], - topics_to_volttron=None,**kwargs): - - super(MatlabAgentV2, self).__init__(**kwargs) - _log.debug("vip_identity: " + self.core.identity) - - self.script_names = script_names - self.script_args = script_args - self.topics_to_matlab = topics_to_matlab - self.topics_to_volttron = topics_to_volttron - self.default_config = {"script_names": script_names, - "script_args": script_args, - "topics_to_matlab": topics_to_matlab, - "topics_to_volttron": topics_to_volttron} - - - #Set a default configuration to ensure that self.configure is called immediately to setup - #the agent. - self.vip.config.set_default("config", self.default_config) - #Hook self.configure up to changes to the configuration file "config". - self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") - - def configure(self, config_name, action, contents): - """ - Called after the Agent has connected to the message bus. - If a configuration exists at startup this will be - called before onstart. - Is called every time the configuration in the store changes. - """ - config = self.default_config.copy() - config.update(contents) - - _log.debug("Configuring Agent") - - try: - script_names = config["script_names"] - script_args = config["script_args"] - topics_to_matlab = config["topics_to_matlab"] - topics_to_volttron = config["topics_to_volttron"] - - except ValueError as e: - _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) - return - - self.script_names = script_names - self.script_args = script_args - self.topics_to_matlab = topics_to_matlab - self.topics_to_volttron = topics_to_volttron - self._create_subscriptions(self.topics_to_volttron) - - for script in range(len(self.script_names)): - cmd_args = "" - for x in range(len(self.script_args[script])): - cmd_args += ",{}".format(self.script_args[script][x]) - _log.debug("Publishing on: {}".format(self.topics_to_matlab[script])) - self.vip.pubsub.publish('pubsub', topic=self.topics_to_matlab[script], - message="{}{}".format(self.script_names[script],cmd_args)) - _log.debug("Sending message: {}{}".format(self.script_names[script],cmd_args)) - - _log.debug("Agent Configured!") - -For this example, the agent is publishing to the matlab/to_matlab/1 topic, -and is listening to the matlab/to_volttron topic. It is sending the script -name testScript.py with the argument 20. These are the default values found -in the agent, if no configuration is loaded. - -.. code:: - - script_names = config.get('script_names', ["testScript.py"]) - script_args = config.get('script_args', [["20"]]) - topics_to_matlab = config.get('topics_to_matlab', ["matlab/to_matlab/1"]) - topics_to_volttron = config.get('topics_to_volttron', "matlab/to_volttron/") - -StandAloneMatLab.py -~~~~~~~~~~~~~~~~~~~ - -The StandAloneMatLab.py script is a standalone agent designed to be able to -run in a windows environment. Its purpose is to listen to a topic, and when -something is published to this topic, it takes the message, and sends it to -the script_runner function in scriptwrapper.py. This function processes the -inputs, and then the output is published to another topic. - -.. code:: - - class StandAloneMatLab(Agent): - '''The standalone version of the MatLab Agent''' - - @PubSub.subscribe('pubsub', _topics['volttron_to_matlab']) - def print_message(self, peer, sender, bus, topic, headers, message): - print('The Message is: ' + str(message)) - messageOut = script_runner(message) - self.vip.pubsub.publish('pubsub', _topics['matlab_to_volttron'], message=messageOut) - -settings.py -~~~~~~~~~~~ - -The topic to listen to and the topic to publish to are defined in settings.py, -along with the information needed to connect the standalone agent to the primary -volttron instance. These should be the same topics that the MatLabAgentV2 is -publishing and listening to, so that the communication can be successful. To -connect the standalone agent to the primary volttron instance, the ip address -and port of the instance are needed, along with the server key. - -.. code:: - - _topics = { - 'volttron_to_matlab': 'matlab/to_matlab/1', - 'matlab_to_volttron': 'matlab/to_volttron/1' - } - - # The parameters dictionary is used to populate the agent's - # remote vip address. - _params = { - # The root of the address. - # Note: - # 1. volttron instance should be configured to use tcp. use command vcfg - # to configure - 'vip_address': 'tcp://192.168.56.101', - 'port': 22916, - - # public and secret key for the standalone_matlab agent. - # These can be created using the command: volttron-ctl auth keypair - # public key should also be added to the volttron instance auth - # configuration to enable standalone agent access to volttron instance. Use - # command 'vctl auth add' Provide this agent's public key when prompted - # for credential. - - 'agent_public': 'dpu13XKPvGB3XJNVUusCNn2U0kIWcuyDIP5J8mAgBQ0', - 'agent_secret': 'Hlya-6BvfUot5USdeDHZ8eksDkWgEEHABs1SELmQhMs', - - # Public server key from the remote platform. This can be - # obtained using the command: - # volttron-ctl auth serverkey - 'server_key': 'QTIzrRGQ0-b-37AbEYDuMA0l2ETrythM2V1ac0v9CTA' - - } - - def remote_url(): - return "{vip_address}:{port}?serverkey={server_key}" \ - "&publickey={agent_public}&" \ - "secretkey={agent_secret}".format(**_params) - -The primary volttron instance will then need to add the public key from the -standalone agent. In this example, the topic that the standalone agent is -listening to is matlab/to_matlab/1, and the topic it is publishing to is matlab/to_volttron/1. - -scriptwrapper.py -~~~~~~~~~~~~~~~~ - -Scriptwrapper.py contains the script_runner function. The purpose of -this function is to take in a string that contains a python script -and command line arguments separated by commas. This string is parsed -and passed to the system arguments, which allows the script sent to -the function to use the command line arguments. The function then -redirects standard output to a StringIO file object, and then attempts -to execute the script. If there are any errors with the script, the -error that is generated is returned to the standalone agent. Otherwise, -the file object stores the output from the script, is converted to a string, -and is sent to the standalone agent. -In this example, the script that is to be run is testScript.py. - -.. code:: - - #Script to take in a string, run the program, - #and output the results of the command as a string. - - import time - import sys - from io import StringIO - - - def script_runner(message): - original = sys.stdout - # print(message) - # print(sys.argv) - sys.argv = message.split(',') - # print(sys.argv) - - try: - out = StringIO() - sys.stdout = out - exec(open(sys.argv[0]).read()) - sys.stdout = original - return out.getvalue() - except Exception as ex: - out = str(ex) - sys.stdout = original - return out - -.. note:: - - The script that is to be run needs to be in the same folder as the agent - and the scriptwrapper.py script. The script_runner function needs to be edited - if it is going to call a script at a different location. - - -testScript.py -~~~~~~~~~~~~~ - -This is a very simple test script designed to demonstrate the -calling of a matlab function from within python. First it initializes -the matlab engine for python. It then takes in a single command line -argument, and passes it to the matlab function testPy.m. If no -arguments are sent, it will send 0 to the testPy.m function. It then -prints the result of the testPy.m function. In this case, since -standard output is being redirected to a file object, this is -how the result is passed from this function to the standalone agent. - -.. code:: - - import matlab.engine - import sys - - - eng = matlab.engine.start_matlab() - - if len(sys.argv) == 2: - result = eng.testPy(float(sys.argv[1])) - else: - result = eng.testPy(0.0) - - print(result) - -testPy.m -~~~~~~~~ - -This matlab function is a very simple example, designed to show a -function that takes an argument, and produces an array as the output. -The input argument is added to each element in the array, and the -entire array is then returned. - -.. code:: - - function out = testPy(z) - x = 1:100 - out = x + z - end - -Setup on Linux --------------- - -1. Setup and run Volttron from develop branch using instructions :ref:`here `. - -2. Configure volttron instance using the ``vcfg`` command. - When prompted for the vip address use tcp://. - This is necessary to enable volttron communication with external processes. - - .. note:: - - If you are running VOLTTRON from within VirtualBox, It would be good to set - one of your adapters as a Host-only adapter. This can be done within the - VM's settings, under the Network section. Once this is done, use this IP - for the vip address. - - -.. _MatlabAgent_config: - -3. Update the configuration for MatLabAgent_v2 at /example/MatLabAgent_v2/config. - - The configuration file for the MatLab agent has four variables. - - 1. script_names - - 2. script_args - - 3. topics_to_matlab - - 4. topics_to_volttron - - An example config file included with the folder. - - .. code:: - - { - # VOLTTRON config files are JSON with support for python style comments. - "script_names": ["testScript.py"], - "script_args": [["20"]], - "topics_to_matlab": ["matlab/to_matlab/1"], - "topics_to_volttron": "matlab/to_volttron/" - } - - To edit the configuration, the format should be as follows: - - .. code:: - - { - "script_names": ["script1.py", "script2.py", ...], - "script_args": [["arg1","arg2"], ["arg1"], ...], - "topics_to_matlab": ["matlab/to_matlab/1", "matlab/to_matlab/2", ...], - "topics_to_volttron": "matlab/to_volttron/" - } - - The config requires that each script name lines up with a set of - commandline arguments and a topic. So a commandline argument - must be included, even if it is not used. The placement of - brackets are important, even when only communicating with one - standalone agent. - - For example, if only one standalone agent is used, and no command line - arguments are in place, the config file may look like this. - - .. code:: - - { - "script_names": ["testScript.py"], - "script_args": [["0"]], - "topics_to_matlab": ["matlab/to_matlab/1"], - "topics_to_volttron": "matlab/to_volttron/" - } - - -4. Install MatLabAgent_v2 and start agent (from volttron root directory) - - ``python ./scripts/install-agent.py -s examples/MatLabAgent_v2 --start`` - - .. note:: - - The MatLabAgent_v2 pulishes the command to be run to the message bus only on start or on a - configuration update. Once we configure the standalone_matlab agent on the windows machine, we will - send a configuration update to the running MatLabAgent_v2. The configuration would contain the topics to - which the standalone agent is listening to and will be publishing result to. - - .. seealso:: - - The MatLab agent uses the configuration store to dynamically change inputs. - More information on the config store and how it used can be found here. - - * :ref:`VOLTTRON Configuration Store ` - - * :ref:`Agent Configuration Store ` - - * :ref:`Agent Configuration Store Interface ` - -5. Run the below command and make a note of the server key. This is required for configuring the stand alone agent - on windows. (This is run on the linux machine) - - ``vctl auth serverkey`` - - -Setup on Windows ----------------- - -Install pre-requisites -~~~~~~~~~~~~~~~~~~~~~~~ - -1. Install python 3.6 64-bit from `here `__. - -2. Install MatLab engine from `here `_. - - .. warning:: - - The MatLab engine for Python only supports certain version of Python - depending on the version of MatLab used. - Please check `here `__ to see - if the current version of MatLab supports your version of Python. - - -.. note:: - - At this time, you may want to verify that you are able to communicate with - your Linux machine across your network. The simplest method would be to open - up the command terminal and use ``ping ``, and ``telnet - `` Please make sure that the port is opened for outside access. - -Install StandAloneMatLab Agent -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The standalone MatLab agent is designed to be usable in a -Windows environment. - -.. warning:: - - VOLTTRON is not designed to run in a Windows environment. - Outside of cases where it is stated to be usable in a - Windows environment, it should be assumed that it will - NOT function as expected. - - -1. Download VOLTTRON - - Download the VOLTTRON develop repository from github. Download the zip - from `GitHub `_. - - |github-image| - - |github-zip-image| - - Once the zipped file has been downloaded, go to your Downloads folder, - right-click on the file, and select "Extract All..." - - |extract-image_1| - - Choose a location for the extracted folder, and select "Extract" - - |extract-image_2| - - -2. Setup the PYTHONPATH - - Open the Windows explorer, and navigate to "Edit environment variables for your account." - - |cmd-image| - - Select "New" - - |env-vars-image_1| - - For "Variable name" enter: "PYTHONPATH" - For "Variable value" either browse to your volttron installation, or enter in the path to your volttron installation. - - |env-vars-image_2| - - Select "OK" twice. - -3. Set python version in MatLab - - Open your MatLab application. Run the command **pyversion** This should print the path to python2.7. If you have - multiple versions of python on your machine and pyversion points to a different version of python, - use **pyversion /path/to/python.exe** to set the appropriate version of python for your system. - For example, to use python 3.6 with MatLab: - - .. code:: - - pyversion C:\Python36\python.exe - -4. Set up the environment. - - Open up the command prompt. - - |cmd-image_2| - - Naviage to your volttron installation. - - ``cd \Your\directory\path\to\volttron-develop`` - - Use pip to install and setup dependencies. - - ``pip install -r examples\StandAloneMatLab\requirements.txt`` - - ``pip install -e .`` - - .. note:: - - If you get the error doing the second step because of an already installed volttron - from a different directory, manually delete the volttron-egg.link file from your - \\Lib\\site-pacakages directory (for example, - del C:\\Python27\\lib\\site-packages\\volttron-egg.link ) - and re-run the second command - -5. Configure the agent - - The configuration settings for the standalone agent are in setting.py (located in volttron-develop\\examples\\StandAloneMatLab\\) - - **settings.py** - - * 'volttron_to_matlab' needs to be set to the topic that will send your script - and command line arguments to your stand alone agent. This was defined in :ref:`config. ` - - * 'matlab_to_volttron' needs to be set to the topic that will send your script's - output back to your volttron platform. This was defined in :ref:`config. ` - - * 'vip_address' needs to be set to the address of your volttron instance - - * 'port' needs to be set to the port of your volttron instance - - * 'server_key' needs to be set to the public server key of your primary volttron platform. - This can be obtained from the primary volttron platform using ``vctl auth serverkey``. - (volttron must be running to use this command) - - - It is possible to have multiple standalone agents running. In this case, - copy the StandAloneMatLab folder, and make the necessary changes to the - new settings.py file. Unless it is connecting to a separate volttron instance, - you should only need to change the volttron_to_matlab. - - .. note:: - - It is recommended that you generate a new agent_public and agent_private - key for your standalone agent. This can be done using the ``vctl auth keypair`` - command on your primary volttron platform on Linux. If you plan to use multiple standalone agents, - they will each need their own keypair. - -6. Add standalone agent key to volttron platform - - * Copy the public key from settings.py in the StandAloneMatLab folder. - - * While the primary volttron platform is running on the linux machine, - add the agent public key using the vctl auth command on the linux machine. This will make volttron platform - allow connections from the standalone agent - - .. code:: - - vctl auth add --credentials - -7. Run standalone agent - - - At this point, the agent is ready to run. To use the agent, navigate to the - example folder and use python to start the agent. The agent will then wait for - a message to be published to the selected topic by the MatLab agent. - - ``cd examples\StandAloneMatLab\`` - - ``python standalone_matlab.py`` - - Your output should be similar to this: - - .. code:: - - 2019-08-01 10:42:47,592 volttron.platform.vip.agent.core DEBUG: identity: standalone_matlab - 2019-08-01 10:42:47,592 volttron.platform.vip.agent.core DEBUG: agent_uuid: None - 2019-08-01 10:42:47,594 volttron.platform.vip.agent.core DEBUG: serverkey: None - 2019-08-01 10:42:47,596 volttron.platform.vip.agent.core DEBUG: AGENT RUNNING on ZMQ Core standalone_matlab - 2019-08-01 10:42:47,598 volttron.platform.vip.zmq_connection DEBUG: ZMQ connection standalone_matlab - 2019-08-01 10:42:47,634 volttron.platform.vip.agent.core INFO: Connected to platform: router: ebae9efa-5e8f-49e3-95a0-2020ddff9e8a version: 1.0 identity: standalone_matlab - 2019-08-01 10:42:47,634 volttron.platform.vip.agent.core DEBUG: Running onstart methods. - - - .. note:: - - If you have python3 as your default python run the command ``python -2 standalone_matlab.py`` - -8. On the Linux machine configure the MatlabAgent to publish commands to the topic standalone agent is listening to. -To load a new configuration or to change the current configuration enter - - .. code:: - - vctl config store config - - Whenever there is a change in the configuration in the config store, or whenever - the agent starts, the matlab agent sends the configgured command to the topic configured. As long as the standalone - agent has been started and is listening to the appropriate topic, the output in the log - should look similar to this: - - .. code:: - - 2019-08-01 10:43:18,925 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Configuring Agent - 2019-08-01 10:43:18,926 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Publishing on: matlab/to_matlab/1 - 2019-08-01 10:43:18,926 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Sending message: testScript2.py,20 - 2019-08-01 10:43:18,926 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent DEBUG: Agent Configured! - 2019-08-01 10:43:18,979 (matlab_agentV2agent-0.3 3539) matlab_agentV2.agent INFO: Agent: matlab/to_volttron/1 - Message: - '20' - - Once the matlab agent publishes the message (in the above case, "testScript2.py,20") on the windows command prompt - running the standalone agent, you should see the message that was received by the standalone agent. - - .. code:: - - 2019-08-01 10:42:47,671 volttron.platform.vip.agent.subsystems.configstore DEBUG: Processing callbacks for affected files: {} - The Message is: testScript2.py,20 - - .. note:: - - If MatLabAgent_v2 has been installed and started, and you have not started the - standalone_matlab agent, you will need to either restart the matlab_agentV2, or make - a change to the configuration in the config store to send command to the topic standalone agent is - actively listening to. - -.. |github-image| image:: files/github-image.png -.. |cmd-image| image:: files/cmd-image.png -.. |env-vars-image_1| image:: files/env-vars-image_1.png -.. |env-vars-image_2| image:: files/env-vars-image_2.png -.. |cmd-image_2| image:: files/cmd-image_2.png -.. |github-zip-image| image:: files/github-zip-image.png -.. |extract-image_1| image:: files/extract-image_1.png -.. |extract-image_2| image:: files/extract-image_2.png -.. |matlab-agent-diagram| image:: files/matlab-agent-diagram.png diff --git a/docs/source/devguides/supporting/examples/SchedulerExampleAgent.rst b/docs/source/devguides/supporting/examples/SchedulerExampleAgent.rst deleted file mode 100644 index 385f146b99..0000000000 --- a/docs/source/devguides/supporting/examples/SchedulerExampleAgent.rst +++ /dev/null @@ -1,59 +0,0 @@ -.. _SchedulerExampleAgent: - -SchedulerExampleAgent -===================== - -The SchedulerExampleAgent demonstrates how to use the scheduling feature -of the [[ActuatorAgent]] as well as how to send a command. This agent -publishes a request for a reservation on a (fake) device then takes an -action when it's scheduled time appears. The ActuatorAgent must be -running to exercise this example. - -Note: Since there is no actual device, an error is produced when the -agent attempts to take its action. - -:: - - def publish_schedule(self): - '''Periodically publish a schedule request''' - headers = { - 'AgentID': agent_id, - 'type': 'NEW_SCHEDULE', - 'requesterID': agent_id, #The name of the requesting agent. - 'taskID': agent_id + "-ExampleTask", #The desired task ID for this task. It must be unique among all other scheduled tasks. - 'priority': 'LOW', #The desired task priority, must be 'HIGH', 'LOW', or 'LOW_PREEMPT' - } - - start = str(datetime.datetime.now()) - end = str(datetime.datetime.now() + datetime.timedelta(minutes=1)) - - - msg = [ - ['campus/building/unit',start,end] - ] - self.vip.pubsub.publish( - 'pubsub', topics.ACTUATOR_SCHEDULE_REQUEST, headers, msg) - -The agent listens to schedule announcements from the actuator and then -issues a command - -:: - - @PubSub.subscribe('pubsub', topics.ACTUATOR_SCHEDULE_ANNOUNCE(campus='campus', - building='building',unit='unit')) - def actuate(self, peer, sender, bus, topic, headers, message): - print ("response:",topic,headers,message) - if headers[headers_mod.REQUESTER_ID] != agent_id: - return - '''Match the announce for our fake device with our ID - Then take an action. Note, this command will fail since there is no - actual device''' - headers = { - 'requesterID': agent_id, - } - self.vip.pubsub.publish( - 'pubsub', topics.ACTUATOR_SET(campus='campus', - building='building',unit='unit', - point='point'), - headers, 0.0) - diff --git a/docs/source/devguides/supporting/examples/index.rst b/docs/source/devguides/supporting/examples/index.rst deleted file mode 100644 index b9b734e67e..0000000000 --- a/docs/source/devguides/supporting/examples/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -====== -Agents -====== - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/devguides/supporting/index.rst b/docs/source/devguides/supporting/index.rst deleted file mode 100644 index da273cdd11..0000000000 --- a/docs/source/devguides/supporting/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -============================ -Examples/Samples -============================ - -.. toctree:: - :glob: - :maxdepth: 2 - - examples/index - utilities/index - applications/index - JupyterNotebooks - * diff --git a/docs/source/devguides/supporting/utilities/Driven-Applications.rst b/docs/source/devguides/supporting/utilities/Driven-Applications.rst deleted file mode 100644 index 946aec2d00..0000000000 --- a/docs/source/devguides/supporting/utilities/Driven-Applications.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _Driven-Applications: - -Driven Agents -============= - -Configuration for running OpenEIS applications within VOLTTRON. ---------------------------------------------------------------- - -The configuration of an agent within VOLTTRON requires a small -modification to the imports of the OpenEIS application and a couple of -configuration parameters. - -Import and Extend -~~~~~~~~~~~~~~~~~ - -:: - - from volttron.platform.agent import (AbstractDrivenAgent, Results) - ... - class OpeneisApp(AbstractDrivenAgent): - -Configuration -~~~~~~~~~~~~~ - -The two parameters that are necessary in the json configuration file are -"application" and "device". An optional but recommended argument should -also be added "agentid". - -:: - - { - "agentid": "drivenlogger1", - "application": "drivenlogger.logdevice.LogDevice", - "device": "pnnl/isb1/oat", - ... - } - -Any other keys will be passed on to the openeis application when it is -run. diff --git a/docs/source/devguides/supporting/utilities/ProcessAgent.rst b/docs/source/devguides/supporting/utilities/ProcessAgent.rst deleted file mode 100644 index 21ac5da147..0000000000 --- a/docs/source/devguides/supporting/utilities/ProcessAgent.rst +++ /dev/null @@ -1,15 +0,0 @@ -.. _ProcessAgent: - -Process Agent -============= - -This agent can be used to launch non-Python agents in the VOLTTRON -platform. The agent handles keeping track of the process so that it can -be started and stopped with platform commands. Edit the configuration -file to specify how to launch your process. - -This agent was originally created for launching sMAP along with the -platform, but can be used for any process. - -Note: Currently this agent does not respond to a blanket "shutdown" -request and must be stopped with the "stop" command. diff --git a/docs/source/devguides/supporting/utilities/Scripts.rst b/docs/source/devguides/supporting/utilities/Scripts.rst deleted file mode 100644 index 8975ef4a72..0000000000 --- a/docs/source/devguides/supporting/utilities/Scripts.rst +++ /dev/null @@ -1,39 +0,0 @@ -.. _Scripts: - -Scripts -======= - -In order to make repetitive tasks less repetitive the VOLTTRON team has -create several scripts in order to help. These tasks are available in -the scripts directory. Before using these scripts you should become -familiar with the :ref:`Agent Development ` process. - -In addition to the scripts directory, the VOLTTRON team has added the -config directory to the .gitignore file. By convention this is where we -store customized scripts and configuration that will not be made public. -Please feel free to use this convention in your own processes. - -The scripts/core directory is laid out in such a way that we can build -scripts on top of a base core. For example the scripts in sub-folders -such as the historian-scripts and demo-comms use the scripts that are -present in the core directory. - -The most widely used script is scripts/install-agent.py. The -install_agent.py script will remove an agent if the tag is already -present, create a new agent package, and install the agent to -VOLTTRON\_HOME. This script has three required arguments and has the -following signature: - -:: - - # Agent to Package must have a setup.py in the root of the directory. - scripts/install_agent.py - -The install_agent.py script will respect the VOLTTRON\_HOME specified on -the command line or set in the global environment. An example of setting -VOLTTRON\_HOME is as follows. - -:: - - # Sets VOLTTRON_HOME to /tmp/v1home - VOLTTRON_HOME=/tmp/v1home scripts/core/pack_install.sh diff --git a/docs/source/devguides/supporting/utilities/index.rst b/docs/source/devguides/supporting/utilities/index.rst deleted file mode 100644 index dde196e865..0000000000 --- a/docs/source/devguides/supporting/utilities/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -========= -Utilities -========= - -.. toctree:: - :glob: - :maxdepth: 2 - - * diff --git a/docs/source/devguides/walkthroughs/Deployment-Walkthrough.rst b/docs/source/devguides/walkthroughs/Deployment-Walkthrough.rst deleted file mode 100644 index e45852e4a5..0000000000 --- a/docs/source/devguides/walkthroughs/Deployment-Walkthrough.rst +++ /dev/null @@ -1,128 +0,0 @@ -.. _Deployment-Walkthrough: - -Deployment Walkthrough -~~~~~~~~~~~~~~~~~~~~~~ - -This page is meant as an overview of setting up a VOLTTRON deployment -which consists of one or more platforms collecting data and being -managed by another platform running the VOLTTRON Central agent. High -level instructions are included but for more details on each step, -please follow links to that section of the wiki. - -Assumptions: - -- “Data Collector” is the box that has the drivers and is collecting data it needs to forward. -- “Volttron Central/VC” is the box that has the historian which will save data to the database. -- VOLTTRON_HOME is assumed to the default on both boxes which is: /home//.volttron - -Notes/Tips: - -- Aside from installing the required packages with apt-get, ``sudo`` is - not required and should not be used. VOLTTRON is designed to be run - as a non-root user and running with sudo is not supported. -- The convenience :ref:`scripts ` have been developed to simplify - many of the repetitive multi-step processes. For instance, - ``scripts/core/make-listener`` can be modified for any agent and make - it one command to stop, remove, build, install, configure, tag, - start, and (optionally) enable an agent for autostart. -- These instructions assume default directories are used (for instance, - ``/home//volttron`` for the project directory and - ``/home//.volttron`` for the VOLTTRON Home directory. -- Creating a separate ``config`` directory for agent configuration - files used in the deployment can prevent them from being committed - back to the repository. -- Double check firewall rules/policies when setting up a multi-node - deployment to ensure that platforms can communicate - -On all machines: -================ - -On all machines in the deployment, setup the platform, setup encryption, -authentication, and authorization. Also, build the basic agents for the -deployment. All platforms will need a PlatformAgent and a Historian. -Using :ref:`scripts ` will help simplify this project. - -:ref:`Install required packages ` ----------------------------------------------------------- - -- ``sudo apt-get install build-essential python-dev openssl libssl-dev libevent-dev git`` - -:ref:`Build the project ` ----------------------------------------------- - -- Clone the repository and build using ``python bootstrap.py`` - -Configuring Platform ---------------------- - -On VC: -====== - -- Run :ref:`vcfg ` -- Setup as VOLTTRON Central. -- Set appropriate ip, port, etc for this machine -- Pick to install a platform historian (defaults to sqlite) -- Start up the platform and find the line with the server public key “cat volttron.log|grep “public key”: - 2016-05-19 08:42:58,062 () volttron.platform.main INFO: public key: - - -On the data collector: -====================== - -Setup :ref:`drivers ` ---------------------------------------- - -For a simple case, follow instructions to install a :ref:`Fake Driver`` -for testing purposes. For an actual deployment against real devices see the following: - -- Create a :ref:`Master Driver Agent ` to coordinate - drivers for the devices controlled by this platform. -- For :ref:`MODBUS ` devices, create config files and point - configuration files. -- For BACnet devices, create a :ref:`Proxy Agent ` for - :ref:`BACnet drivers ` to communicate through - - -Setup the Forwarder -------------------- - Now that data is being published to the bus, a :ref:`Forward Historian` can be - configured to send this data to the VC instance for storage. - -- Use: vctl keypair to generate a keypair -- cat VOLTTRON_HOME/keypair to get the public and secret keys -- Create a config directory in the main project directory -- Setup a :ref:`Forward Historian` - - - cp services/core/ForwardHistorian/config config/forwarder.config - - Edit forwarder.config using the VC’s VIP address, the public server key, and the keypair - - -"destination-vip": "tcp://:?serverkey=&secretkey=&publickey= - - - For ease of use, you can use the install-agent.py in the scripts directory to install the Forward Historian: - -:: - python scripts/install-agent.py -s services/core/ForwardHistorian -c config/forwarder.config - - - Execute that script and the forward historian should be installed - -To check that things are working: -Start a listener agent on VC, you should see data from the data collector appear - -In the log for VC, check for credentials success for the ip of data collector. - -Registering the collection platform -==================================== - -- In a browser, go to the url for your VC instance. -- Click on Register Platforms -- Enter a name for the collection platform and the ip configured http://: -- Open the tree upper left of the UI and find your platform. - -Troubleshooting: -================ - -- Check firewall rules - registering VC on VC: - ipc:\ //@/home/volttron/.volttron/run/vip.socket - Change password by putting pw hash in config file - Add remote ip address to config file diff --git a/docs/source/devguides/walkthroughs/Forward-Historian-Deployment.rst b/docs/source/devguides/walkthroughs/Forward-Historian-Deployment.rst deleted file mode 100644 index 4fa1bd92be..0000000000 --- a/docs/source/devguides/walkthroughs/Forward-Historian-Deployment.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. _Forward-Historian-Deployment: - -Forward Historian Deployment -============================= - -This guide describes a simple setup where one Volttron instance collects -data from a fake devices and sends to another instance . Lets consider the -following example. - -We are going to create two VOLTTRON instances and send data from one VOLTTRON -instance running a fake driver(subscribing values from a fake device)and sending -the values to the second VOLTTRON instance. - -VOLTTRON instance 1 forwards data to VOLTTRON instance 2 --------------------------------------------------------- - -VOLTTRON instance 1 -~~~~~~~~~~~~~~~~~~~ - -- vctl shutdown –platform (if the platform is already working) -- vcfg (this helps in configuring the volttron instance - http://volttron.readthedocs.io/en/releases-4.1/core_services/control/VOLTTRON-Config.html - - - Specify the IP of the machine : tcp://130.20.*.*:22916" - - Specify the port you want to use - - Specify if you want to run VC(Volttron Central) here or this this instance would be controlled - by a VC and the IP and port of the VC - - - Then install agents like Master driver Agent with fake driver agent for the instance. - - Install a listener agent so see the topics that are coming from the diver agent - - Then run the volttron instance by : ./start-volttron -- Volttron authentication: We need to add the IP of the instance 2 in the auth.config file of the VOLTTRON agent. - This is done as follows: - - - vctl auth-add - - We specify the IP of the instance 2 and the credentials of the agent - (http://volttron.readthedocs.io/en/releases-4.1/devguides/walkthroughs/Agent-Authentication-Walkthrough.html?highlight=auth-add) - - For specifying authentication for all the agents , we specify /.*/ for credentials as shown in - http://volttron.readthedocs.io/en/releases-4.1/devguides/agent_development/index.html - - This should enable authentication for all the volttron-instance based on the IP you specify here - -For this documentation, the topics from the driver agent will be send to the instance 2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- We use the existing agent called the Forward Historian for this purpose which is available in service/core in the VOLTTRON directory. -- In the config file under the ForwardHistorian directory , we modify the following field: - - - Destination-vip : the IP of the volttron instance to which we have to forward the data to along with the port number . - Example : "tcp://130.20.*.*:22916" - - Destination-serverkye: The server key of the VOLTTRON instance to which we need to forward the data to. - This can be obtained at the VOLTTRON instance by typing vctl auth serverkey - -- Service_topic_list: specify the topics you want to forward specifically instead of all the values. -- Once the above values are set, your forwarder is all set . -- You can create a script file for the same and execute the agent. - -VOLTTRON instance 2 -~~~~~~~~~~~~~~~~~~~ - -- Vctl shutdown –platform (if the platform is already working ) -- volttron-cfg (this helps in configuring the volttron instance ) - http://volttron.readthedocs.io/en/releases-4.1/core_services/control/VOLTTRON-Config.html - - - Specify the IP of the machine : tcp://130.20.*.*:22916 - - Specify the port you want to use. - - Install the listener agent (this will show the connection from instance 1 if its successful - and then show all the topics from instance 1. - -- Volttron authentication: We need to add the IP of the instance 1 in the auth.config file of the VOLTTRON agent .This is done as follows: - - - vctl auth-add - - We specify the IP of the instance 1 and the credentials of the agent - http://volttron.readthedocs.io/en/releases-4.1/devguides/walkthroughs/Agent-Authentication-Walkthrough.html?highlight=auth-add - - For specifying authentication for all the agents , we specify /.*/ for credentials as shown in - http://volttron.readthedocs.io/en/releases-4.1/devguides/agent_development/index.html - - This should enable authentication for all the volttron-instance based on the IP you specify here - -Listener Agent -~~~~~~~~~~~~~~ -- Run the listener agent on this instance to see the values being forwarded from instance 1. - -Once the above setup is done, you should be able to see the values from instance 1 on the listener agent of instance 2. - - diff --git a/docs/source/devguides/walkthroughs/Forward-Historian-Walkthrough.rst b/docs/source/devguides/walkthroughs/Forward-Historian-Walkthrough.rst deleted file mode 100644 index 4eb7534168..0000000000 --- a/docs/source/devguides/walkthroughs/Forward-Historian-Walkthrough.rst +++ /dev/null @@ -1,68 +0,0 @@ -.. _Forward-Historian-Walkthrough: - -Forward Historian Walkthrough -============================= - -This guide describes a simple setup where one VOLTTRON instance collects -data from a fake devices and sends to another instance . Lets consider the -following example. - -We are going to create two VOLTTRON instances and send data from one VOLTTRON -instance running a fake driver(subscribing values from a fake device)and sending -the values to the second VOLTTRON instance. - -VOLTTRON instance 1 forwards data to VOLTTRON instance 2 --------------------------------------------------------- - -VOLTTRON INSTANCE 1 -~~~~~~~~~~~~~~~~~~~ -- ``vctl shutdown --platform`` (If VOLTTRON is already running it must be shut down before running ``volttron-cfg``). -- ``vcfg`` - this helps in configuring the VOLTTRON instance(:ref:`VOLTTRON Config `). - - - Specify the IP of the machine : ``tcp://127.0.0.1:22916``. - - Specify the port you want to use. - - Specify if you want to run VC ( VOLTTRON Central) here or this this instance would be controlled by a VC and the IP and port of the VC. -- Then start the VOLTTRON instance by : ``volttron -vv & > volttron.log&``. -- Then install agents like Master driver Agent with fake driver agent for the instance. -- Install a listener agent so see the topics that are coming from the diver agent. -- VOLTTRON authentication : We need to add the IP of the instance 1 in the auth.config file of the VOLTTRON agent .This is done as follow : - - - ``vctl auth-add`` - - We specify the IP of the instance 1 and the credentials of the agent.(:ref:`Agent authentication walkthrough `) - - For specifying authentication for all the agents , we specify ``/.*/`` for credentials as shown in :ref:`Agent Development`. - - This should enable authentication for all the VOLTTRON instances based on the IP you specify here . - -For this documentation, the topics from the driver agent will be sent to the instance 2 -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- We use the existing agent called the Forward Historian for this purpose which is available in ``service/core`` in the VOLTTRON directory. -- In the config file under the ForwardHistorian directory , we modify the following field: - - - destination-vip : the IP of the VOLTTRON instance to which we have to forward the data to along with the port number . Example : ``tcp://130.20.*.*:22916``. - - destination-serverkey: The server key of the VOLTTRON instance to which we need to forward the data to. This can be obtained at the VOLTTRON instance by typing ``volttron-ctl auth serverkey``. - - service_topic_list: specify the topics you want to forward specifically instead of all the values. -- Once the above values are set, your forwarder is all set . -- You can create a script file for the same and execute the agent. - -VOLTTRON INSTANCE 2 -~~~~~~~~~~~~~~~~~~~ - -- ``vctl shutdown --platform`` (If VOLTTRON is already running it must be shut down before running ``volttron-cfg``). -- ``vcfg`` - this helps in configuring the VOLTTRON instance.(:ref:`VOLTTRON Config `) - - Specify the IP of the machine : ``tcp://127.0.0.1:22916``. - - Specify the port you want to use. - - Install the listener agent (this will show the connection from instance 1 if its successful and then show all the topics from instance 1. -- Then start the VOLTTRON instance by : ``volttron -vv & > volttron.log&``. -- VOLTTRON authentication : We need to add the IP of the instance 1 in the auth.config file of the VOLTTRON agent .This is done as follow : - - - ``vctl auth-add`` - - We specify the IP of the instance 1 and the credentials of the agent.(:ref:`Agent authentication walkthrough `) - - For specifying authentication for all the agents , we specify ``/.*/`` for credentials as shown in :ref:`Agent Development`. - - This should enable authentication for all the VOLTTRON instances based on the IP you specify here . - -LISTENER AGENT - -- Run the listener agent on this instance to see the values being forwarded from instance 1. - -Once the above setup is done, you should be able to see the values from instance 1 on the listener agent of instance 2. - - diff --git a/docs/source/devguides/walkthroughs/Simple-WebAgent-Walkthrough.rst b/docs/source/devguides/walkthroughs/Simple-WebAgent-Walkthrough.rst deleted file mode 100644 index eca93f8a0f..0000000000 --- a/docs/source/devguides/walkthroughs/Simple-WebAgent-Walkthrough.rst +++ /dev/null @@ -1,65 +0,0 @@ -.. _Simple-WebAgent-Walkthrough: - -Simple Web Agent Walkthrough -============================ - -A simple web enabled agent that will hook up with a volttron message bus and -allow interaction between it via http. This example agent shows a simple file -serving agent, a json-rpc based call, and a websocket based connection -mechanism. - -Starting VOLTTRON Platform --------------------------- - -.. note:: Activate the environment first :ref:`active the environment ` - -In order to start the simple web agent, we need to bind the VOLTTRON instance -to the a web server. We need to specify the address and the port for the -web server. For example, if we want to bind the localhost:8080 as the web server -we start the VOLTTRON platform as follows: - -``./start-volttron --bind-web-address http://127.0.0.1:8080`` - -Once the platform is started, we are ready to run the Simple Web Agent. - -Running Simple Web Agent ------------------------- - -.. note:: The following assumes the shell is located at the :ref:`VOLTTRON_ROOT`. - -Copy the following into your shell (save it to a file for executing it again -later). - -.. code-block:: console - - python scripts/install-agent.py \ - --agent-source examples/SimpleWebAgent \ - --tag simpleWebAgent \ - --vip-identity webagent \ - --force \ - --start - -This will create a web server on http://localhost:8080. The index.html file -under ``simpleweb/webroot/simpleweb/`` can be any html page which binds to the -VOLTTRON message bus .This provides a simple example of providing a web endpoint -in VOLTTRON. - -Path based registration examples --------------------------------- - -- Files will need to be in webroot/simpleweb in order for them to be browsed - from http://localhost:8080/simpleweb/index.html - -- Filename is required as we don't currently autoredirect to any default pages - as shown in ``self.vip.web.register_path("/simpleweb", os.path.join(WEBROOT))`` - -The following two examples show the way to call either a jsonrpc (default) -endpoint and one that returns a different content-type. With the JSON-RPC -example from volttron central we only allow post requests, however this is not -required. - -- Endpoint will be available at http://localhost:8080/simple/text ``self.vip.web.register_endpoint("/simple/text", self.text)`` - -- Endpoint will be available at http://localhost:8080/simple/jsonrpc ``self.vip.web.register_endpoint("/simpleweb/jsonrpc", self.rpcendpoint)`` -- Text/html content type specified so the browser can act appropriately like ``[("Content-Type", "text/html")]`` -- The default response is application/json so our endpoint returns appropriately with a json based response. diff --git a/docs/source/devguides/walkthroughs/SingleMachine-Walkthrough.rst b/docs/source/devguides/walkthroughs/SingleMachine-Walkthrough.rst deleted file mode 100644 index 940a671e63..0000000000 --- a/docs/source/devguides/walkthroughs/SingleMachine-Walkthrough.rst +++ /dev/null @@ -1,217 +0,0 @@ -.. _SingleMachine-Walkthrough: - -Single Machine Deployment -========================= - -The purpose of this demonstration is to show the process of setting up a simple VOLTTRON instance for use on a single machine. - -Install and Build VOLTTRON --------------------------- - -First, :ref:`install ` and :ref:`build ` VOLTTRON: - -For a quick reference: - -.. code-block:: console - - sudo apt-get update - sudo apt-get install build-essential python-dev openssl libssl-dev libevent-dev git - git clone https://github.com/VOLTTRON/volttron/ - cd volttron - python3 bootstrap.py - - - -Activating the VOLTTRON Environment ------------------------------------- - -After the build is complete, activate the VOLTTRON environment. - -.. code-block:: console - - source env/bin/activate - - -Configuring VOLTTRON --------------------- - -The ``vcfg`` command allows for an easy configuration of the VOLTTRON environment. - -.. note:: - - To create a simple instance of VOLTTRON, leave the default response, or select yes (y) if - prompted for a yes or no response [Y/N]. You must choose a username and password for the - VOLTTRON Central admin account. - -A set of example responses are included here (username is user, localhost is volttron-pc): - -.. code-block:: console - - (volttron) user@volttron-pc:~/volttron$ vcfg - - Your VOLTTRON_HOME currently set to: /home/user/.volttron - - Is this the volttron you are attempting to setup? [Y]: - What type of message bus (rmq/zmq)? [zmq]: - What is the vip address? [tcp://127.0.0.1]: - What is the port for the vip address? [22916]: - Is this instance web enabled? [N]: y - What is the protocol for this instance? [https]: - Web address set to: https://volttron-pc - What is the port for this instance? [8443]: - Would you like to generate a new web certificate? [Y]: - WARNING! CA certificate does not exist. - Create new root CA? [Y]: - - Please enter the following details for web server certificate: - Country: [US]: - State: WA - Location: Richland - Organization: PNNL - Organization Unit: VOLTTRON - Created CA cert - Creating new web server certificate. - Is this an instance of volttron central? [N]: y - Configuring /home/user/volttron/services/core/VolttronCentral. - Installing volttron central. - ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - VC admin and password are set up using the admin web interface. - After starting VOLTTRON, please go to https://volttron-pc:8443/admin/login.html to complete the setup. - Will this instance be controlled by volttron central? [Y]: - Configuring /home/user/volttron/services/core/VolttronCentralPlatform. - What is the name of this instance? [volttron1]: - Volttron central address set to https://volttron-pc:8443 - ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - Would you like to install a platform historian? [N]: y - Configuring /home/user/volttron/services/core/SQLHistorian. - ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - Would you like to install a master driver? [N]: y - Configuring /home/user/volttron/services/core/MasterDriverAgent. - ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Would you like to install a fake device on the master driver? [N]: y - Should the agent autostart? [N]: y - Would you like to install a listener agent? [N]: y - Configuring examples/ListenerAgent. - ['volttron', '-vv', '-l', '/home/user/.volttron/volttron.cfg.log'] - Should the agent autostart? [N]: y - Finished configuration! - - You can now start the volttron instance. - - If you need to change the instance configuration you can edit - the config file is at /home/user/.volttron/config - - - -Once this is finished, run VOLTTRON and test the new configuration. - - -Testing VOLTTRON ----------------- - -To test that the configuration was successful, start an instance of VOLTTRON in the background: - -.. code-block:: console - - ./start-volttron - -.. note:: - - This command must be run from the root VOLTTRON directory. - - -Command Line -~~~~~~~~~~~~ - -If the example ``vcfg`` responses were used, the listener, master_driver, platform_historian, vcp, and vc agents -should have all started automatically. This can be checked using ``vctl status``. - -The output should look similar to this: - -.. code-block:: console - - (volttron)user@volttron-pc:~/volttron$ vctl status - AGENT IDENTITY TAG STATUS HEALTH - 8 listeneragent-3.2 listeneragent-3.2_1 listener running [2810] GOOD - 0 master_driveragent-3.2 platform.driver master_driver running [2813] GOOD - 3 sqlhistorianagent-3.7.0 platform.historian platform_historian running [2811] GOOD - 2 vcplatformagent-4.8 platform.agent vcp running [2812] GOOD - 9 volttroncentralagent-5.0 volttron.central vc running [2808] GOOD - -You can further verify that the agents are functioning correctly with ``tail -f volttron.log`` - - -VOLTTRON Admin -~~~~~~~~~~~~~~ - -The admin page allows the user to manage RMQ and ZMQ certificates and credentials. - -Open a web browser and navigate to https://volttron-pc:8443/admin/login.html - -There may be a message warning about a potential security risk. Check to see if the certificate -that was created in vcfg is being used. The process below is for firefox. - -|vc-cert-warning-1| - -.. |vc-cert-warning-1| image:: files/vc-cert-warning-1.png - -|vc-cert-warning-2| - -.. |vc-cert-warning-2| image:: files/vc-cert-warning-2.png - -|vc-cert-warning-3| - -.. |vc-cert-warning-3| image:: files/vc-cert-warning-3.png - -|vc-cert-warning-4| - -.. |vc-cert-warning-4| image:: files/vc-cert-warning-4.png - -When the admin page is accessed for the first time, the user will be prompted to set up a master -username and password. - -|admin-page-login| - -.. |admin-page-login| image:: files/volttron-admin-page.png - - -This will allow the user to log into both the admin page and VOLTTRON Central. - - -VOLTTRON Central -~~~~~~~~~~~~~~~~ - -Navigate to https://volttron-pc:8443/vc/index.html - -Log in using the username and password you set up on the admin web page. - -|vc-login| - -.. |vc-login| image:: files/vc-login.png - - -Once you have logged in, click on the Platforms tab in the upper right corner of the window. - -|vc-dashboard| - -.. |vc-dashboard| image:: files/vc-dashboard.png - -Once in the Platforms screen, click on the name of the platform. - -|vc-platform| - -.. |vc-platform| image:: files/vc-platform.png - -You will now see a list of agents. They should all be running. - -|vc-agents| - -.. |vc-agents| image:: files/vc-agents.png - -For more information on VOLTTRON Central, please see: - -* :ref:`VOLTTRON Central Management ` -* :ref:`VOLTTRON Central Demo ` diff --git a/docs/source/devguides/walkthroughs/VC-Device-Configuration-Demo.rst b/docs/source/devguides/walkthroughs/VC-Device-Configuration-Demo.rst deleted file mode 100644 index 5ddd86e0fa..0000000000 --- a/docs/source/devguides/walkthroughs/VC-Device-Configuration-Demo.rst +++ /dev/null @@ -1,332 +0,0 @@ -.. _Device-Conifiguration-in-VOLTTRON-Central: - -======================================== -Device Configuration in VOLTTRON Central -======================================== - -Devices in your network can be detected and configured through the VOLTTRON Central UI. The current version of VOLTTRON enables device detection and configuration for BACnet devices. The following sections describe the processes involved with performing scans to detect physical devices and get their points, and configuring them as virtual devices installed on VOLTTRON instances. - -- `Launching Device Configuration <#launching-device-configuration>`__ -- `Scanning for Devices <#scanning-for-devices>`__ -- `Scanning for Points <#scanning-for-points>`__ -- `Registry Configuration File <#registry-configuration-file>`__ -- `Additional Attributes <#additional-attributes>`__ -- `Quick Edit Features <#quick-edit-features>`__ -- `Keyboard Commands <#keyboard-commands>`__ -- `Registry Preview <#registry-preview>`__ -- `Registry Configuration Options <#registry-configuration-options>`__ -- `Reloading Device Points <#reloading-device-points>`__ -- `Device Configuration Form <#device-configuration-form>`__ -- `Configuring Subdevices <#configuring-subdevices>`__ -- `Reconfiguring Devices <#reconfiguring-devices>`__ -- `Exporting Registry Configuration Files <#exporting-registry-configuration-files>`__ - -Launching Device Configuration ------------------------------- - -To begin device configuration in VOLTTRON Central, extend the side panel on the left and find the cogs button next to the platform instance you want to add a device to. Click the cogs button to launch the device configuration feature. - -|Add Devices| - -|Install Devices| - -Currently the only method of adding devices is to conduct a scan to detect BACnet devices. A BACnet Proxy Agent must be running in order to do the scan. If more than one BACnet Proxy is installed on the platform, choose the one that will be used for the scan. - -The scan can be conducted using default settings that will search for all physical devices on the network. However, optional settings can be used to focus on specific devices or change the duration of the scan. Entering a range of device IDs will limit the scan to return only devices with IDs in that range. Advanced options include the ability to specify the IP address of a device to detect as well as the ability to change the duration of the scan from the default of five seconds. - -Scanning for Devices --------------------- - -To start the scan, click the large cog button to the right of the scan settings. - -|Start Scan| - -Devices that are detected will appear in the space below the scan settings. Scanning can be repeated at any time by clicking the large cog button again. - -|Devices Found| - -Scanning for Points -------------------- - -Another scan can be performed on each physical device to retrieve its available points. This scan is initiated by clicking the triangle next to the device in the list. The first time the arrow is clicked, it initiates the scan. After the points are retrieved, the arrow becomes a hide-and-show toggle button and won't reinitiate scanning the device. - -|Get Device Points| - -After the points have been retrieved once, the only way to scan the same device -for points again is to relaunch the device configuration process from the start -by clicking on the small cogs button next to the platform instance in the panel tree. - -Registry Configuration File -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The registry configuration determines which points on the physical device will be associated with the virtual device that uses that particular registry configuration. The registry configuration determines which points' data will be published to the message bus and recorded by the historian, and it determines how the data will be presented. - -When all the points on the device have been retrieved, the points are loaded into the registry configuration editor. There, the points can be modified and selected to go into the registry configuration file for a device. - -Each row in the registry configuration editor represents a point, and each cell in the row represents an attribute of the point. - -Only points that have been selected will be included in the registry configuration file. To select a point, check the box next to the point in the editor. - -|Select Point Before| - -|Select Point During| - -|Select Point After| - -Type directly in a cell to change an attribute value for a point. - -|Edit Points| - -Additional Attributes ---------------------- - -The editor's default view shows the attributes that are most likely to be changed during configuration: the VOLTTRON point name, the writable setting, and the units. Other attributes are present but not shown in the default view. To see the entire set of attributes for a point, click the Edit Point button (the three dots) at the end of the point row. - -|Edit Point Button| - -In the window that opens, point attributes can be changed by typing in the fields and clicking the Apply button. - -|Edit Point Dialog| - -Checking or unchecking the "Show in Table" box for an attribute will add or remove it as a column in the registry configuration editor. - -Quick Edit Features -------------------- - -Several quick-edit features are available in the registry configuration editor. - -The list of points can be filtered based on values in the first column by clicking the filter button in the first column's header and entering a filter term. - -|Filter Points Button| - -|Filter Set| - -The filter feature allows points to be edited, selected, or deselected more quickly by narrowing down potentially large lists of points. However, the filter doesn't select points, and if the registry configuration is saved while a filter is applied, any selected points not included in the filter will still be included in the registry file. - -To clear the filter, click on the Clear Filter button in the filter popup. - -|Clear Filter| - -To add a new point to the points listed in the registry configuration editor, click on the Add Point button in the header of the first column. - -|Add New Point| - -|Add Point Dialog| - -Provide attribute values, and click the Apply button to add the new point, which will be appended to the bottom of the list. - -To remove points from the list, select the points and click the Remove Points button in the header of the first column. - -|Remove Points| - -|Confirm Remove Points| - -Each column has an Edit Column button in its header. - -|Edit Columns| - -Click on the button to display a popup menu of operations to perform on the column. The options include inserting a blank new column, duplicating an existing column, removing a column, or searching for a value within a column. - -|Edit Column Menu| - -A duplicate or new column has to be given a unique name. - -|Name Column| - -|Duplicated Column| - -To search for values in a column, choose the Find and Replace option in the popup menu. - -|Find in Column| - -Type the term to search for, and click the Find Next button to highlight all the matched fields in the column. - -|Find Next| - -Click the Find Next button again to advance the focus down the list of matched terms. - -To quickly replace the matched term in the cell with focus, type a replacement term, and click on the Replace button. - -|Replace in Column| - -To replace all the matched terms in the column, click on the Replace All button. Click the Clear Search button to end the search. - -Keyboard Commands ------------------ - -Some keyboard commands are available to expedite the selection or de-selection of points. To initiate use of the keyboard commands, strike the Control key on the keyboard. For keyboard commands to be activated, the registry configuration editor has to have focus, which comes from interacting with it. But the commands won't be activated if the cursor is in a typable field. - -If the keyboard commands have been successfully activated, a faint highlight will appear over the first row in the registry configuration editor. - -|Start Keyboard Commands| - -Keyboard commands are deactivated when the mouse cursor moves over the configuration editor. If unintentional deactivation occurs, strike the Control key again to reactivate the commands. - -With keyboard commands activated, the highlighted row can be advanced up or down by striking the up or down arrow on the keyboard. A group of rows can be highlighted by striking the up or down arrow while holding down the Shift key. - -|Keyboard Highlight| - -To select the highlighted rows, strike the Enter key. - -|Keyboard Select| - -Striking the Enter key with rows highlighted will also deselect any rows that were already selected. - -Click on the Keyboard Shortcuts button to show a popup list of the available keyboard commands. - -|Keyboard Shortcuts Button| - -|Keyboard Shortcuts| - -Registry Preview ----------------- - -To save the registry configuration, click the Save button at the bottom of the registry configuration editor. - -|Save Registry Button| - -A preview will appear to let you confirm that the configuration is what you intended. - -|Registry Preview Table| - -The configuration also can be inspected in the comma-separated format of the actual registry configuration file. - -|Registry Preview CSV| - -Provide a name for the registry configuration file, and click the Save button to save the file. - -|Name Registry File| - -|Registry Saved| - -Registry Configuration Options ------------------------------- - -Different subsets of configured points can be saved from the same physical device and used to create separate registry files for multiple virtual devices and subdevices. Likewise, a single registry file can be reused by multiple virtual devices and subdevices. - -To reuse a previously saved registry file, click on the Select Registry File (CSV) button at the end of the physical device's listing. - -|Select Saved Registry File| - -The Previously Configured Registry Files window will appear, and a file can be selected to load it into the registry configuration editor. - -|Saved Registry Selector| - -Another option is to import a registry configuration file from the computer running the VOLTTRON Central web application, if one has been saved to local storage connected to the computer. To import a registry configuration file from local storage, click on the Import Registry File (CSV) button at the end of the physical device's listing, and use the file selector window to locate and load the file. - -|File Import Button| - -Reloading Device Points ------------------------ - -Once a physical device has been scanned, the original points from the scan can be reloaded at any point during device configuration by clicking on the Reload Points From Device button at the end of the device's listing. - -|Reload Points| - -Device Configuration Form -~~~~~~~~~~~~~~~~~~~~~~~~~ - -After the registry configuration file has been saved, the device configuration form appears. Creating the device configuration results in the virtual device being installed in the platform and determines the device's position in the side panel tree. It also contains some settings that determine how data is collected from the device. - -|Configure Device Dialog| - -After the device configuration settings have been entered, click the Save button to save the configuration and add the device to the platform. - -|Save Device Config| - -|Device Added| - -Configuring Subdevices ----------------------- - -After a device has been configured, subdevices can be configured by pointing to their position in the Path attribute of the device configuration form. But a subdevice can't be configured until its parent device has been configured first. - -|Subdevice Path| - -|Subdevice 2| - -As devices are configured, they're inserted into position in the side panel tree, along with their configured points. - -|Device Added to Tree| - - -Reconfiguring Devices -~~~~~~~~~~~~~~~~~~~~~ - -A device that's been added to a VOLTTRON instance can be reconfigured by changing its registry configuration or its device configuration. To launch reconfiguration, click on the wrench button next to the device in the side panel tree. - -|Reconfigure Device Button| - -Reconfiguration reloads the registry configuration editor and the device configuration form for the virtual device. The editor and the form work the same way in reconfiguration as during initial device configuration. - -|Reconfiguring Device| - -The reconfiguration view shows the name, address, and ID of the physical device that the virtual device was configured from. It also shows the name of the registry configuration file associated with the virtual device as well as its configured path. - -A different registry configuration file can be associated with the device by clicking on the Select Registry File (CSV) button or the Import Registry File (CSV) button. - -The registry configuration can be edited by making changes to the configuration in the editor and clicking the Save button. - -To make changes to the device configuration form, click on the File to Edit selector and choose Device Config. - -|Reconfigure Option Selector| - -|Reconfigure Device Config| - -Exporting Registry Configuration Files --------------------------------------- - -The registry configuration file associated with a virtual device can be exported from the web browser to the computer's local storage by clicking on the File Export Button in the device reconfiguration view. - -|File Export Button| - -.. |Add Devices| image:: files/01-add-devices.png -.. |Install Devices| image:: files/02-install-devices.png -.. |Start Scan| image:: files/03-start-scan.png -.. |Devices Found| image:: files/04-devices-found.png -.. |Get Device Points| image:: files/05-get-device-points.png -.. |Select Point Before| image:: files/07-select-point-a.png -.. |Select Point During| image:: files/07-select-point-b.png -.. |Select Point After| image:: files/07-select-point-c.png -.. |Edit Points| image:: files/07-edit-points.png -.. |Edit Point Button| image:: files/21-edit-point-button.png -.. |Edit Point Dialog| image:: files/22-edit-point-dialog.png -.. |Filter Points Button| image:: files/08-filter-points-button.png -.. |Filter Set| image:: files/09-filter-set.png -.. |Clear Filter| image:: files/10-clear-filter.png -.. |Add New Point| image:: files/11-add-new-point.png -.. |Add Point Dialog| image:: files/12-add-point-dialog.png -.. |Remove Points| image:: files/13-remove-points-button.png -.. |Confirm Remove Points| image:: files/14-confirm-remove-points.png -.. |Edit Columns| image:: files/15-edit-column-button.png -.. |Edit Column Menu| image:: files/16-edit-column-menu.png -.. |Name Column| image:: files/17-name-column.png -.. |Duplicated Column| image:: files/18-duplicated-column.png -.. |Find in Column| image:: files/19-find-in-column.png -.. |Find Next| image:: files/19-find-in-column-b.png -.. |Replace in Column| image:: files/20-replace-in-column.png -.. |Start Keyboard Commands| image:: files/23-start-keyboard-commands.png -.. |Keyboard Highlight| image:: files/24-keyboard-highlight.png -.. |Keyboard Select| image:: files/25-keyboard-select.png -.. |Keyboard Shortcuts Button| image:: files/26-keyboard-shortcuts-button.png -.. |Keyboard Shortcuts| image:: files/27-keyboard-shortcuts.png -.. |Save Registry Button| image:: files/28-save-registry-button.png -.. |Registry Preview Table| image:: files/29-registry-preview-table.png -.. |Registry Preview CSV| image:: files/30-preview-registry-csv.png -.. |Name Registry File| image:: files/31-name-registry-file.png -.. |Registry Saved| image:: files/32-registry-saved.png -.. |Select Saved Registry File| image:: files/38-select-saved-registry-file.png -.. |Saved Registry Selector| image:: files/39-saved-registry-selector.png -.. |File Import Button| image:: files/40-file-import-button.png -.. |Reload Points| image:: files/41-reload-points-from-device.png -.. |Configure Device Dialog| image:: files/33-configure-device-dialog.png -.. |Save Device Config| image:: files/34-save-device-config.png -.. |Device Added| image:: files/37-device-added.png -.. |Subdevice Path| image:: files/35-subdevice-path.png -.. |Subdevice 2| image:: files/36-subdevice2.png -.. |Device Added to Tree| image:: files/37-device-added-b.png -.. |Reconfigure Device Button| image:: files/43-reconfigure-device-button.png -.. |Reconfiguring Device| image:: files/44-reconfiguring-device.png -.. |Reconfigure Option Selector| image:: files/45-reconfigure-option-selector.png -.. |Reconfigure Device Config| image:: files/46-reconfigure-device-config.png -.. |File Export Button| image:: files/47-file-export-button.png diff --git a/docs/source/devguides/walkthroughs/files/platform-config.png b/docs/source/devguides/walkthroughs/files/platform-config.png deleted file mode 100644 index 3dac2abeff..0000000000 Binary files a/docs/source/devguides/walkthroughs/files/platform-config.png and /dev/null differ diff --git a/docs/source/devguides/walkthroughs/files/register-new-platform.png b/docs/source/devguides/walkthroughs/files/register-new-platform.png deleted file mode 100644 index 6c4d27d1a0..0000000000 Binary files a/docs/source/devguides/walkthroughs/files/register-new-platform.png and /dev/null differ diff --git a/docs/source/devguides/walkthroughs/files/vc-config.png b/docs/source/devguides/walkthroughs/files/vc-config.png deleted file mode 100644 index 5c1793810b..0000000000 Binary files a/docs/source/devguides/walkthroughs/files/vc-config.png and /dev/null differ diff --git a/docs/source/devguides/walkthroughs/index.rst b/docs/source/devguides/walkthroughs/index.rst deleted file mode 100644 index d7e26d6612..0000000000 --- a/docs/source/devguides/walkthroughs/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -============ -Walkthroughs -============ - -.. toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/docs/source/driver-framework/actuator/actuator-agent.rst b/docs/source/driver-framework/actuator/actuator-agent.rst new file mode 100644 index 0000000000..452d86fed7 --- /dev/null +++ b/docs/source/driver-framework/actuator/actuator-agent.rst @@ -0,0 +1,457 @@ +.. _Actuator-Agent: + +============== +Actuator Agent +============== + +This agent is used to manage write access to devices. Agents may request scheduled times, called Tasks, to interact with +one or more devices. + + +.. _Actuator-Communication: + +Actuator Agent Communication +============================ + + +Scheduling a Task +----------------- + +An agent can request a task schedule by publishing to the `devices/actuators/schedule/request` topic with the following +header: + +.. code-block:: python + + { + 'type': 'NEW_SCHEDULE', + 'requesterID': + 'taskID': , #The desired task ID for this task. It must be unique among all other scheduled tasks. + 'priority': , #The desired task priority, must be 'HIGH', 'LOW', or 'LOW_PREEMPT' + } + +with the following message: + +.. code-block:: python + + [ + ["campus/building/device1", #First time slot. + "2013-12-06 16:00:00", #Start of time slot. + "2013-12-06 16:20:00"], #End of time slot. + ["campus/building/device1", #Second time slot. + "2013-12-06 18:00:00", #Start of time slot. + "2013-12-06 18:20:00"], #End of time slot. + ["campus/building/device2", #Third time slot. + "2013-12-06 16:00:00", #Start of time slot. + "2013-12-06 16:20:00"], #End of time slot. + #etc... + ] + +.. warning:: + + If time zones are not included in schedule requests then the Actuator will interpret them as being in local time. + This may cause remote interaction with the actuator to malfunction. + + +Points on Task Scheduling +^^^^^^^^^^^^^^^^^^^^^^^^^ + +- Everything in the header is required +- Task id and requester id (agentid) should be a non empty value of type string +- A Task schedule must have at least one time slot. +- The start and end times are parsed with `dateutil's date/time + parser `__. + **The default string representation of a python datetime object will parse without issue.** +- Two Tasks are considered conflicted if at least one time slot on a device from one task overlaps the time slot of the + other on the same device. +- The end time of one time slot can be the same as the start time of another time slot for the same device. This will + not be considered a conflict. For example, ``time_slot1(device0, time1, **time2**)`` and + ``time_slot2(device0, **time2**, time3)`` are not considered a conflict +- A request must not conflict with itself +- If something goes wrong see :ref:`this failure string list ` for an + explanation of the error. + + +Task Priorities +^^^^^^^^^^^^^^^ + +* `HIGH`: This Task cannot be preempted under any circumstance. This task may preempt other conflicting preemptable + Tasks. + +* `LOW`: This Task cannot be preempted **once it has started**. A Task is considered started once the earliest time slot + on any device has been reached. This Task may **not** preempt other Tasks. + +* `LOW_PREEMPT`: This Task may be preempted at any time. If the Task is preempted once it has begun running any + current time slots will be given a grace period (configurable in the ActuatorAgent configuration file, defaults to 60 + seconds) before being revoked. This Task may **not** preempt other Tasks. + + +Canceling a Task +---------------- + +A task may be canceled by publishing to the `devices/actuators/schedule/request` topic with the following header: + +.. code-block:: python + + { + 'type': 'CANCEL_SCHEDULE', + 'requesterID': + 'taskID': , #The desired task ID for this task. It must be unique among all other scheduled tasks. + } + + +Points on Task Canceling +^^^^^^^^^^^^^^^^^^^^^^^^ + +- The requesterID and taskID must match the original values from the original request header. +- After a Tasks time has passed there is no need to cancel it. Doing so will result in a `TASK_ID_DOES_NOT_EXIST` + error. +- If something goes wrong see :ref:`this failure string list ` for an explanation + of the error. + + +Actuator Agent Schedule Response +-------------------------------- + +In response to a Task schedule request the ActuatorAgent will respond on the topic `devices/actuators/schedule/result` +with the header: + +.. code-block:: python + + { + 'type': <'NEW_SCHEDULE', 'CANCEL_SCHEDULE'> + 'requesterID': , + 'taskID': + } + +And the message (after parsing the json): + +.. code-block:: python + + { + 'result': <'SUCCESS', 'FAILURE', 'PREEMPTED'>, + 'info': , + 'data': + } + +The Actuator Agent may publish cancellation notices for preempted Tasks using the `PREEMPTED` result. + + +Preemption Data +^^^^^^^^^^^^^^^ + +Preemption data takes the form: + +.. code-block:: python + + { + 'agentID': , + 'taskID': + } + + +.. _Actuator-Failure-Reasons: + +Failure Reasons +^^^^^^^^^^^^^^^ + +In many cases the Actuator Agent will try to give good feedback as to why a request failed. + + +General Failures +"""""""""""""""" + +* `INVALID_REQUEST_TYPE`: Request type was not `NEW_SCHEDULE` or `CANCEL_SCHEDULE`. +* `MISSING_TASK_ID`: Failed to supply a taskID. +* `MISSING_AGENT_ID`: AgentID not supplied. + + +Task Schedule Failures +"""""""""""""""""""""" + +* `TASK_ID_ALREADY_EXISTS`: The supplied taskID already belongs to an existing task. +* `MISSING_PRIORITY`: Failed to supply a priority for a Task schedule request. +* `INVALID_PRIORITY`: Priority not one of `HIGH`, `LOW`, or `LOW_PREEMPT`. +* `MALFORMED_REQUEST_EMPTY`: Request list is missing or empty. +* `REQUEST_CONFLICTS_WITH_SELF`: Requested time slots on the same device overlap. +* `MALFORMED_REQUEST`: Reported when the request parser raises an unhandled exception. The exception name and info are + appended to this info string. +* `CONFLICTS_WITH_EXISTING_SCHEDULES`: This schedule conflict with an existing schedules that it cannot preempt. The + data item for the results will contain info about the conflicts in this form (after parsing json) + +.. code-block:: python + + { + '': + { + '': + [ + ["campus/building/device1", + "2013-12-06 16:00:00", + "2013-12-06 16:20:00"], + ["campus/building/device1", + "2013-12-06 18:00:00", + "2013-12-06 18:20:00"] + ] + '':[...] + } + '': {...} + } + + +Task Cancel Failures +"""""""""""""""""""" + +* `TASK_ID_DOES_NOT_EXIST`: Trying to cancel a Task which does not exist. This error can also occur when trying to + cancel a finished Task. +* `AGENT_ID_TASK_ID_MISMATCH`: A different agent ID is being used when trying to cancel a Task. + + +.. _Actuator-Value-Request: + +Actuator Agent Value Request +---------------------------- + +Once an Task has been scheduled and the time slot for one or more of the devices has started an agent may interact with +the device using the **get** and **set** topics. + +Both **get** and **set** are responded to the same way. See :ref:`Actuator Reply ` below. + +Getting values +^^^^^^^^^^^^^^ + +While a driver for a device should always be setup to periodically broadcast the state of a device you may want an +up-to-the-moment value for an actuation point on a device. + +To request a value publish a message to the following topic: + +.. code-block:: python + + 'devices/actuators/get//' + + +Setting Values +^^^^^^^^^^^^^^ + +Value are set in a similar manner: + +To set a value publish a message to the following topic: + +.. code-block:: python + + 'devices/actuators/set//' + +With this header: + +.. code-block:: python + + #python + { + 'requesterID': + } + +And the message contents being the new value of the actuator. + +.. warning:: + + The actuator agent expects all messages to be JSON and will parse them accordingly. Use `publish_json` to send + messages where possible. This is significant for Boolean values especially + +.. _Actuator-Reply: + +Actuator Reply +^^^^^^^^^^^^^^ + +The ActuatorAgent will reply to both `get` and `set` on the `value` topic for an actuator: + +.. code-block:: python + + 'devices/actuators/value//' + +With this header: + +.. code-block:: python + + { + 'requesterID': + } + +With the message containing the value encoded in JSON. + +Actuator Error Reply +^^^^^^^^^^^^^^^^^^^^ + +If something goes wrong the Actuator Agent will reply to both `get` and `set` on the `error` topic for an actuator: + +.. code-block:: python + + 'devices/actuators/error//' + +With this header: + +.. code-block:: python + + { + 'requesterID': + } + +The message will be in the following form: + +.. code-block:: python + + { + 'type': + 'value': + } + +Common Error Types +^^^^^^^^^^^^^^^^^^ + +* `LockError`: Returned when a request is made when we do not have permission to use a device. (Forgot to schedule, + preempted and we did not handle the preemption message correctly, ran out of time in time slot, etc...) +* `ValueError`: Message missing or could not be parsed as JSON + + +.. _Actuator-Schedule-State: + +Schedule State Broadcast +------------------------ + +Periodically the ActuatorAgent will publish the state of all currently scheduled devices. For each device the +ActuatorAgent will publish to an associated topic: + +.. code-block:: python + + 'devices/actuators/schedule/announce/' + +With the following header: + +.. code-block:: python + + { + 'requesterID': , + 'taskID': + 'window': + } + +The frequency of the updates is configurable with the `schedule_publish_interval` setting. + + +Task Preemption +--------------- + +Both `LOW` and `LOW_PREEMPT` priority Tasks can be preempted. `LOW` priority Tasks may be preempted by a conflicting +`HIGH` priority Task before it starts. `LOW_PREEMPT` priority Tasks can be preempted by `HIGH` priority Tasks even +after they start. + +When a Task is preempted the ActuatorAgent will publish to `devices/actuators/schedule/response` with the following +header: + +.. code-block:: python + + { + 'type': 'CANCEL_SCHEDULE', + 'requesterID': , + 'taskID': + } + +And the message (after parsing the json): + +.. code-block:: python + + { + 'result': 'PREEMPTED', + 'info': '', + 'data': + { + 'agentID': , + 'taskID': + } + } + + +Preemption Grace Time +^^^^^^^^^^^^^^^^^^^^^ + +If a `LOW_PREEMPT` priority Task is preempted while it is running the Task will be given a grace period to clean up +before ending. For every device which has a current time slot the window of remaining time will be reduced to the grace +time. At the end of the grace time the Task will finish. If the Task has no currently open time slots on any devices +it will end immediately. + + +.. _Actuator-Config: + +ActuatorAgent Configuration +--------------------------- + +* `schedule_publish_interval`: Interval between current schedules being published to the message bus for all devices +* `preempt_grace_time`: Minimum time given to Tasks which have been preempted to clean up in seconds. Defaults to 60 +* `schedule_state_file`: File used to save and restore Task states if the ActuatorAgent restarts for any reason. File + will be created if it does not exist when it is needed + +Sample configuration file +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: json + + { + "schedule_publish_interval": 30, + "schedule_state_file": "actuator_state.pickle" + } + + +Heartbeat Signal +---------------- + +The ActuatorAgent can be configured to send a heartbeat message to the device to indicate the platform is running. +Ideally, if the heartbeat signal is not sent the device should take over and resume normal operation. + +The configuration has two parts, the interval (in seconds) for sending the heartbeat and the specific point that should +be modified each iteration. + +The heart beat interval is specified with a global `heartbeat_interval` setting. The ActuatorAgent will automatically +set the heartbeat point to alternating "1" and "0" values. Changes to the heartbeat point will be published like any +other value change on a device. + +The heartbeat points are specified in the driver configuration file of individual devices. + + +.. _Actuator-Notes: + +Notes on Working With the ActuatorAgent +--------------------------------------- + +- An agent can watch the window value from :ref:`device state updates ` to perform scheduled + actions within a timeslot + + - If an Agent's Task is `LOW_PREEMPT` priority it can watch for device state updates where the window is less than + or equal to the grace period (default 60.0) + +- When considering if to schedule long or multiple short time slots on a single device: + + - Do we need to ensure the device state for the duration between slots? + + - Yes: Schedule one long time slot instead + - No: Is it all part of the same Task or can we break it up in case there is a conflict with one of our time + slots? + +- When considering time slots on multiple devices for a single Task: + + - Is the Task really dependent on all devices or is it actually multiple Tasks? + +- When considering priority: + + - Does the Task have to happen **on an exact day**? + + - Yes: Use `HIGH` + - No: Consider `LOW` and reschedule if preempted + + - Is it problematic to prematurely stop a Task once started? + + - Yes: Consider `LOW` or `HIGH` + - No: Consider `LOW_PREEMPT` and watch the device state updates for a small window value + +- If an agent is only observing but needs to assure that no another Task is going on while taking readings it can + schedule the time to prevent other agents from messing with a devices state. The schedule updates can be used as a + reminder as to when to start watching +- **Any** device, existing or not, can be scheduled. This allows for agents to schedule fake devices to create + reminders to start working later rather then setting up their own internal timers and schedules diff --git a/docs/source/driver-framework/bacnet/bacnet-auto-configuration.rst b/docs/source/driver-framework/bacnet/bacnet-auto-configuration.rst new file mode 100644 index 0000000000..31cb43c2af --- /dev/null +++ b/docs/source/driver-framework/bacnet/bacnet-auto-configuration.rst @@ -0,0 +1,291 @@ +.. _BACnet-Auto-Configuration: + +========================= +BACnet Auto-Configuration +========================= + +Included with the platform are two scripts for finding and configuring BACnet devices. These scripts are located in +`scripts/bacnet`. `bacnet_scan.py` will scan the network for devices. `grab_bacnet_config.py` creates a CSV file +for the BACnet driver that can be used as a starting point for creating your own register configuration. + +Both scripts are configured with the file `BACpypes.ini`. + + +Configuring the Utilities +------------------------- + +While running both scripts create a temporary virtual BACnet device using the `bacpypes` library. The virtual +device must be configured properly in order to work. This configuration is stored in `scripts/bacnet/BACpypes.ini` +and will be read automatically when the utility is run. + +.. note:: + + The only value that (usually) needs to be changed is the **address** field. + +.. warning:: + + This is the address bound to the port on the machine you are running the script from, **NOT A TARGET DEVICE** + +This value should be set to the IP address of the network interface used to communicate with the remote device. If +there is more than one network interface you must use the address of the interface connected to the network that can +reach the device. + +In Linux you can usually get the addresses bound to all interfaces by running ``ifconfig`` from the command line. + +If a different outgoing port other than the default 47808 must be used, it can be specified as part of the address in +the form: + +:: + +
: + +In some cases, the netmask of the network will be needed for proper configuration. This can be done following this +format: + +:: + +
/: + +where ```` is the netmask length. The most common value is 24. See +http://www.computerhope.com/jargon/n/netmask.htm + +In some cases, you may also need to specify a different device ID by changing the value of `objectIdentifier` so the +virtual BACnet device does not conflict with any devices on the network. `objectIdentifier` defaults to 599. + + +Sample BACpypes.ini +^^^^^^^^^^^^^^^^^^^ + +.. code-block:: console + + [BACpypes] + objectName: Betelgeuse + address: 10.0.2.15/24 + objectIdentifier: 599 + maxApduLengthAccepted: 1024 + segmentationSupported: segmentedBoth + vendorIdentifier: 15 + + +Scanning for BACnet Devices +--------------------------- + +If the addresses for BACnet devices are unknown they can be discovered using the `bacnet_scan.py` utility. + +To run the utility simply execute the following command: + +.. code-block:: bash + + python bacnet_scan.py + +and expect output similar to this: + +.. code-block:: console + + Device Address =
+ Device Id = 699 + maxAPDULengthAccepted = 1024 + segmentationSupported = segmentedBoth + vendorID = 15 + + Device Address = + Device Id = 540011 + maxAPDULengthAccepted = 480 + segmentationSupported = segmentedBoth + vendorID = 5 + + +Reading Output +^^^^^^^^^^^^^^ + +The address where the device can be reached is listed on the `Device Address` line. The BACnet device ID is listed on +the `Device Id` line. The remaining lines are informational and not needed to configure the BACnet driver. + +For the first example, the IP address ``192.168.1.42`` can be used to reach the device. The second device is behind a +BACnet router and can be reached at ``1002:11``. See :ref:`BACnet router addressing `. + + +BACNet Scan Options +^^^^^^^^^^^^^^^^^^^ + + - ``--address ADDRESS``: Send the WhoIs request only to a specific address. Useful as a way to ping devices on a + network that blocks broadcast traffic. + - ``--range LOW/HIGH``: Specify the device ID range for the results. Useful for filtering. + - ``--timeout SECONDS``: Specify how long to wait for responses to the original broadcast. This defaults to 5 which + should be sufficient for most networks. + - ``--csv-out CSV_OUT``: Write the discovered devices to a CSV file. This can be used as inout for + ``grab_multiple_configs.py``. See :ref:`Scraping Multiple Devices `. + + +Automatically Generating a BACnet Registry Configuration File +------------------------------------------------------------- + +A CSV registry configuration file for the BACnet driver can be generated with the ``grab_bacnet_config.py`` script. + +.. warning:: + + This configuration will need to be edited before it can be used! + +The utility is invoked with the command: + +.. code-block:: bash + + python grab_bacnet_config.py + +This will query the device with the matching device ID for configuration information and print the resulting CSV file to +the console. + +In order to save the configuration to a file use the ``--out-file`` option to specify the output file name. + +Optionally the ``--address`` option can be used to specify the address of the target. In some cases, this is needed to +help establish a route to the device. + + +Output and Assumptions +^^^^^^^^^^^^^^^^^^^^^^ + +* Attempts at determining if a point is writable proved too unreliable. Therefore all points are considered to be + read-only in the output. + +* The only property for which a point is setup for an object is `presentValue`. + +* By default, the `Volttron Point Name` is set to the value of the `name` property of the BACnet object on the + device. In most cases this name is vague. No attempt is made at choosing a better name. A duplicate of + `Volttron Point Name` column called `Reference Point Name` is created to so that once `Volttron Point Name` is + changed a reference remains to the actual BACnet device object name. + +* Meta data from the objects on the device is used to attempt to put useful info in the `Units`, `Unit Details`, + and ``Notes`` columns. Information such as the range of valid values, defaults, the resolution or sensor input, and + enumeration or state names are scraped from the device. + +With a few exceptions "Units" is pulled from the object's "units" property and given the name used by the `bacpypes` +library to describe it. If a value in the **Units** column takes the form + +.. code-block:: python + + UNKNOWN UNIT ENUM VALUE: + +then the device is using a nonstandard value for the units on that object. + + +.. _Scraping-Multiple-BACnet-Devices: + +Scraping Multiple Devices +------------------------- + +The `grab_multiple_configs.py` script will use the CSV output of `bacnet_scan.py` to automatically run +`grab_bacnet_config.py` on every device listed in the CSV file. + +The output is put in two directories. `devices/` contains basic driver configurations for the scrapped devices. +`registry_configs/` contains the registry file generated by grab_bacnet_config.py. + +`grab_multiple_configs.py` makes no assumptions about device names or topics, however the output is appropriate for +the `install_platform_driver_configs.py` script. + + +Grab Multiple Configs Options +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + + - ``--out-directory OUT_DIRECTORY`` Specify the output directory. + - ``--use-proxy`` Use `proxy_grab_bacnet_config.py` to gather configuration data. + - ``--proxy-id`` When using ``-use-proxy``, supply ``proxy-id`` with the VIP identity of a BACnet proxy agent. This + is useful for deployments with multiple BACnet proxies, such as on segmented networks, or in deployments + communicating with multiple BACnet networks. + + +BACnet Proxy Alternative Scripts +-------------------------------- + +Both `grab_bacnet_config.py` and `bacnet_scan.py` have alternative versions called +`proxy_grab_bacnet_config.py` and `proxy_bacnet_scan.py` respectively. These versions require that the +VOLTTRON platform is running and BACnet Proxy agent is running. Both of these agents use the same command line +arguments as their independent counterparts. + +.. warning:: + + These versions of the BACnet scripts are intended as a proof of concept and have not been optimized for performance. + `proxy_grab_bacnet_config.py` takes about 10 times longer to grab a configuration than `grab_bacnet_config.py` + + +Problems and Debugging +---------------------- + +* Both `grab_bacnet_config.py` and `bacnet_scan.py` creates a virtual device that open up a port for communication + with devices. If the BACnet Proxy is running on the VOLTTRON platform it will cause both of these scripts to fail at + startup. Stopping the BACnet Proxy will resolve the problem. + +* Typically the utility should run quickly and finish in 30 seconds or less. In our testing, we have never seen a + successful scrape take more than 15 seconds on a very slow device with many points. Many devices will scrape in less + than 3 seconds. + +* If the utility has not finished after about 60 seconds it is probably having trouble communicating with the device and + should be stopped. Rerunning with debug output can help diagnose the problem. + +To output debug messages to the console add the ``--debug`` switch to the **end** of the command line arguments. + +.. code-block:: bash + + python grab_bacnet_config.py --out-file test.csv --debug + +On a successful run you will see output similar to this: + +.. code-block:: console + + DEBUG:main:initialization + DEBUG:main: - args: Namespace(address='10.0.2.20', buggers=False, debug=[], ini=, max_range_report=1e+20, out_file=) + DEBUG:main.SynchronousApplication:init (, '10.0.2.15') + DEBUG:main:starting build + DEBUG:main:pduSource =
+ DEBUG:main:iAmDeviceIdentifier = ('device', 500) + DEBUG:main:maxAPDULengthAccepted = 1024 + DEBUG:main:segmentationSupported = segmentedBoth + DEBUG:main:vendorID = 5 + DEBUG:main:device_name = MS-NCE2560-0 + DEBUG:main:description = + DEBUG:main:objectCount = 32 + DEBUG:main:object name = Building/FCB.Local Application.Room Real Temp 2 + DEBUG:main: object type = analogInput + DEBUG:main: object index = 3000274 + DEBUG:main: object units = degreesFahrenheit + DEBUG:main: object units details = -50.00 to 250.00 + DEBUG:main: object notes = Resolution: 0.1 + DEBUG:main:object name = Building/FCB.Local Application.Room Real Temp 1 + DEBUG:main: object type = analogInput + DEBUG:main: object index = 3000275 + DEBUG:main: object units = degreesFahrenheit + DEBUG:main: object units details = -50.00 to 250.00 + DEBUG:main: object notes = Resolution: 0.1 + DEBUG:main:object name = Building/FCB.Local Application.OSA + DEBUG:main: object type = analogInput + DEBUG:main: object index = 3000276 + DEBUG:main: object units = degreesFahrenheit + DEBUG:main: object units details = -50.00 to 250.00 + DEBUG:main: object notes = Resolution: 0.1 + ... + +and will finish something like this: + +.. code-block:: console + + ... + DEBUG:main:object name = Building/FCB.Local Application.MOTOR1-C + DEBUG:main: object type = binaryOutput + DEBUG:main: object index = 3000263 + DEBUG:main: object units = Enum + DEBUG:main: object units details = 0-1 (default 0) + DEBUG:main: object notes = BinaryPV: 0=inactive, 1=active + DEBUG:main:finally + +Typically if the BACnet device is unreachable for any reason (wrong IP, network down/unreachable, wrong interface +specified, device failure, etc) the scraper will stall at this message: + +.. code-block:: console + + DEBUG:main:starting build + +If you have not specified a valid interface in BACpypes.ini you will see the following error with a stack trace: + +.. code-block:: console + + ERROR:main:an error has occurred: [Errno 99] Cannot assign requested address + diff --git a/docs/source/driver-framework/bacnet/bacnet-driver.rst b/docs/source/driver-framework/bacnet/bacnet-driver.rst new file mode 100644 index 0000000000..05bbce6e25 --- /dev/null +++ b/docs/source/driver-framework/bacnet/bacnet-driver.rst @@ -0,0 +1,157 @@ +.. _BACnet-Driver: + +============= +BACnet Driver +============= + +BACnet Driver Configuration +=========================== + +Communicating with BACnet devices requires that the :ref:`BACnet Proxy Agent ` is configured and +running. All device communication happens through this agent. + + +Requirements +------------ +The BACnet driver requires the BACPypes package. This package can be installed in an activated environment with: + +.. code-block:: bash + + pip install bacpypes + +Alternatively, running :ref:`bootstrap.py ` with the ``--drivers`` option will install all +requirements for drivers included in the repository including BACnet. + +.. code-block:: bash + + python3 bootstrap.py --drivers + +.. warning:: + + Current versions of VOLTTRON support **only** BACPypes version 0.16.7 + + +.. _BACnet-Configuration-File: + +Driver Config +------------- + +There are nine arguments for the `driver_config` section of the device configuration file: + + - **device_address** - Address of the device. If the target device is behind an IP to MS/TP router then Remote + Station addressing will probably be needed for the driver to find the device + - **device_id** - BACnet ID of the device. Used to establish a route to the device at startup + - **min_priority** - (Optional) Minimum priority value allowed for this device whether specifying the priority + manually or via the registry config. Violating this parameter either in the configuration or when writing to the + point will result in an error. Defaults to 8 + - **max_per_request** - (Optional) Configure driver to manually segment read requests. The driver will only grab up + to the number of objects specified in this setting at most per request. This setting is primarily for scraping + many points off of low resource devices that do not support segmentation. Defaults to 10000 + - **proxy_address** - (Optional) :term:`VIP` address of the BACnet proxy. Defaults to ``platform.bacnet_proxy``. See + :ref:`bacnet-proxy-multiple-networks` for details. Unless your BACnet network has special needs you should not + change this value + - **ping_retry_interval** - (Optional) The driver will ping the device to establish a route at startup. If the + BACnet proxy is not available the driver will retry the ping at this interval until it succeeds. Defaults to 5 + - **use_read_multiple** - (Optional) During a scrape the driver will tell the proxy to use a + ReadPropertyMultipleRequest to get data from the device. Otherwise the proxy will use multiple ReadPropertyRequest + calls. If the BACnet proxy is reporting a device is rejecting requests try changing this to false for that device. + Be aware that setting this to false will cause scrapes for that device to take much longer. Only change if needed. + Defaults to true + - **cov_lifetime** - (Optional) When a device establishes a change of value subscription for a point, this argument + will be used to determine the lifetime and renewal period for the subscription, in seconds. Defaults to 180 + (Added to Platform Driver version 3.2) + +Here is an example device configuration file: + +.. code-block:: json + + { + "driver_config": {"device_address": "10.1.1.3", + "device_id": 500, + "min_priority": 10, + "max_per_request": 24 + }, + "driver_type": "bacnet", + "registry_config":"config://registry_configs/vav.csv", + "interval": 5, + "timezone": "UTC", + "heart_beat_point": "heartbeat" + } + +A `sample BACnet configuration file `_ +can be found in the VOLTTRON repository at `examples/configurations/drivers/bacnet1.config` + + +.. _BACnet-Registry-Configuration-File: + +BACnet Registry Configuration File +---------------------------------- + +The registry configuration file is a `CSV `_ file. Each row +configures a point on the device. + +Most of the configuration file can be generated with the `grab_bacnet_config.py` utility in `scripts/bacnet`. See +:ref:`BACnet Auto-Configuration `. + +Currently, the driver provides no method to access array type properties even if the members of the array are of a +supported type. + +The following columns are required for each row: + + - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this + point. For instance, if the Volttron Point Name is `HeatCall1` (and using the example device configuration above) + then an agent would use `pnnl/isb2/hvac1/HeatCall1` to refer to the point when using the RPC interface of the + Actuator agent + - **Units** - Used for meta data when creating point information on the historian. + - **BACnet Object Type** - A string representing what kind of BACnet standard object the point belongs to. Examples + include: + + * analogInput + * analogOutput + * analogValue + * binaryInput + * binaryOutput + * binaryValue + * multiStateValue + + - **Property** - A string representing the name of the property belonging to the object. Usually, this will be + `presentValue` + - **Writable** - Either `TRUE` or `FALSE`. Determines if the point can be written to. Only points labeled `TRUE` + can be written to through the Actuator Agent. Points labeled `TRUE` incorrectly will cause an error to be + returned when an agent attempts to write to the point + - **Index** - Object ID of the BACnet object + +The following columns are optional: + + - **Write Priority** - BACnet priority for writing to this point. Valid values are 1-16. Missing this column or + leaving the column blank will use the default priority of 16 + - **COV Flag** - Either `True` or `False`. Determines if a BACnet Change-of-Value subscription should be + established for this point. Missing this column or leaving the column blank will result in no change of value + subscriptions being established. (Added to Platform Driver version 3.2) + +Any additional columns will be ignored. It is common practice to include a `Point Name` or `Reference Point Name` +column to include the device documentation's name for the point and `Notes` and `Unit Details` columns for additional +information about a point. + +.. csv-table:: BACnet + :header: Point Name,Volttron Point Name,Units,Unit Details,BACnet Object Type,Property,Writable,Index,Notes + + Building/FCB.Local Application.PH-T,PreheatTemperature,degreesFahrenheit,-50.00 to 250.00,analogInput,presentValue,FALSE,3000119,Resolution: 0.1 + Building/FCB.Local Application.RA-T,ReturnAirTemperature,degreesFahrenheit,-50.00 to 250.00,analogInput,presentValue,FALSE,3000120,Resolution: 0.1 + Building/FCB.Local Application.RA-H,ReturnAirHumidity,percentRelativeHumidity,0.00 to 100.00,analogInput,presentValue,FALSE,3000124,Resolution: 0.1 + Building/FCB.Local Application.CLG-O,CoolingValveOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000107,Resolution: 0.1 + Building/FCB.Local Application.MAD-O,MixedAirDamperOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000110,Resolution: 0.1 + Building/FCB.Local Application.PH-O,PreheatValveOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000111,Resolution: 0.1 + Building/FCB.Local Application.RH-O,ReheatValveOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000112,Resolution: 0.1 + Building/FCB.Local Application.SF-O,SupplyFanSpeedOutputCommand,percent,0.00 to 100.00 (default 0.0),analogOutput,presentValue,TRUE,3000113,Resolution: 0.1 + + +A sample BACnet registry file can be found `here `_ or +in the VOLTTRON repository in `examples/configurations/drivers/bacnet.csv` + + +.. toctree:: + + bacnet-proxy-agent + bacnet-auto-configuration + bacnet-router-addressing diff --git a/docs/source/driver-framework/bacnet/bacnet-proxy-agent.rst b/docs/source/driver-framework/bacnet/bacnet-proxy-agent.rst new file mode 100644 index 0000000000..e3f32e0dfd --- /dev/null +++ b/docs/source/driver-framework/bacnet/bacnet-proxy-agent.rst @@ -0,0 +1,206 @@ +.. _BACnet-Proxy-Agent: + +================== +BACnet Proxy Agent +================== + +Introduction +------------ + +Communication with BACnet device on a network happens via a single virtual BACnet device. In VOLTTRON driver framework, +we use a separate agent specifically for communicating with BACnet devices and managing the virtual BACnet device. + +Requirements +------------ +The BACnet Proxy agent requires the BACPypes package. This package can be installed in an activated environment with: + +.. code-block:: bash + + pip install bacpypes + +Alternatively, running :ref:`bootstrap.py ` with the `--drivers` option will install all +requirements for drivers included in the repository including BACnet. + +.. code-block:: bash + + python3 bootstrap.py --drivers + +.. warning:: + + Current versions of VOLTTRON support **only** BACPypes version 0.16.7 + + +Configuration +------------- + +The agent configuration sets up the virtual BACnet device. + +.. code-block:: json + + { + "device_address": "10.0.2.15", + "max_apdu_length": 1024, + "object_id": 599, + "object_name": "Volttron BACnet driver", + "vendor_id": 15, + "segmentation_supported": "segmentedBoth" + } + + +BACnet device settings +********************** + +- **device_address** - Address bound to the network port over which BACnet communication will happen on the computer + running VOLTTRON. This is **NOT** the address of any target device. See + :ref:`BACnet Router Addressing `. +- **object_id** - ID of the Device object of the virtual BACnet device. Defaults to 599. Only needs to be changed if + there is a conflicting BACnet device ID on your network. + +These settings determine the capabilities of the virtual BACnet device. BACnet communication happens at the lowest +common denominator between two devices. For instance, if the BACnet proxy supports segmentation and the target device +does not communication will happen without segmentation support and will be subject to those limitations. Consequently, +there is little reason to change the default settings outside of the `max_apdu_length` (the default is not the largest +possible value). + +- **max_apdu_length** - (From bacpypes documentation) BACnet works on lots of different types of networks, from + high-speed Ethernet to “slower” and “cheaper” ARCNET or MS/TP (a serial bus protocol used for a field bus defined by + BACnet). For devices to exchange messages they have to know the maximum size message the device can handle. + (End BACpypes docs) + + This setting determines the largest APDU (Application Protocol Data Unit) accepted by the BACnet virtual + device. Valid options are 50, 128, 206, 480, 1024, and 1476. Defaults to 1024.(Optional) + +- **object_name** - Name of the object. Defaults to "Volttron BACnet driver". (Optional) +- **vendor_id** - Vendor ID of the virtual BACnet device. Defaults to 15. (Optional) +- **segmentation_supported** - (From bacpypes documentation) A vast majority of BACnet communications traffic fits into + one message, but there can be times when larger messages are convenient and more efficient. Segmentation allows + larger messages to be broken up into segments and spliced back together. It is not unusual for “low power” field + equipment to not support segmentation. (End BACpypes docs) + + Possible setting are "segmentedBoth" (default), "segmentedTransmit", "segmentedReceive", or "noSegmentation" + (Optional) + + +Device Addressing +----------------- + +In some cases, it will be needed to specify the subnet mask of the virtual device or a different port number to listen +on. The full format of the BACnet device address is: + +:: + +
/: + +where ```` is the port to use and ```` is the netmask length. The most common value is 24. See +http://www.computerhope.com/jargon/n/netmask.htm + +For instance, if you need to specify a subnet mask of ``255.255.255.0`` and the IP address bound to the network port is +``192.168.1.2`` you would use the address: + +:: + + 192.168.1.2/24 + +If your BACnet network is on a different port (47809) besides the default (47808) you would use the address: + +:: + + 192.168.1.2:47809 + +If you need to do both: + +:: + + 192.168.1.2/24:47809 + + +.. _bacnet-proxy-multiple-networks: + +Communicating With Multiple BACnet Networks +------------------------------------------- + +If two BACnet devices are connected to different ports they are considered to be on different BACnet networks. In order +to communicate with both devices, you will need to run one BACnet Proxy Agent per network. + +Each proxy will need to be bound to different ports appropriate for each BACnet network and will need a different VIP +identity specified. When configuring drivers you will need to specify which proxy to use by +:ref:`specifying the VIP identity `. + +For example, a proxy connected to the default BACnet network: + +.. code-block:: json + + { + "device_address": "192.168.1.2/24" + } + +and another on port 47809: + +.. code-block:: json + + { + "device_address": "192.168.1.2/24:47809" + } + +a device on the first network: + +.. code-block:: json + + { + "driver_config": {"device_address": "1002:12", + "proxy_address": "platform.bacnet_proxy_47808", + "timeout": 10}, + "driver_type": "bacnet", + "registry_config":"config://registry_configs/bacnet.csv", + "interval": 60, + "timezone": "UTC", + "heart_beat_point": "Heartbeat" + } + +and a device on the second network: + +.. code-block:: json + + { + "driver_config": {"device_address": "12000:5", + "proxy_address": "platform.bacnet_proxy_47809", + "timeout": 10}, + "driver_type": "bacnet", + "registry_config":"config://registry_configs/bacnet.csv", + "interval": 60, + "timezone": "UTC", + "heart_beat_point": "Heartbeat" + } + +Notice that both configs use the same registry configuration (config://registry_configs/bacnet.csv). This is perfectly +fine as long as the registry configuration is appropriate for both devices. For scraping large numbers of points from +a single BACnet device, there is an optional timeout parameter provided, to prevent the platform driver timing out while +the BACnet Proxy Agent is collecting points. + + +BACnet Change of Value Services +------------------------------- + +|BACnet Change of Value Communications| + +Change of Value Services added in version 0.5 of the BACnet Proxy and version 3.2 of the Platform Driver. + +There are a variety of scenarios in which a user may desire data from some BACnet device point values to be published +independently of the regular scrape interval. Bacpypes provides a "ChangeOfValueServices" (hereby referred to as 'COV') +module, which enables a device to push updates to the platform. + +The BACnet COV requires that points on the device be properly configured for COV. A point on the BACnet device can be +configured with the 'covIncrement' property, which determines the threshold for a COV notification (note: this property +must be configured by the device operator - VOLTTRON does not provide the ability to set or modify this property). + +Based on configuration options for BACnet drivers, the driver will instruct the BACnet Proxy to establish a COV +subscription with the device. The subscription will last for an amount of time specified in the driver configuration, +and will auto-renew the subscription. If the proxy loses communication with the device or the device driver is stopped +the subscription will be removed when the lifetime expires. + +While the subscription exists, the device will send (confirmed) notifications to which will be published, with the topic +based on the driver's configured publish topics. + +https://bacpypes.readthedocs.io/en/latest/modules/service/cov.html + +.. |BACnet Change of Value Communications| image:: files/bacnet_cov.png diff --git a/docs/source/driver-framework/bacnet/bacnet-router-addressing.rst b/docs/source/driver-framework/bacnet/bacnet-router-addressing.rst new file mode 100644 index 0000000000..dffb1ccc1b --- /dev/null +++ b/docs/source/driver-framework/bacnet/bacnet-router-addressing.rst @@ -0,0 +1,34 @@ +.. _BACnet-Router-Addressing: + +======================== +BACnet Router Addressing +======================== + +The underlying library that Volttron uses for BACnet supports IP to MS/TP routers. Devices behind the router use a +Remote Station address in the form: + +:: + + :
+ +where ```` is the configured network ID of the router and ``
`` is the address of the device behind the +router. + +For example to access the device at ``
`` 12 for a router configured for ```` 1002 can be accessed with +this address: + +:: + + 1002:12 + +```` must be number from **0 to 65534** and ``
`` must be a number from **0 to 255**. + +This type of address can be used anywhere an address is required in configuration of the :ref:`Volttron BACnet driver +`. + +Caveats +------- + +VOLTTRON uses a UDP broadcast mechanism to establish the route to the device. If the route cannot be established it +will fall back to a UDP broadcast for all communication with the device. If the IP network where the router is +connected blocks UDP broadcast traffic then these addresses will not work. diff --git a/docs/source/core_services/drivers/files/bacnet_cov.png b/docs/source/driver-framework/bacnet/files/bacnet_cov.png similarity index 100% rename from docs/source/core_services/drivers/files/bacnet_cov.png rename to docs/source/driver-framework/bacnet/files/bacnet_cov.png diff --git a/docs/source/core_services/drivers/driver_configuration/chargepoint-driver.rst b/docs/source/driver-framework/chargepoint/chargepoint-driver.rst similarity index 62% rename from docs/source/core_services/drivers/driver_configuration/chargepoint-driver.rst rename to docs/source/driver-framework/chargepoint/chargepoint-driver.rst index 70c2aebe8a..92c2f05c2e 100644 --- a/docs/source/core_services/drivers/driver_configuration/chargepoint-driver.rst +++ b/docs/source/driver-framework/chargepoint/chargepoint-driver.rst @@ -1,29 +1,36 @@ -.. _Chargepoint-config: +.. _Chargepoint-Driver: + +================== +Chargepoint Driver +================== + + +.. _Chargepoint-Config: Chargepoint Driver Configuration --------------------------------- +================================ -The chargepoint driver requires at least one additional python library and has its own ``requirements.txt``. -Make sure to run +The chargepoint driver requires at least one additional python library and has its own `requirements.txt`. +Make sure to run: -:: +.. code-block:: bash pip install -r /requirements.txt before using this driver. + driver_config -************* +------------- There are three arguments for the **driver_config** section of the device configuration file: - - **stationID** - Chargepoint ID of the station. This format is ususally '1:00001' + - **stationID** - Chargepoint ID of the station. This format is usually '1:00001' - **username** - Login credentials for the Chargepoint API - - **password** - Login credentials for the Chargepoint API + - **password**- Login credentials for the Chargepoint API -The Chargepoint login credentials are generated in the Chargepoint web portal and require -a chargepoint account with sufficient privileges. Station IDs are also available on -the web portal. +The Chargepoint login credentials are generated in the Chargepoint web portal and require a Chargepoint account with +sufficient privileges. Station IDs are also available on the web portal. Here is an example device configuration file: @@ -40,24 +47,30 @@ Here is an example device configuration file: "heart_beat_point": "heartbeat" } -A sample Chargepoint configuration file can be found in the VOLTTRON repository -in ``examples/configurations/drivers/chargepoint1.config`` +A sample Chargepoint configuration file can be found in the VOLTTRON repository in +`examples/configurations/drivers/chargepoint1.config` -.. _Chargepoint-Driver: + +.. _Chargepoint-Registry-Config: Chargepoint Registry Configuration File -*************************************** +--------------------------------------- -The registry configuration file is a `CSV `_ file. Each row configures a point on the device. +The registry configuration file is a `CSV `_ file. Each row +configures a point on the device. The following columns are required for each row: - - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this point. - - **Attribute Name** - Chargepoint API attribute name. This determines the field that will be read from the API response and must be one of the allowed values. - - **Port #** - If the point describes a specific port on the Chargestation, it is defined here. (Note 0 and an empty value are equivalent.) + - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this + point. + - **Attribute Name** - Chargepoint API attribute name. This determines the field that will be read from the API + response and must be one of the allowed values. + - **Port #** - If the point describes a specific port on the Chargestation, it is defined here. (Note 0 and an empty + value are equivalent.) - **Type** - Python type of the point value. - **Units** - Used for meta data when creating point information on the historian. - - **Writable** - Either "TRUE" or "FALSE". Determines if the point can be written to. Only points labeled TRUE can be written. + - **Writable** - Either "TRUE" or "FALSE". Determines if the point can be written to. Only points labeled TRUE can + be written. - **Notes** - Miscellaneous notes field. - **Register Name** - A string representing how to interpret the data register. Acceptable values are: * StationRegister @@ -67,7 +80,13 @@ The following columns are required for each row: * StationRightsRegister - **Starting Value** - Default value for writeable points. Read-only points should not have a value in this column. -Detailed descriptions for all available chargepoint registers may be found in the ``README.rst`` in the -chargepoint driver directory. +Detailed descriptions for all available Chargepoint registers may be found in the ``README.rst`` in the Chargepoint +driver directory. + +A sample Chargepoint registry file can be found in the VOLTTRON repository in +``examples/configurations/drivers/chargepoint.csv`` + + +.. toctree:: -A sample Chargepoint registry file can be found in the VOLTTRON repository in ``examples/configurations/drivers/chargepoint.csv`` + chargepoint-specification diff --git a/docs/source/specifications/chargepoint_driver.rst b/docs/source/driver-framework/chargepoint/chargepoint-specification.rst similarity index 78% rename from docs/source/specifications/chargepoint_driver.rst rename to docs/source/driver-framework/chargepoint/chargepoint-specification.rst index 6ff018b9aa..16c2b6d0ee 100644 --- a/docs/source/specifications/chargepoint_driver.rst +++ b/docs/source/driver-framework/chargepoint/chargepoint-specification.rst @@ -1,27 +1,28 @@ -.. _Chargepoint-Driver: +.. _Chargepoint-Specification: -Chargepoint API Driver -====================== +==================================== +Chargepoint API Driver Specification +==================================== Spec Version 1.1 `ChargePoint `_ operates the largest independently owned EV charging network in the US. -It sells charge stations to businesses and provides a web application to manage and report on these chargestations. +It sells charge stations to businesses and provides a web application to manage and report on these Chargestations. Chargepoint offers a `Web Services API `_ -that its customers may use to develop applications that integrate with the chargepoint network devices. +that its customers may use to develop applications that integrate with the Chargepoint network devices. The Chargepoint API Driver for VOLTTRON will enable real-time monitoring and control of Chargepoint EVSEs within the VOLTTRON platform by creating a standard VOLTTRON device driver on top of the Chargepoint Web Services API. -Each port on each managed chargestation will look like a standard VOLTTRON device, monitored and controlled through +Each port on each managed Chargestation will look like a standard VOLTTRON device, monitored and controlled through the VOLTTRON device driver interface. Driver Scope & Functions ------------------------- +======================== This driver will enable VOLTTRON to support the following use cases with Chargepoint EVSEs: - - Monitoring of chargestation status, load and energy consumption + - Monitoring of Chargestation status, load and energy consumption - Demand charge reduction - Time shifted charging - Demand response program participation @@ -46,7 +47,10 @@ getStations Returns description/address/nameplate of charg The Chargepoint Driver will implement version 5.0 Rev 7 of the Chargepoint API. While the developer's guide is not yet publicly available, the WSDL Schema is. -*Note: Station Reservation API has been removed from the 5.0 version of the API.* + +.. note:: + + Station Reservation API has been removed from the 5.0 version of the API.* WSDL for this API is located here: @@ -54,41 +58,43 @@ WSDL for this API is located here: Mapping VOLTTRON Device Interface to Chargepoint APIs ------------------------------------------------------ +===================================================== -The VOLTTRON driver interface represents a single device as a list of registers accessed through a simple get_point/ -set_point API. In contrast, the Chargepoint web services for real-time monitoring and control are spread across +The VOLTTRON driver interface represents a single device as a list of registers accessed through a simple `get_point`/ +`set_point` API. In contrast, the Chargepoint web services for real-time monitoring and control are spread across eight distinct APIs that return hierarchical XML. The Chargepoint driver is the adaptor that will make a suite of web services look like a single VOLTTRON device. Device Mapping -^^^^^^^^^^^^^^ +-------------- -The chargepoint driver will map a single VOLTTRON device (a driver instance) to one chargestation. Since -a chargestation can have multiple ports, each with their own set of telemetry, the registry will include a port +The Chargepoint driver will map a single VOLTTRON device (a driver instance) to one Chargestation. Since +a Chargestation can have multiple ports, each with their own set of telemetry, the registry will include a port index column on attributes that are specific to a port. This will allow deployments to use an indexing convention that has been followed with other drivers. (See Registry Configuration for more details) + Requirements ------------- +============ -The chargepoint driver requires at least one additional python library and has its own ``requirements.txt``. +The Chargepoint driver requires at least one additional Python library and has its own `requirements.txt`. Make sure to run -:: +.. code-block:: bash pip install -r /requirements.txt before using this driver. + Driver Configuration --------------------- +==================== -Each device must be configured with its own Driver Configuration File. The Driver Configuration must reference -the Registry Configuration File, defining the set of points that will be available from the device. For -chargestation devices, the ``driver_config`` entry of the Driver Configuration file will need to contain all +Each device must be configured with its own driver configuration file. The driver configuration must reference +the registry configuration file, defining the set of points that will be available from the device. For +Chargestation devices, the `driver_config` entry of the driver Configuration file will need to contain all parameters required by the web service API: @@ -100,7 +106,7 @@ password stationID Unique station ID assigned by chargepoint ======================= ========================================================================== -The ``driver_type`` must be ``chargepoint`` +The `driver_type` must be ``chargepoint`` A sample driver configuration file for a single device, looks like this: @@ -119,44 +125,43 @@ The ``driver_type`` must be ``chargepoint`` } - API Plans & Access Rights -^^^^^^^^^^^^^^^^^^^^^^^^^ +------------------------- Chargepoint offers API plans that vary in available features and access rights. Some of the API calls to be implemented here are not available across all plans. Furthermore, the attributes returned in response to an API call may be limited by the API plan and access rights associated with the userid. Runtime -exceptions related to plans and access rights will generate DriverInterfaceError exceptions. These can be +exceptions related to plans and access rights will generate `DriverInterfaceError` exceptions. These can be avoided by using a registry configuration that does not include APIs or attributes that are not -available to the . +available to the ``. Registry Configuration ----------------------- +====================== The registry file defines the individual points that will be exposed by the Chargepoint driver. It should only reference points that will actually be used since each point is potentially an additional web service call. The driver will be smart and limit API calls to those that are required to satisfy the points found in the CSV. -Naming of points will conform to the conventions established by the Chargepoint Web services API whenever possible. +Naming of points will conform to the conventions established by the Chargepoint web services API whenever possible. Note that Chargepoint naming conventions are camel-cased with no spaces or hyphens. Multi-word names start with a lowercase letter. Single word names start uppercase. -The available registry entries for each API method name are shown below along with a description of any -notable behavior associated with that register. Following that is a sample of the -associated XML returned by the API. +The available registry entries for each API method name are shown below along with a description of any notable behavior +associated with that register. Following that is a sample of the associated XML returned by the API. getStationStatus -^^^^^^^^^^^^^^^^ +---------------- -The getStationStatus query returns information for all ports on the chargestation. +The `getStationStatus` query returns information for all ports on the Chargestation. .. note:: In all the registry entries shown below, the **Attribute Name** column defines the unique name within the - chargepoint driver that must be used to reference this particular attribute and associated API. The - **VOLTTRON point name** usually matches the **Attribute Name** in these examples but may be changed during deployment. + Chargepoint driver that must be used to reference this particular attribute and associated API. The + **VOLTTRON point name** usually matches the **Attribute Name** in these examples but may be changed during + deployment. .. csv-table:: getStationStatus @@ -165,7 +170,7 @@ The getStationStatus query returns information for all ports on the chargestatio Status,Status,StationStatusRegister,1,string,,,FALSE,"AVAILABLE, INUSE, UNREACHABLE, UNKNOWN " Status.TimeStamp,TimeStamp,StationStatusRegister,1,datetime,,,FALSE,Timestamp of the last communication between the station and ChargePoint -Sample XML returned by getStationStatus. +Sample XML returned by `getStationStatus`. .. code-block:: xml @@ -190,11 +195,11 @@ Sample XML returned by getStationStatus. getLoad, shedLoad, clearShedState -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +--------------------------------- -Reading any of these values will return the result of a call to getLoad. Writing shedState=True will call -shedLoad and pass the last written value of allowedLoad or percentShed. The API allows only one of these -two values to be provided. Writing to allowedLoad will simultaneously set percentShed to None and vice +Reading any of these values will return the result of a call to `getLoad`. Writing ``shedState=True`` will call +`shedLoad` and pass the last written value of `allowedLoad` or `percentShed`. The API allows only one of these +two values to be provided. Writing to `allowedLoad` will simultaneously set `percentShed` to ``None`` and vice versa. .. csv-table:: getLoad, shedLoad, clearShedState @@ -205,7 +210,7 @@ versa. allowedLoad,allowedLoad,LoadRegister,1,float,kw,,TRUE,Allowed load in kw when shedState is True percentShed,percentShed,LoadRegister,1,integer,percent,,TRUE,Percent of max power shed when shedState is True -Sample XML returned by getLoad +Sample XML returned by `getLoad` .. code-block:: xml @@ -240,7 +245,7 @@ Sample XML returned by getLoad -Sample shedLoad XML query to set the allowed load on a port to 3.0kw. +Sample `shedLoad` XML query to set the allowed load on a port to 3.0kw. .. code-block:: xml @@ -261,16 +266,16 @@ Sample shedLoad XML query to set the allowed load on a port to 3.0kw. getAlarms, clearAlarms -^^^^^^^^^^^^^^^^^^^^^^ +---------------------- -The getAlarms query returns a list of all alarms since last cleared. The driver interface will only return -data for the most recent alarm, if present. While the getAlarm query provides various station identifying -attributes, these will be made available through registers associated with the getStations API. If an alarm is -not specific to a particular port, it will be associated with all chargestation ports and available through any +The `getAlarms` query returns a list of all alarms since last cleared. The driver interface will only return +data for the most recent alarm, if present. While the `getAlarm` query provides various station identifying +attributes, these will be made available through registers associated with the `getStations` API. If an alarm is +not specific to a particular port, it will be associated with all Chargestation ports and available through any of its device instances. -Write ``True`` to clearAlarms to submit the clearAlarms query to the **chargestation**. It will clear alarms -across all ports on that chargestation. +Write ``True`` to `clearAlarms` to submit the `clearAlarms` query to the **chargestation**. It will clear alarms +across all ports on that Chargestation. .. csv-table:: getAlarms, clearAlarms @@ -299,11 +304,11 @@ across all ports on that chargestation. getStationRights -^^^^^^^^^^^^^^^^ +---------------- Returns the name of the stations rights profile. A station may have multiple station rights profiles, each associated -with a different station group ID. For this reason, the stationRightsProfile register will return a dictionary of -(sgID, name) pairs. Since this is a chargestation level attribute, it will be returned for all ports. +with a different station group ID. For this reason, the `stationRightsProfile` register will return a dictionary of +`(sgID, name)` pairs. Since this is a Chargestation level attribute, it will be returned for all ports. .. csv-table:: getStationRights @@ -340,9 +345,9 @@ with a different station group ID. For this reason, the stationRightsProfile re getChargingSessionData -^^^^^^^^^^^^^^^^^^^^^^ +---------------------- -Like getAlarms, this query returns a list of session data. The driver interface implementation will make the +Like `getAlarms`, this query returns a list of session data. The driver interface implementation will make the last session data available. .. csv-table:: getChargingSessionData @@ -378,9 +383,9 @@ last session data available. getStations -^^^^^^^^^^^ +----------- -This API call returns a complete description of the chargestation in 40 fields. This information is essentially +This API call returns a complete description of the Chargestation in 40 fields. This information is essentially static and will change infrequently. It should not be scraped on a regular basis. The list of attributes will be included in the registry CSV but are only listed here: @@ -393,30 +398,36 @@ included in the registry CSV but are only listed here: Engineering Discussion ----------------------- +====================== Questions -^^^^^^^^^ +--------- - **Allowed python-type** - We propose a register with a `python-type` of dictionary. Is this OK? - - **Scrape Interval** - Scrape all should not return all registers defined in the CSV, we propose fine grained control with a scrape-interval on each register. Response: ok to add extra settings to registry but don't worry about pubishing static data with every scrape - - **Data currency** - Since devices are likely to share api calls, at least across ports, we need to think about the currency of the data and possibly allowing this to be a configurable parameter or derviced from the scrape interval. Response: add to CSV with default values if not present + - **Scrape Interval** - Scrape all should not return all registers defined in the CSV, we propose fine grained + control with a scrape-interval on each register. Response: ok to add extra settings to registry but don't worry + about publishing static data with every scrape + - **Data currency** - Since devices are likely to share api calls, at least across ports, we need to think about the + currency of the data and possibly allowing this to be a configurable parameter or derived from the scrape interval + . Response: add to CSV with default values if not present Performance -^^^^^^^^^^^ +----------- + Web service calls across the internet will be significantly slower than typical VOLTTRON Bacnet or Modbus devices. It -may be prohibitively expensive for each chargepoint sub-agent instance to make individual requests on behalf of -its own EVSE+port. We will need to examine the possibility of making a single request for all active chargestations +may be prohibitively expensive for each Chargepoint sub-agent instance to make individual requests on behalf of +its own EVSE+port. We will need to examine the possibility of making a single request for all active Chargestations and sharing that information across driver instances. This could be done through a separate agent that regularly -queries the chargepoint network and makes the data available to each sub-agent via an RPC call. +queries the Chargepoint network and makes the data available to each sub-agent via an RPC call. 3rd Party Library Dependencies -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The chargepoint driver implementation will depend on one additional 3rd part library that is not part of a standard +------------------------------ + +The Chargepoint driver implementation will depend on one additional 3rd part library that is not part of a standard VOLTTRON installation: .. @@ -427,5 +438,3 @@ VOLTTRON installation: Is there a mechanism for drivers to specify their own requirements.txt ? Driver installation and configuration documentation can reference requirement.txt - - diff --git a/docs/source/core_services/drivers/driver_configuration/dnp3-driver.rst b/docs/source/driver-framework/dnp3-driver/dnp3-driver.rst similarity index 77% rename from docs/source/core_services/drivers/driver_configuration/dnp3-driver.rst rename to docs/source/driver-framework/dnp3-driver/dnp3-driver.rst index 38e68b6b4c..d35c51e056 100644 --- a/docs/source/core_services/drivers/driver_configuration/dnp3-driver.rst +++ b/docs/source/driver-framework/dnp3-driver/dnp3-driver.rst @@ -1,28 +1,31 @@ -.. _DNP3-Driver-Config: +.. _DNP3-Driver: -DNP3 Driver Configuration -------------------------- +=========== +DNP3 Driver +=========== -VOLTTRON's DNP3 driver enables the use -of `DNP3 `_ (Distributed Network Protocol) +VOLTTRON's DNP3 driver enables the use of `DNP3 `_ (Distributed Network Protocol) communications, reading and writing points via a DNP3 Outstation. -In order to use a DNP3 driver to read and write point data, VOLTTRON's DNP3Agent must also +In order to use a DNP3 driver to read and write point data, VOLTTRON's DNP3 Agent must also be configured and running. All communication between the VOLTTRON Outstation and a -DNP3 Master happens through this DNP3Agent. -For information about the DNP3Agent, please see the :ref:`DNP3 Platform Specification `. +DNP3 Master happens through the DNP3 Agent. + +For information about the DNP3 Agent, please see the :ref:`DNP3 Platform Specification `. + Requirements ------------- -The DNP3 driver requires the PyDNP3 package. This package can be installed in an -activated environment with: +============ -:: +The DNP3 driver requires the PyDNP3 package. This package can be installed in an activated environment with: + +.. code-block:: bash pip install pydnp3 -driver_config -************* + +Driver Configuration +==================== There is one argument for the "driver_config" section of the DNP3 driver configuration file: @@ -47,12 +50,11 @@ Here is a sample DNP3 driver configuration file: } A sample DNP3 driver configuration file can be found in the VOLTTRON repository -in ``services/core/MasterDriverAgent/example_configurations/test_dnp3.config``. +in ``services/core/PlatformDriverAgent/example_configurations/test_dnp3.config``. -.. _DNP3-Driver: DNP3 Registry Configuration File -******************************** +================================ The driver's registry configuration file, a `CSV `_ file, specifies which DNP3 points the driver will read and/or write. Each row configures a single DNP3 point. @@ -75,13 +77,13 @@ A sample data dictionary is available in ``services/core/DNP3Agent/dnp3/mesa_poi Point definitions in the DNP3 driver's registry should look something like this: -.. code-block:: csv +.. csv-table:: DNP3 + :header: Volttron Point Name,Group,Index,Scaling,Units,Writable - Volttron Point Name,Group,Index,Scaling,Units,Writable DCHD.WTgt,41,65,1.0,NA,FALSE DCHD.WTgt-In,30,90,1.0,NA,TRUE DCHD.WinTms,41,66,1.0,NA,FALSE DCHD.RmpTms,41,67,1.0,NA,FALSE A sample DNP3 driver registry configuration file is available -in ``services/core/MasterDriverAgent/example_configurations/dnp3.csv``. +in ``services/core/PlatformDriverAgent/example_configurations/dnp3.csv``. diff --git a/docs/source/driver-framework/drivers-overview.rst b/docs/source/driver-framework/drivers-overview.rst new file mode 100644 index 0000000000..074230e4ac --- /dev/null +++ b/docs/source/driver-framework/drivers-overview.rst @@ -0,0 +1,164 @@ +.. _Driver-Framework: + +========================= +Driver Framework Overview +========================= + +VOLTTRON drivers act as an interface between agents on the platform and a device. While running on the platform, +drivers are special purpose agents which instead of being run as a separate process, are run as a greenlet in the +Platform Driver process. + +Driver instances are created by the Platform Driver when a new driver configuration is added to the configuration store. +Drivers use the following topic pattern `devices///`. When a configuration file is added +to the Platform Driver's store using this pattern, the Platform Driver creates a Driver Agent. The Driver agent is in turn +instantiated with a instance of the Interface class corresponding to the `driver_type` parameter in the configuration +file. The Interface class is responsible for implementing the communication paradigms of a device or protocol. Once +configured, the Platform Driver periodically polls the Driver Agent for data which is collected from the interface class. +Additionally, points can be requested ad-hoc via the Platform Driver's JSON-RPC method "get_point". Points may be set +by using JSON-RPC with the Actuator agent to set up a schedule and calling the "set_point" method. + + +Driver Conventions +****************** + +- Drivers are polled by the Platform Driver agent and values can be set using the `Actuator Agent` +- Drivers should have a 1-to-1 relationship with a device +- Driver modules should be written in Python files in the `services/core/PlatformDriverAgent/platform_driver/interfaces` + directory in the VOLTTRON repository. The platform driver will search for a Python file in this directory matching the + name provided by the `driver_type` value from the driver configuration when creating the Driver agent. +- Driver code consists of an Interface class (exactly named), supported in most cases by one or more Register classes + + +.. _Driver_Communication: + +Agent-Driver Communication Patterns +*********************************** + +The VOLTTRON message bus has been developed to allow agents on the platform to interact with each other, as well as with +ICS (Industrial Control Systems) and IOT (Internet of Things) devices via the VOLTTRON driver framework. Agents and +drivers have the ability to publish data to the message bus and to subscribe to message bus topics to read in data as it +is published. Additionally, agents may implement JSONRPC calls and expose JSONRPC endpoints to communicate more directly +with other agents. The following diagram demonstrates typical platform communication patterns for a single platform +deployment. + + +Typical Single Platform Behavior +================================ + +The diagram features several entities that comprise the platform and its connected components: + +* The VOLTTRON message bus - The message bus is the means of transmission of information in VOLTTRON. The VOLTTRON + message bus is built around existing message bus software; currently VOLTTRON supports RabbitMQ and ZeroMQ. The + VOLTTRON integration includes Pub/Sub and JSON RPC interfaces for agent and driver communication. +* VOLTTRON Platform Agents and Subsystems - These agents and subsystems are installed on the platform to manage the + platform. They provide many user facing functions, aid in communication and manage other agents and drivers. +* User's Agents - These agents are either agents included in the core repository but installed by a user, or user built + agent modules. They may perform a huge variety of user specified tasks, including data collection, device control, + simulation, etc. +* Platform Driver Agent - This agent is installed by a user to facilitate communication with drivers. Drivers should not + communicated with directly - the platform driver implements several features for communicating with drivers to ensure + smooth operation and consistent driver behavior. +* Actuator agent - This agent is installed by user to provide scheduling capability for controlling drivers. The + Platform Driver does not include protections for race conditions, etc. It is always recommended to use the Actuator + agent to set values on a device. +* Device Driver - Drivers are special purpose agents which provide an interface between the platform driver and devices + such as Modbus, and BACnet devices. Drivers implement a specific set of features for protecting device communication + ensuring uniform behaviors across different devices. +* Device - Devices may be low level physical computers for controlling various systems such as PLCs (Programmable Logic + Controller), devices which communicate on the local network (such as a Smart T.V.), or devices which are accessed via + a remote web API (other smart devices). + + +Lines of Communication +---------------------- + +Connectivity of the platform follows the following paradigm: + +* Platform agents (including the Platform Driver and Actuator), subsystems, and user agents communicate with the message + bus via a publish/subscribe system. +* Agents can communicate "directly" to each other via JSONRPC calls - JSONRPC calls use the VOLTTRON message bus router + to "direct" messages to an intended recipient. RPC calls from an agent specify a function for the recipient to + perform including input parameters, and the response to the sender should contain the value output by the specified + function. +* The Platform Driver will periodically poll device drivers. This functionality is intentionally not user-facing. The + Platform Driver iterates over the configured drivers and calls their respective "scrape_all" methods. This will trigger + the drivers to collect point values. +* The Driver will communicate with its configured end devices to collect data points which it then returns to the + driver. The driver then publishes the point data to the bus under the `///all` topic. +* To get an individual device point, the user agent should send an RPC call to the Platform Driver for "get_point", + providing the point's corresponding topic. After the Platform Driver processes the request, communication happens very + similarly to polling, but rather than an "all" publish, the data is returned via the Platform Driver to the user agent. +* To set a point on a device, it is recommended to use an Actuator Agent. The user agent sends an RPC request to the + Actuator to schedule time for the agent to control the device. During that scheduled time the user agent may send it + a set point request. If the schedule has been created, the actuator will then forward that request to the Platform + Driver, at which point the communication happens similarly to a "get_point" request. + +The general paradigm for the device-driver relationship as specified by the VOLTTRON driver framework is a 1-to-1 +relationship. Each end device should be interacted with via a single device driver configured on one platform. To +distribute device data, the DataPuller and forwarder agents can be used at the platform level. Multiple platforms are +not intended to collect data or share control of a single device. + +The below diagram demonstrates driver communication on the platform in a typical case. + +.. image:: files/driver_flow.png + +1. Platform agents and agents developed and/or installed by users communicate with the platform via pub/sub or JSON-RPC. + Agents share data for a number of reasons including querying historians for data to use in control algorithms, + fetching data from remote web APIs and monitoring. +2. A user agent which wants to request data ad-hoc sends a JSON-RPC request to the Platform Driver to `get_point`, asking + the driver to fetch the most up-to-date point data for the point topic provided. + + .. note:: + + For periodic `scrape_all` data publishes, step 2 is not required. The Platform Driver is configured to + automatically collect all point data for a device on a regular interval and publish the data to the bus. + +3. A user agent sends a request to the actuator to establish a schedule for sending device control signals, and during + the scheduled time sends a `set_point` request to the Actuator. Given that the control signal arrives during the + scheduled period, the Actuator forwards the request to the Platform Driver. If the control signal arrives outside the + scheduled period or without an existing schedule, a LockError exception will be thrown. +4. The Platform Driver issues a `get_point`/`set_point` call to the Driver corresponding to the request it was sent. +5. The device driver uses the interface class it is configured for to send a data request or control signal to the + device (i.e. the BACnet driver issues a `readProperty` request to the device). +6. The device returns a response indicating the current state. +7. The the response is forwarded to the requesting device. In the case of a `scrape_all`, the device data is published + to the message bus. + + +Special Case Drivers +==================== + +Some drivers require a different communication paradigm. One common alternative is shown in the diagram below: + +.. image:: files/proxy_driver_flow.png + +This example describes an alternative pattern wherein BACnet drivers communicate via a BACnet proxy agent to communicate +with end devices. This behavior is derived from the networking requirements of the BACnet specification. BACnet +communication in the network layer requires that only one path exist between BACnet devices on a network. +In this case, the BACnet proxy acts as a virtual BACnet device, and device drivers forward their requests to this agent +which then implements the BACnet communication (whereas the typical pattern would have devices communicate directly with +the corresponding device). There are many other situations which may require this paradigm to be adopted (such as +working with remote APIs with request limits), and it is up to the party implementing the driver to determine if this +pattern or another pattern may be the most appropriate implementation pattern for their respective use case. + +.. note:: + + Other requirements for driver communication patterns may exist, but on an individual basis. Please refer to the + documentation for the driver of interest for more about any atypical pattern that must be adhered to. + + +Installing the Fake Driver +************************** + +The Fake Driver is included as a way to quickly see data published to the message bus in a format that mimics what a +true driver would produce. This is a simple implementation of the VOLTTRON driver framework. + +See :ref:`instructions for installing the fake driver ` + +To view data being published from the fake driver on the message bus, one can +:ref:`install the Listener Agent ` and read the VOLTTRON log file: + +.. code-block:: bash + + cd + tail -f volttron.log diff --git a/docs/source/driver-framework/ecobee/ecobee-web-driver.rst b/docs/source/driver-framework/ecobee/ecobee-web-driver.rst new file mode 100644 index 0000000000..8a7e727529 --- /dev/null +++ b/docs/source/driver-framework/ecobee/ecobee-web-driver.rst @@ -0,0 +1,578 @@ +.. _ecobee-web-driver: + +============= +Ecobee Driver +============= + +The Ecobee driver is an implementation of a :ref:`VOLTTRON driver framework ` Interface. +In this case, the Platform Driver issues commands to the Ecobee driver to collect data from and send control signals to +`Ecobee's remote web API `_ + +.. note:: + + Reading the driver framework and driver configuration documentation prior to following this guide will help the user + to understand drivers, driver communication, and driver configuration files. + +This guide covers: + +* Creating an Ecobee application via the web interface +* Creating an Ecobee driver configuration file, including finding the user's Ecobee API key and Ecobee thermostat serial + number +* Creating an Ecobee registry configuration file +* Installing the Platform Driver and loading Ecobee driver and registry configurations +* Starting the driver and viewing Ecobee data publishes + + +.. _Ecobee-Application: + +Ecobee Application +================== + +Connecting the Ecobee driver to the Ecobee API requires configuring your account with an Ecobee application. + +#. Log into the `Ecobee site `_ + +#. Click on the "hamburger" icon on the right to open the account menu, then click "Developer" + + .. image:: files/ecobee_developer_menu.png + +#. On the bottom-left corner of the screen that appears, click "Create New" + + .. image:: files/ecobee_create_app.png + +#. Fill out the name, summary, and description forms as desired. Click "Authorization Method" and from the drop-down + that appears, select "ecobee PIN" (this will enable an extra layer of authentication to protect your account) + +#. Record the API key for the Application from the Developer menu + + .. figure:: files/ecobee_api_key.png + + From Ecobee `authenication docs `_ + + +Configuration Files +=================== + +The Ecobee driver uses two configuration files, a driver configuration which sets the parameters of the behavior of the +driver, and registry configuration which instructs the driver on how to interact with each point. + +This is an example driver configuration: + +.. code-block:: JSON + + { + "driver_config": { + "API_KEY": "abc123", + "DEVICE_ID": 8675309 + }, + "driver_type": "ecobee", + "registry_config":"config://campus/building/ecobee.csv", + "interval": 180, + "timezone": "UTC" + } + +The driver configuration works as follows: + +.. csv-table:: Driver Configuration Description + :header: Config Field,Description + + driver_config,This section specifies values used by the driver agent during operation + API_KEY,This is the User's API key. This must be obtained by the user from the Ecobee web UI and provided in this part of the configuration. Notes on how to do this will be provided below + DEVICE_ID,This is the device number of the Ecobee thermostat the driver is responsible for operating. This must be obtained by the user from the Ecobee web UI. Notes on how to do this will be provided below + driver_type,This value should match the name of the python file which contains the interface class implementation for the Ecobee driver and should not change + registry_config,This should a user specified path of the form "config://. It is recommended to use the device topic string following "devices" with the file extension ("config:///`_ + +.. note:: + + Values for API_KEY and DEVICE_ID must be obtained by the user. DEVICE_ID should be added as an integer + representation of the thermostat's serial number. + + **Getting API Key** + + Ecobee API keys require configuring an application using the Ecobee web UI. For more information on configuring an + application and obtaining the API key, please refer to the :ref:`Ecobee Application ` heading in + this documentation. + + **Finding Device Identifier** + + To find your Ecobee thermostat's device identifier: + + 1. Log into the `Ecobee customer portal `_ + 2. From the Home screen click "About My Ecobee" + 3. The thermostat identifier is the serial number listed on the About screen + + +Registry Configuration +---------------------- + +This file specifies how data is read from Ecobee API response data as well as how points are set via the Platform Driver +and actuator. + +It is likely that more points may be added to obtain additional data, but barring implementation changes by Ecobee it is +unlikely that the values in this configuration will need to change substantially, as most thermostats provide the +same range of data in a similar format. + +This is an example registry configuration: + +.. csv-table:: Registry Configuration Example + :header: Point Name,Volttron Point Name,Units,Type,Writeable,Readble,Default Value,Notes + + fanMinOnTime,fanMinOnTime,seconds,setting,True,True,, + hvacMode,hvacMode,seconds,setting,True,True,, + humidity,humidity,%,setting,False,True,, + coolHoldTemp,coolHoldTemp,degF,hold,True,False,, + heatHoldTemp,heatHoldTemp,degf,hold,True,False,, + actualTemperature,ActualTemperature,degF,hold,False,True,, + +This configuration works as follows: + +.. csv-table:: Registry Configuration Description + :header: Config Field,Description + + Point Name,Name of a point as it appears in Ecobee response data (example below) + Volttron Point Name,Name of a point as a user would like it to be displayed in data publishes to the message bus + Units,Unit of measurement specified by remote API + Type,"The Ecobee driver registry configuration supports 'setting' and 'hold' register types, based on how the data is represented in Ecobee response data (example below)" + Writable,"Whether or not the point is able to be written to. This may be determined by what Ecobee allows, and by the operation of Ecobee's API (to set an Ecobee cool/heat hold, cool/HoldTemp is used, but to read other data points are used and therefore are not writable; this is a quirk of Ecobee's API)" + Readable,"Whether or not the point is able to be read as specified. This may be determined by what Ecobee allows, and by the operation of Ecobee's API (to set an Ecobee cool/heat hold, cool/HoldTemp is used, however the requested hold values are represented as desiredCool/Heat in Ecobee's response data; this is a quirk of Ecobee's API)" + Default Value,"Used to send device defaults to the Ecobee API, this is optional" + Notes,"Any user specified notes, this is optional" + +An example registry configuration containing all points from the development device is available in the +`examples/configurations/drivers/ecobee.csv` file in the VOLTTRON repository. + +For additional explanation on the quirks of Ecobee's readable/writable points, visit: +https://www.ecobee.com/home/developer/api/documentation/v1/functions/SetHold.shtml + + +Installation +============ + +The following instructions make up the minimal steps required to set up an instance of the Ecobee driver on the VOLTTRON +platform and connect it to the Ecobee remote API: + +#. Create a directory using the path $VOLTTRON_ROOT/configs and create two files, `ecobee.csv` and `ecobee.config`. + Copy the registry config to the `ecobee.csv` file and the driver config to the `ecobee.config file`. Modify the + `API_KEY` and `DEVICE_ID` fields from the driver config with your own API key and device serial number. + +#. If the platform has not been started: + + .. code-block:: Bash + + ./start-volttron + +#. Be sure that the environment has been activated - you should see (volttron) next to @ in your terminal + window. To activate an environment, use the following command. + + .. code-block:: Bash + + source env/bin/activate + +#. Install a Platform Driver if one is not yet installed + + .. code-block:: Bash + + python scripts/install-agent.py --agent-source services/core/PlatformDriverAgent --config \ + examples/configurations/drivers/platform-driver.agent --tag platform.driver + +#. Load the driver configuration into the configuration store ("vctl config list platform.driver" can be used to show + installed configurations) + + .. code-block:: Bash + + vctl config store platform.driver devices/campus/building/ecobee $VOLTTRON_ROOT/configs/ecobee.config + +#. Load the driver's registry configuration into the configuration store + + .. code-block:: Bash + + vctl config store platform.driver campus/building/ecobee.csv $VOLTTRON_ROOT/configs/ecobee.csv --csv + +#. Start the platform driver + + .. code-block:: Bash + + vctl start platform.driver + +At this point, the platform driver will start, configure the driver agent, and data should start to publish on the publish +interval. + +.. note:: + + If starting the driver for the first time, or if the authorization which is managed by the driver is out of date, + the driver will perform some additional setup internally to authenticate the driver with the Ecobee API. This stage + will require the user enter a pin provided in the `volttron.log` file to the Ecobee web UI. The Ecobee driver has + a wait period of 60 seconds to allow users to enter the pin code into the Ecobee UI. Instructions for pin + verification follow. + + +PIN Verification steps: +----------------------- + +#. Obtain the pin from the VOLTTRON logs. The pin is a 4 character long string in the logs flanked by 2 rows of + asterisks + + .. image:: files/ecobee_pin.png + +#. Log into the `Ecobee UI `_ . After logging in, the + customer dashboard will be brought up, which features a series of panels (where the serial number was found for + device configuration) and a "hamburger" menu. + + .. image:: files/ecobee_console.png + +#. Add the application: Click the "hamburger" icon which will display a list of items in a panel that becomes + visible on the right. Click "My Apps", then "Add application". A text form will appear, enter the pin provided in + VOLTTRON logs here, then click "validate" and "add application. + + .. image:: files/ecobee_verify_pin.png + +This will complete the pin verification step. + + +Ecobee Driver Usage +=================== + +At the configured interval, the platform driver will publish a JSON object +with data obtained from Ecobee based on the provided configuration files. + +To view the publishes in the `volttron.log` file, install and start a ListenerAgent: + +.. code-block:: Bash + + python scripts/install-agent.py -s examples/ListenerAgent + +The following is an example publish: + +.. code-block:: Bash + + 'Status': [''], + 'Vacations': [{'coolHoldTemp': 780, + 'coolRelativeTemp': 0, + 'drRampUpTemp': 0, + 'drRampUpTime': 3600, + 'dutyCyclePercentage': 255, + 'endDate': '2020-03-29', + 'endTime': '08:00:00', + 'fan': 'auto', + 'fanMinOnTime': 0, + 'heatHoldTemp': 660, + 'heatRelativeTemp': 0, + 'holdClimateRef': '', + 'isCoolOff': False, + 'isHeatOff': False, + 'isOccupied': False, + 'isOptional': True, + 'isTemperatureAbsolute': True, + 'isTemperatureRelative': False, + 'linkRef': '', + 'name': 'Skiing', + 'occupiedSensorActive': False, + 'running': False, + 'startDate': '2020-03-15', + 'startTime': '20:00:00', + 'type': 'vacation', + 'unoccupiedSensorActive': False, + 'vent': 'off', + 'ventilatorMinOnTime': 5}], + 'actualTemperature': 720, + 'desiredCool': 734, + 'desiredHeat': 707, + 'fanMinOnTime': 0, + 'humidity': '36', + 'hvacMode': 'off'}, + {'Programs': {'type': 'custom', 'tz': 'UTC', 'units': None}, + 'Status': {'type': 'list', 'tz': 'UTC', 'units': None}, + 'Vacations': {'type': 'custom', 'tz': 'UTC', 'units': None}, + 'actualTemperature': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, + 'coolHoldTemp': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, + 'desiredCool': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, + 'desiredHeat': {'type': 'integer',S 'tz': 'UTC', 'units': 'degF'}, + 'fanMinOnTime': {'type': 'integer', 'tz': 'UTC', 'units': 'seconds'}, + 'heatHoldTemp': {'type': 'integer', 'tz': 'UTC', 'units': 'degF'}, + 'humidity': {'type': 'integer', 'tz': 'UTC', 'units': '%'}, + 'hvacMode': {'type': 'bool', 'tz': 'UTC', 'units': 'seconds'}}] + +Individual points can be obtained via JSON RPC on the VOLTTRON Platform. +In an agent: + +.. code-block:: Python + + self.vip.rpc.call("platform.driver", "get_point", , ) + + +Set_point Conventions +--------------------- + +.. note:: + + Examples from this section are from Ecobee's documentation. + +The Ecobee Web API requires a variety of objects to be supplied for the various functionalities: setting a hold, adding +a vacation and adding a program require creating a JSON object. Each object is described in its corresponding section +below. + +To set points using the Ecobee driver, it is recommended to use the actuator agent. If you are not familiar with the +Actuator, :ref:`read the documentation ` and check out the example agent code at +`examples/CSVDriver/CsvDriverAgent/agent.py` in the VOLTTRON repository. + + +Setting an Ecobee "Setting" +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Ecobee "Settings" points are simple points which are similar to a typical set point. Many settings are boolean values +for basic Ecobee configuration settings (such as whether the temperature should be in degrees Celsius or Fahrenheit). +Setting a "Setting" point is as simple as making an RPC request to the Actuator's `set_point` method with a supplied +point name and desired setting. Consider a "setting" point `useCelsius`; use the following code to send a `set_point` +RPC request: + +.. code-block:: python + + self.vip.rpc.call('platform.actuator', 'devices/campus/building/ecobee/useCelsius', True) + + +Setting a Hold +^^^^^^^^^^^^^^ + +Setting a Hold requires creating a `params` JSON object for the hold, many holds require setting more than one value +each. For example, setting a temperature hold requires setting the upper (coolHoldTemp) and lower (heatHoldTemp) bounds +desired. Create a Hold params object and send it as the contents of a `set_point` RPC call to the Actuator. + +Example Hold params object: + +.. code-block:: json + + { + "holdType":"nextTransition", + "heatHoldTemp":680, + "coolHoldTemp":720 + } + +Body of the HTTP request sent by the driver to Ecobee's Web API: + +:: + + { + "selection": { + "selectionType": "thermostats", + "selectionMatch": "" + }, + "functions": [ + { + "type": "setHold", + "params": { + # user-specified params object + } + } + ] + } + +.. note:: + + In a heat/coolHoldTemp hold, ``holdType``, ``heatHoldTemp``, and ``coolHoldTemp`` values are all required by the + Ecobee web API. In this case, the `holdType` describes how the hold should be applied, the `heatHoldTemp` is the + lower temperature bound for the hold, and the `coolHoldTemp` is the upper temperature bound. + +RPC request to the actuator: + +.. code-block:: python + + self.vip.rpc.call('platform.actuator', 'devices/campus/building/ecobee/heatHoldTemp', True) + +.. note:: + + In Ecobee data, a Hold set by the user is sometimes denoted as "desired" and the sensor reading for the held + value as "actual". For example, a Hold set by a user called `heatHoldTemp` can be found in Ecobee publishes + as `desiredHeat` and the actual temperature reading as `actualTemperature`. + +Ecobee's documentation on Hold objects can be found here: + + +Adding and Deleting a Vacation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +To add a vacation, call the `set_point` JSON-RPC method of the Actuator, providing the vacation parameters object +required by Ecobee along with the Vacation point. The params object is sent inside a create vacation object sent to the +web API: + +:: + + { + "selection": { + "selectionType":"registered", + "selectionMatch":"" + }, + "functions": [ + { + "type":"createVacation", + "params":{ + # user-specified params object + } + } + ] + } + +It is possible to supply complex objects including values for fans, vents, occupation status, etc. but a basic vacation +requires only a name, cool and heat hold temperatures, start and end dates with start and end times. Example: + +.. code-block:: json + + { + "name": "Skiing", + "coolHoldTemp": 780, + "heatHoldTemp": 660, + "startDate": "2016-03-15", + "startTime": "20:00:00", + "endDate": "2016-03-29", + "endTime": "08:00:00" + } + +Providing a params object which does not contain these required values will result in the driver throwing an error. + +Example `set_point` RPC call for Vacation: + +.. code-block:: python + + self.vip.rpc.call('platform.actuator', 'set_point', 'devices/campus/building/ecobee/Vacation', params) + +It is also possible to delete a stored vacation object. To do so, supply the vacation name specified in the params +object with the delete keyword set to True. + +.. code-block:: python + + self.vip.rpc.call('platform.actuator', 'set_point', + 'devices/campus/building/ecobee/Vacation', "Skiing", True) + +A more in-depth example of using the Ecobee web API endpoint for setting a vacation can be found here: +https://www.ecobee.com/home/developer/api/examples/ex9.shtml + + +Adding a Program +^^^^^^^^^^^^^^^^ + +Programs can also be added using the Ecobee driver. To add a program, the user should supply an Ecobee program object +in the `set_point` JSON-RPC request: + +:: + + { + "selection": { + "selectionType":"registered", + "selectionMatch":"" + }, + "thermostat": { + "program": { + + } + } + } + +Program objects consist of a list of "climate" objects and "schedule" objects. Climate objects specify the climate +settings which correspond to a climate name (for example, a "Warm" climate may be set for a high heat and cool hold +temp). Schedule objects list the desired climate settings for every half hour of the day (48 total) for 7 days, each +referring to a climate name. + +Example climate: + +.. code-block:: json + + { + "name": "Warm", + "isOccupied": true, + "isOptimized": false, + "coolFan": "auto", + "heatFan": "auto", + "vent": "off", + "ventilatorMinOnTime": 20, + "owner": "system", + "type": "program", + "coolTemp": 752, + "heatTemp": 740 + } + +Example Schedule: + +:: + + [ + [ + "sleep", + "sleep", + "sleep", + "sleep", + "home", + "home", + "home", + ... + ], + ... + ] + +Program Body: + +:: + + { + "schedule": [ + [ + "sleep", + "sleep", + ... + "home", + "sleep" + ], + ... + ], + "climates": [ + { + "name": "Sleep", + "climateRef": "sleep", + "isOccupied": true, + "isOptimized": false, + "coolFan": "auto", + "heatFan": "auto", + "vent": "off", + "ventilatorMinOnTime": 20, + "owner": "system", + "type": "program", + "colour": 2179683, + "coolTemp": 752, + "heatTemp": 662 + }, + ... + ] + } + +Example `set_point` RPC call for Program: + +.. code-block:: python + + self.vip.rpc.call('platform.actuator', 'set_point', 'devices/campus/building/ecobee/Vacation', program_body) + +If the user would like to resume the existing program instead, it is possible to specify ``None`` for the program body +with the keyword ``resume_all`` set to ``True``. + +Example `set_point` RPC call to resume the Program: + +.. code-block:: python + + self.vip.rpc.call('platform.actuator', 'set_point', + 'devices/campus/building/ecobee/Vacation', None, True) + +A more in-depth example describing the objects required by the Ecobee web API endpoint for setting a program can be +found here: https://www.ecobee.com/home/developer/api/examples/ex11.shtml + + +Status +^^^^^^ + +The `Status` point is a read-only register supplying the running status of the HVAC systems the thermostat is +interfacing with. `set_point` is not available for this point; `set_point` RPC calls for this point will raise a +`NotImplementedError` exception. + + +Versioning +---------- + +The Ecobee driver has been tested using the May 2019 API release as well as device firmware version 4.5.73.24 diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_add_app.png b/docs/source/driver-framework/ecobee/files/ecobee_add_app.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_add_app.png rename to docs/source/driver-framework/ecobee/files/ecobee_add_app.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_api_key.png b/docs/source/driver-framework/ecobee/files/ecobee_api_key.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_api_key.png rename to docs/source/driver-framework/ecobee/files/ecobee_api_key.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_apps.png b/docs/source/driver-framework/ecobee/files/ecobee_apps.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_apps.png rename to docs/source/driver-framework/ecobee/files/ecobee_apps.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_console.png b/docs/source/driver-framework/ecobee/files/ecobee_console.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_console.png rename to docs/source/driver-framework/ecobee/files/ecobee_console.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_create_app.png b/docs/source/driver-framework/ecobee/files/ecobee_create_app.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_create_app.png rename to docs/source/driver-framework/ecobee/files/ecobee_create_app.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_developer_menu.png b/docs/source/driver-framework/ecobee/files/ecobee_developer_menu.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_developer_menu.png rename to docs/source/driver-framework/ecobee/files/ecobee_developer_menu.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_pin.png b/docs/source/driver-framework/ecobee/files/ecobee_pin.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_pin.png rename to docs/source/driver-framework/ecobee/files/ecobee_pin.png diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_verify_pin.png b/docs/source/driver-framework/ecobee/files/ecobee_verify_pin.png similarity index 100% rename from docs/source/core_services/drivers/driver_configuration/files/ecobee_verify_pin.png rename to docs/source/driver-framework/ecobee/files/ecobee_verify_pin.png diff --git a/docs/source/driver-framework/fake-driver/fake-driver.rst b/docs/source/driver-framework/fake-driver/fake-driver.rst new file mode 100644 index 0000000000..6f507d90f5 --- /dev/null +++ b/docs/source/driver-framework/fake-driver/fake-driver.rst @@ -0,0 +1,157 @@ +.. _Fake-Driver: + +=========== +Fake Driver +=========== + +The FakeDriver is included as a way to quickly see data published to the message bus in a format +that mimics what a true Driver would produce. This is an extremely simple implementation of the +:ref:`VOLTTRON driver framework `. + + +Fake Device Driver Configuration +================================ + +This driver does not connect to any actual device and instead produces random and or pre-configured values. + + +Driver Config +------------- + +There are no arguments for the `driver_config` section of the device configuration file. The `driver_config` entry must +still be present and should be left blank. + +Here is an example device configuration file: + +.. code-block:: json + + { + "driver_config": {}, + "driver_type": "bacnet", + "registry_config":"config://registry_configs/vav.csv", + "interval": 5, + "timezone": "UTC", + "heart_beat_point": "heartbeat" + } + +A sample fake device configuration file can be found in the VOLTTRON repository in +`examples/configurations/drivers/fake.config` + +Fake Device Registry Configuration File +--------------------------------------- + +The registry configuration file is a `CSV `_ file. Each row +configures a point on the device. + +The following columns are required for each row: + + - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this + point. For instance, if the `Volttron Point Name` is `HeatCall1` (and using the example device configuration + above) then an agent would use `pnnl/isb2/hvac1/HeatCall1` to refer to the point when using the RPC interface of + the actuator agent. + - **Units** - Used for meta data when creating point information on the historian. + - **Writable** - Either `TRUE` or `FALSE`. Determines if the point can be written to. Only points labeled `TRUE` + can be written to through the ActuatorAgent. Points labeled `TRUE` incorrectly will cause an error to be returned + when an agent attempts to write to the point. + + +The following columns are optional: + + - **Starting Value** - Initial value for the point. If the point is reverted it will change back to this value. By + default, points will start with a random value (1-100). + - **Type** - Value type for the point. Defaults to "string". Valid types are: + + * string + * integer + * float + * boolean + +Any additional columns will be ignored. It is common practice to include a `Point Name` or `Reference Point Name` to +include the device documentation's name for the point and `Notes` and `Unit Details` for additional information +about a point. Please note that there is nothing in the driver that will enforce anything specified in the +`Unit Details` column. + +.. csv-table:: BACnet + :header: Volttron Point Name,Units,Units Details,Writable,Starting Value,Type,Notes + + Heartbeat,On/Off,On/Off,TRUE,0,boolean,Point for heartbeat toggle + OutsideAirTemperature1,F,-100 to 300,FALSE,50,float,CO2 Reading 0.00-2000.0 ppm + SampleWritableFloat1,PPM,10.00 (default),TRUE,10,float,Setpoint to enable demand control ventilation + SampleLong1,Enumeration,1 through 13,FALSE,50,int,Status indicator of service switch + SampleWritableShort1,%,0.00 to 100.00 (20 default),TRUE,20,int,Minimum damper position during the standard mode + SampleBool1,On / Off,on/off,FALSE,TRUE,boolean,Status indicator of cooling stage 1 + SampleWritableBool1,On / Off,on/off,TRUE,TRUE,boolean,Status indicator + +A sample fake registry configuration file can be found +`here `_ +or in the VOLTTRON repository in ``examples/configurations/drivers/fake.csv`` + + +.. _Fake-Driver-Install: + +Installation +============ + +Installing a Fake driver in the :ref:`Platform Driver Agent ` requires adding copies of the device +configuration and registry configuration files to the Platform Driver's :ref:`configuration store ` + +- Create a config directory (if one doesn't already exist) inside your Volttron repository: + +.. code-block:: bash + + mkdir config + +All local config files will be worked on here. + +- Copy over the example config file and registry config file from the VOLTTRON repository: + +.. code-block:: bash + + cp examples/configurations/drivers/fake.config config/ + cp examples/configurations/drivers/fake.csv config/ + +- Edit the driver config `fake.config` for the paths on your system: + +.. code-block:: json + + { + "driver_config": {}, + "registry_config": "config://fake.csv", + "interval": 5, + "timezone": "US/Pacific", + "heart_beat_point": "Heartbeat", + "driver_type": "fakedriver", + "publish_breadth_first_all": false, + "publish_depth_first": false, + "publish_breadth_first": false + } + +- Create a copy of the Platform Driver config from the VOLTTRON repository: + +.. code-block:: bash + + cp examples/configurations/drivers/platform-driver.agent config/fake-platform-driver.config + +- Add fake.csv and fake.config to the :ref:`configuration store `: + +.. code-block:: bash + + vctl config store platform.driver devices/campus/building/fake config/fake.config + vcfl config store platform.driver fake.csv config/fake.csv --csv + +- Edit `fake-platform-driver.config` to reflect paths on your system + +.. code-block:: json + + { + "driver_scrape_interval": 0.05 + } + +- Use the scripts/install-agent.py script to install the Platform Driver agent: + +.. code-block:: bash + + python scripts/install-agent.py -s services/core/PlatformDriverAgent -c config/fake-platform-driver.config + +- If you have a :ref:`Listener Agent` already installed, you should start seeing data being published to + the bus. diff --git a/docs/source/driver-framework/files/driver_flow.png b/docs/source/driver-framework/files/driver_flow.png new file mode 100755 index 0000000000..1ff7295ff2 Binary files /dev/null and b/docs/source/driver-framework/files/driver_flow.png differ diff --git a/docs/source/driver-framework/files/proxy_driver_flow.png b/docs/source/driver-framework/files/proxy_driver_flow.png new file mode 100755 index 0000000000..6606ac9cf0 Binary files /dev/null and b/docs/source/driver-framework/files/proxy_driver_flow.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/ieee-2030-driver.rst b/docs/source/driver-framework/ieee-2030_5/ieee-2030_5-driver.rst similarity index 63% rename from docs/source/core_services/drivers/driver_configuration/ieee-2030-driver.rst rename to docs/source/driver-framework/ieee-2030_5/ieee-2030_5-driver.rst index 5346545b1e..dfc20ee9fd 100644 --- a/docs/source/core_services/drivers/driver_configuration/ieee-2030-driver.rst +++ b/docs/source/driver-framework/ieee-2030_5/ieee-2030_5-driver.rst @@ -1,16 +1,18 @@ -.. _IEEE2030_5-Driver-Config: +.. _IEEE-2030_5-Driver: -IEEE 2030.5 Driver Configuration --------------------------------- +============================ +IEEE 2030.5 (SEP 2.0) Driver +============================ Communicating with IEEE 2030.5 devices requires that the IEEE 2030.5 Agent is configured and running. All device communication happens through this agent. For information about the IEEE 2030.5 Agent, -please see :ref:`IEEE2030_5`. +please see :ref:`IEEE 2030.5 Agent ` docs. -driver_config -************* -There are two arguments for the "driver_config" section of the IEEE 2030.5 device configuration file: +Driver Config +============= + +There are two arguments for the `driver_config` section of the IEEE 2030.5 device configuration file: - **sfdi** - Short-form device ID of the IEEE 2030.5 device. - **ieee2030_5_agent_id** - ID of VOLTTRON's IEEE 2030.5 agent. @@ -35,14 +37,16 @@ Here is a sample IEEE 2030.5 device configuration file: } A sample IEEE 2030.5 driver configuration file can be found in the VOLTTRON repository -in ``services/core/MasterDriverAgent/example_configurations/test_ieee2030_5_1.config``. +in ``services/core/PlatformDriverAgent/example_configurations/test_ieee2030_5_1.config``. -.. _IEEE2030_5-Driver: -IEEE 2030.5 Registry Configuration File -*************************************** +Registry Configuration +====================== -For a description of IEEE 2030.5 registry values, see :ref:`IEEE2030_5`. +For a description of IEEE 2030.5 registry values, see :ref:`IEEE-2030_5-Agent`. A sample IEEE 2030.5 registry configuration file can be found in the VOLTTRON repository -in ``services/core/MasterDriverAgent/example_configurations/ieee2030_5.csv``. +in ``services/core/PlatformDriverAgent/example_configurations/ieee2030_5.csv``. + +View the :ref:`IEEE 2030.5 agent specification document ` to learn more about IEEE 2030.5 and +the IEEE 2030.5 agent and driver. diff --git a/docs/source/driver-framework/modbus/modbus-driver.rst b/docs/source/driver-framework/modbus/modbus-driver.rst new file mode 100644 index 0000000000..66c89099a0 --- /dev/null +++ b/docs/source/driver-framework/modbus/modbus-driver.rst @@ -0,0 +1,132 @@ +.. _Modbus-Driver: + +============= +Modbus Driver +============= + + + +VOLTTRON's modbus driver supports the Modbus over TCP/IP protocol only. For Modbus RTU support, see VOLTTRON's +`Modbus-TK driver `. + +`About Modbus protocol `_ + + +.. _Modbus-Config: + +Modbus Driver Configuration +=========================== + +Requirements +------------ +The Modbus driver requires the pymodbus package. This package can be installed in an activated environment with: + +.. code-block:: bash + + pip install pymodbus + +Alternatively this requirement can be installed using :ref:`bootstrap.py ` with the ``--drivers`` +option: + +.. code-block:: bash + + python3 bootstrap.py --drivers + + +Driver Configuration +-------------------- + +There are three arguments for the `driver_config` section of the device configuration file: + + - **device_address** - IP Address of the device. + - **port** - Port the device is listening on. Defaults to 502 which is the standard port for Modbus devices. + - **slave_id** - Slave ID of the device. Defaults to 0. Use 0 for no slave. + +The remaining values are as follows: + + + +Here is an example device configuration file: + +.. code-block:: json + + { + "driver_config": {"device_address": "10.1.1.2", + "port": 502, + "slave_id": 5}, + "driver_type": "modbus", + "registry_config":"config://registry_configs/hvac.csv", + "interval": 60, + "timezone": "UTC", + "heart_beat_point": "heartbeat" + } + +A sample MODBUS configuration file can be found in the VOLTTRON repository in +`examples/configurations/drivers/modbus.config` + + +.. _Modbus-Registry-Configuration: + +Modbus Registry Configuration File +---------------------------------- + +The registry configuration file is a `CSV `_ file. Each row +configures a point on the device. + +The following columns are required for each row: + + - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this + point. For instance, if the Volttron Point Name is HeatCall1 (and using the example device configuration above) + then an agent would use `pnnl/isb2/hvac1/HeatCall1` to refer to the point when using the RPC interface of the + actuator agent. + - **Units** - Used for meta data when creating point information on the historian. + - **Modbus Register** - A string representing how to interpret the data register and how to read it from the device. + The string takes two forms: + + + "BOOL" for coils and discrete inputs. + + A format string for the Python struct module. See + `the Python3 Struct docs `_ for full documentation. The + supplied format string must only represent one value. See the documentation of your device to determine how to + interpret the registers. Some Examples: + + * ">f" - A big endian 32-bit floating point number. + * "l" - A big endian 32-bit integer. + + - **Writable** - Either `TRUE` or `FALSE`. Determines if the point can be written to. Only points labeled + **TRUE** can be written to through the ActuatorAgent. + - **Point Address** - Modbus address of the point. Cannot include any offset value, it must be the exact value of + the address. + - **Mixed Endian** - (Optional) Either `TRUE` or `FALSE`. For mixed endian values. This will reverse the order + of the Modbus registers that make up this point before parsing the value or writing it out to the device. Has no + effect on bit values. + +The following column is optional: + + - **Default Value** - The default value for the point. When the point is reverted by an agent it will change back + to this value. If this value is missing it will revert to the last known value not set by an agent. + +Any additional columns will be ignored. It is common practice to include a `Point Name` or `Reference Point Name` to +include the device documentation's name for the point and `Notes` and `Unit Details` for additional information +about a point. + +The following is an example of a Modbus registry configuration file: + +.. csv-table:: Catalyst 371 + :header: Reference Point Name,Volttron Point Name,Units,Units Details,Modbus Register,Writable,Point Address,Default Value,Notes + + CO2Sensor,ReturnAirCO2,PPM,0.00-2000.00,>f,FALSE,1001,,CO2 Reading 0.00-2000.0 ppm + CO2Stpt,ReturnAirCO2Stpt,PPM,1000.00 (default),>f,TRUE,1011,1000,Setpoint to enable demand control ventilation + Cool1Spd,CoolSupplyFanSpeed1,%,0.00 to 100.00 (75 default),>f,TRUE,1005,75,Fan speed on cool 1 call + Cool2Spd,CoolSupplyFanSpeed2,%,0.00 to 100.00 (90 default),>f,TRUE,1007,90,Fan speed on Cool2 Call + Damper,DamperSignal,%,0.00 - 100.00,>f,FALSE,1023,,Output to the economizer damper + DaTemp,DischargeAirTemperature,F,(-)39.99 to 248.00,>f,FALSE,1009,,Discharge air reading + ESMEconMin,ESMDamperMinPosition,%,0.00 to 100.00 (5 default),>f,TRUE,1013,5,Minimum damper position during the energy savings mode + FanPower,SupplyFanPower, kW,0.00 to 100.00,>f,FALSE,1015,,Fan power from drive + FanSpeed,SupplyFanSpeed,%,0.00 to 100.00,>f,FALSE,1003,,Fan speed from drive + HeatCall1,HeatCall1,On / Off,on/off,BOOL,FALSE,1113,,Status indicator of heating stage 1 need + HeartBeat,heartbeat,On / Off,on/off,BOOL,FALSE,1114,,Status indicator of heating stage 2 need + +A sample Modbus registry file can be found +`here `_ +or in the VOLTTRON repository in `examples/configurations/drivers/catalyst371.csv` diff --git a/docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst b/docs/source/driver-framework/modbus/modbus-tk-driver.rst similarity index 58% rename from docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst rename to docs/source/driver-framework/modbus/modbus-tk-driver.rst index d6b134bb0c..c8073a8458 100644 --- a/docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst +++ b/docs/source/driver-framework/modbus/modbus-tk-driver.rst @@ -1,71 +1,92 @@ -.. _Modbus-TK-config: +.. _Modbus-TK-Driver: -Modbus-TK Driver Configuration ------------------------------- +================ +Modbus TK Driver +================ + +VOLTTRON's Modbus-TK driver, built on the Python Modbus-TK library, is an alternative to the original VOLTTRON modbus +driver. Unlike the original modbus driver, the Modbus-TK driver supports Modbus RTU as well as Modbus over TCP/IP. + +`About Modbus protocol `_ -.. warning:: Currently the modbus_tk library is not able to make connections from 2 masters on one host to 2 slaves +The Modbus-TK driver introduces a map library and configuration builder, intended as a way to streamline configuration +file creation and maintenance. + +.. warning:: Currently the modbus_tk library is not able to make connections from 2 Modbus masters on one host to 2 slaves on one host - this will will prevent a single platform from being able to communicate to 2 slaves on IP as each instance of a Modbus_Tk driver creates a new Modbus master. `Issue on Modbus_Tk Github `_. -VOLTTRON's Modbus-TK driver, built on the Python Modbus-TK library, is an alternative to the -original VOLTTRON modbus driver. Unlike the original modbus driver, the Modbus-TK driver -supports Modbus RTU as well as Modbus over TCP/IP. -The Modbus-TK driver introduces a map library and configuration builder, intended as a way -to streamline configuration file creation and maintenance. +.. _Modbus-TK-Config: + +Modbus-TK Driver Configuration +============================== + +The Modbus-TK driver is mostly backward-compatible with the parameter definitions in the original Modbus driver's +configuration (.config and .csv files). If the config file's parameter names use the Modbus driver's name conventions, +they are translated to the Modbus-TK name conventions, e.g. a Modbus CSV file's ``Point Address`` is interpreted as a +Modbus-TK "Address". Backward-compatibility exceptions are: -The Modbus-TK driver is mostly backward-compatible with the parameter definitions in the original -Modbus driver's configuration (.config and .csv files). -If the config file's parameter names use the Modbus driver's name conventions, they are -translated to the Modbus-TK name conventions, e.g. a Modbus CSV file's "Point Address" is -interpreted as a Modbus-TK "Address". Backward-compatibility exceptions are: + - If the config file has no ``port``, the default is 0, not 502. + - If the config file has no ``slave_id``, the default is 1, not 0. - - If the config file has no **port**, the default is 0, not 502. - - If the config file has no **slave_id**, the default is 1, not 0. Requirements ------------ The Modbus-TK driver requires the modbus-tk package. This package can be installed in an activated environment with: -:: +.. code-block:: bash pip install modbus-tk -driver_config -************* +Alternatively this requirement can be installed using :ref:`bootstrap.py ` with the ``--drivers`` +option: + +.. code-block:: bash + + python3 bootstrap.py --drivers + -The **driver_config** section of a Modbus-TK device configuration file supports a variety of parameter definitions, +Driver Configuration +-------------------- + +The ``driver_config`` section of a Modbus-TK device configuration file supports a variety of parameter definitions, but only **device_address** is required: - - **name** (Optional) - Name of the device. Defaults to "UNKNOWN". - - **device_type** (Optional) - Name of the device type. Defaults to "UNKNOWN". - - **device_address** (Required) - IP Address of the device. - - **port** (Optional) - Port the device is listening on. Defaults to 0 (no port). Use port 0 for RTU transport. - - **slave_id** (Optional) - Slave ID of the device. Defaults to 1. Use ID 0 for no slave. - - **baudrate** (Optional) - Serial (RTU) baud rate. Defaults to 9600. - - **bytesize** (Optional) - Serial (RTU) byte size: 5, 6, 7, or 8. Defaults to 8. - - **parity** (Optional) - Serial (RTU) parity: none, even, odd, mark, or space. Defaults to none. - - **stopbits** (Optional) - Serial (RTU) stop bits: 1, 1.5, or 2. Defaults to 1. - - **xonxoff** (Optional) - Serial (RTU) flow control: 0 or 1. Defaults to 0. - - **addressing** (Optional) - Data address table: offset, offset_plus, or address. Defaults to offset. + - ``name`` (Optional) - Name of the device. Defaults to "UNKNOWN". + - ``device_type`` (Optional) - Name of the device type. Defaults to "UNKNOWN". + - ``device_address`` (Required) - IP Address of the device. + - ``port`` (Optional) - Port the device is listening on. Defaults to 0 (no port). Use port 0 for RTU transport. + - ``slave_id`` (Optional) - Slave ID of the device. Defaults to 1. Use ID 0 for no slave. + - ``baudrate`` (Optional) - Serial (RTU) baud rate. Defaults to 9600. + - ``bytesize`` (Optional) - Serial (RTU) byte size: 5, 6, 7, or 8. Defaults to 8. + - ``parity`` (Optional) - Serial (RTU) parity: none, even, odd, mark, or space. Defaults to none. + - ``stopbits`` (Optional) - Serial (RTU) stop bits: 1, 1.5, or 2. Defaults to 1. + - ``xonxoff`` (Optional) - Serial (RTU) flow control: 0 or 1. Defaults to 0. + - ``addressing`` (Optional) - Data address table: offset, offset_plus, or address. Defaults to offset. + - address: The exact value of the address without any offset value. - offset: The value of the address plus the offset value. - offset_plus: The value of the address plus the offset value plus one. - : If an offset value is to be added, it is determined based on a point's properties in the CSV file: + - Type=bool, Writable=TRUE: 0 - Type=bool, Writable=FALSE: 10000 - Type!=bool, Writable=TRUE: 30000 - Type!=bool, Writable=FALSE: 40000 - - **endian** (Optional) - Byte order: big or little. Defaults to big. - - **write_multiple_registers** (Optional) - Write multiple coils or registers at a time. Defaults to true. - - : If write_multiple_registers is set to false, only register types unsigned short (uint16) and boolean (bool) + + - ``endian`` (Optional) - Byte order: big or little. Defaults to big. + - ``write_multiple_registers`` (Optional) - Write multiple coils or registers at a time. Defaults to true. + + - If write_multiple_registers is set to false, only register types unsigned short (uint16) and boolean (bool) are supported. The exception raised during the configure process. - - **register_map** (Optional) - Register map csv of unchanged register variables. Defaults to registry_config csv. -Sample Modbus-TK configuration files are checked into the VOLTTRON repository -in ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps``. + - ``register_map`` (Optional) - Register map csv of unchanged register variables. Defaults to registry_config csv. + +Sample Modbus-TK configuration files are checked into the VOLTTRON repository in +``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps``. Here is a sample TCP/IP Modbus-TK device configuration: @@ -126,29 +147,35 @@ Here is a sample RTU Modbus-TK device configuration, with completely-specified s "timezone": "UTC" } -.. _Modbus-TK-Driver: + +.. _Modbus-TK-Register-Map: Modbus-TK Register Map CSV File -******************************* +=============================== -The registry configuration file is a `CSV `_ file. +Modbus TK requires an additional registry configuration file compared to the paradigm of most other drivers. The +registry map file is an analogue to the typical registry configuration file. The +:ref:`registry configuration file ` is a simple file which maps device point names to user +specified point names. + +The registry map file is a `CSV `_ file. Each row configures a register definition on the device. - - **Register Name** (Required) - The field name in the modbus client. This field is distinct and unchangeable. - - **Address** (Required) - The point's modbus address. The **addressing** option in the driver configuration + - ``Register Name`` (Required) - The field name in the modbus client. This field is distinct and unchangeable. + - ``Address`` (Required) - The point's modbus address. The ``addressing`` option in the driver configuration controls whether this is interpreted as an exact address or an offset. - - **Type** (Required) - The point's data type: bool, string[length], float, int16, int32, int64, uint16, + - ``Type`` (Required) - The point's data type: bool, string[length], float, int16, int32, int64, uint16, uint32, or uint64. - - **Units** (Optional) - Used for metadata when creating point information on a historian. Default is an + - ``Units`` (Optional) - Used for metadata when creating point information on a historian. Default is an empty string. - - **Writable** (Optional) - TRUE/FALSE. Only points for which Writable=TRUE can be updated by a VOLTTRON agent. + - ``Writable`` (Optional) - TRUE/FALSE. Only points for which Writable=TRUE can be updated by a VOLTTRON agent. Default is FALSE. - - **Default Value** (Optional) - The point's default value. If it is reverted by an agent, it changes back + - ``Default Value`` (Optional) - The point's default value. If it is reverted by an agent, it changes back to this value. If this value is missing, it will revert to the last known value not set by an agent. - - **Transform** (Optional) - Scaling algorithm: scale(multiplier), scale_int(multiplier), scale_reg(register_name), + - ``Transform`` (Optional) - Scaling algorithm: scale(multiplier), scale_int(multiplier), scale_reg(register_name), scale_reg_power10(register_name), scale_decimal_int_signed(multiplier), mod10k(reverse), mod10k64(reverse), mod10k48(reveres) or none. Default is an empty string. - - **Table** (Optional) - Standard modbus table name defining how information is stored in slave device. + - ``Table`` (Optional) - Standard modbus table name defining how information is stored in slave device. There are 4 different tables: - discrete_output_coils: read/write coil numbers 1-9999 @@ -159,18 +186,18 @@ Each row configures a register definition on the device. If this field is empty, the modbus table will be defined by **type** and **writable** fields. By that, when user sets read only for read/write coils/registers or sets read/write for read only coils/registers, it will select wrong table, and therefore raise exception. - - **Mixed Endian** (Optional) - TRUE/FALSE. If Mixed Endian is set to TRUE, the order of the MODBUS registers will + - ``Mixed Endian`` (Optional) - TRUE/FALSE. If Mixed Endian is set to TRUE, the order of the Modbus registers will be reversed before parsing the value or writing it out to the device. By setting mixed endian, transform must be None (no op). Defaults to FALSE. - - **Description** (Optional) - Additional information about the point. Default is an empty string. + - ``Description`` (Optional) - Additional information about the point. Default is an empty string. Any additional columns are ignored. -Sample Modbus-TK registry CSV files are checked into the VOLTTRON repository -in ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps``. +Sample Modbus-TK registry map CSV files are checked into the VOLTTRON repository in +``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps``. -Here is a sample Modbus-TK registry configuration: +Here is a sample Modbus-TK registry map: .. csv-table:: :header: Register Name,Address,Type,Units,Writable,Default Value,Transform,Table @@ -185,24 +212,27 @@ Here is a sample Modbus-TK registry configuration: sample_bool,16,bool,None,TRUE,False,,analog_output_holding_registers sample_str,17,string[12],None,TRUE,hello world!,,analog_output_holding_registers -Modbus-TK Registry Configuration CSV File -***************************************** + +.. _Modbus-TK-Registry-Config: + +Modbus-TK Registry Configuration +================================ The registry configuration file is a `CSV `_ file. Each row configures a point on the device. - - **Volttron Point Name** (Required) - The name by which the platform and agents refer to the point. - For instance, if the Volttron Point Name is HeatCall1, then an agent would use ``my_campus/building2/hvac1/HeatCall1`` - to refer to the point when using the RPC interface of the actuator agent. - - **Register Name** (Required) - The field name in the modbus client. - It must be matched with the field name from **register_map**. + - ``Volttron Point Name`` (Required) - The name by which the platform and agents refer to the point. For instance, + if the Volttron Point Name is HeatCall1, then an agent would use ``my_campus/building2/hvac1/HeatCall1`` to refer + to the point when using the RPC interface of the actuator agent. + - ``Register Name`` (Required) - The field name in the modbus client. It must be matched with the field name from + ``register_map``. -Any additional columns will override the existed fields from **register_map**. +Any additional columns will override the existed fields from ``register_map``. Sample Modbus-TK registry CSV files are checked into the VOLTTRON repository -in ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps``. +in ``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps``. -Here is a sample Modbus-TK registry configuration with defined **register_map**: +Here is a sample Modbus-TK registry configuration with defined ``register_map``: .. csv-table:: :header: Volttron Point Name,Register Name @@ -217,32 +247,32 @@ Here is a sample Modbus-TK registry configuration with defined **register_map**: sample bool,sample_bool sample str,sample_str + .. _Modbus-TK-Maps: -Modbus-TK Driver Maps -********************* +Modbus-TK Driver Maps Repository +================================ -To help facilitate the creation of VOLTTRON device configuration entries (.config files) for Modbus-TK -devices, a library of device type definitions is now maintained -in ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/maps.yaml``. A -command-line tool (described below under **MODBUS TK Config Command Tool**) uses the contents -of ``maps.yaml`` while generating .config files. +To help facilitate the creation of VOLTTRON device configuration entries (.config files) for Modbus-TK devices, a +library of device type definitions is now maintained in +``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/maps.yaml``. A command-line tool (described +below under ``MODBUS TK Config Command Tool``) uses the contents of ``maps.yaml`` while generating ``.config`` files. Each device type definition in ``maps.yaml`` consists of the following properties: - - **name** (Required) - Name of the device type (see the driver_config parameters). - - **file** (Required) - The name of the CSV file that defines all of the device type's supported points, + - ``name`` (Required) - Name of the device type (see the driver_config parameters). + - ``file`` (Required) - The name of the CSV file that defines all of the device type's supported points, e.g. watts_on.csv. - - **description** (Optional) - A description of the device type. - - **addressing** (Optional) - Data address type: offset, offset_plus, or address (see the driver_config parameters). - - **endian** (Optional) - Byte order: big or little (see the driver_config parameters). - - **write_multiple_registers** (Optional) - Write multiple registers at a time. Defaults to true. + - ``description`` (Optional) - A description of the device type. + - ``addressing`` (Optional) - Data address type: offset, offset_plus, or address (see the driver_config parameters). + - ``endian`` (Optional) - Byte order: big or little (see the driver_config parameters). + - ``write_multiple_registers`` (Optional) - Write multiple registers at a time. Defaults to true. -A device type definition is a template for a device configuration. Some additional data must -be supplied when a specific device's configuration is generated. In particular, the device_address must be supplied. +A device type definition is a template for a device configuration. Some additional data must be supplied when a specific +device's configuration is generated. In particular, the device_address must be supplied. -A sample ``maps.yml`` file is checked into the VOLTTRON repository -in ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/maps.yaml``. +A sample ``maps.yml`` file is checked into the VOLTTRON repository in +``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/maps.yaml``. Here is a sample ``maps.yaml`` file: @@ -265,55 +295,58 @@ Here is a sample ``maps.yaml`` file: description: ION 8600 meter file: ion8600_map.csv + .. _Modbus-TK-Config-Cmd: Modbus-TK Config Command Tool -***************************** +============================= ``config_cmd.py`` is a command-line tool for creating and maintaining VOLTTRON driver configurations. The tool runs from the command line: .. code-block:: shell - $ cd services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps + $ cd services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps $ python config_cmd.py ``config_cmd.py`` supports the following commands: - - **help** - List all commands. - - **quit** - Quit the command-line tool. - - **list_directories** - List all setup directories, with an option to edit their paths. + - ``help`` - List all commands. + - ``quit`` - Quit the command-line tool. + - ``list_directories`` - List all setup directories, with an option to edit their paths. + + By default, all directories are in the VOLTTRON repository - in ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps``. + in ``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps``. + It is important to use the correct directories when adding/editing device types and driver configs, and when loading configurations into VOLTTRON. * map_dir: directory in which ``maps.yaml`` is stored. * config_dir: directory in which driver config files are stored. * csv_dir: directory in which registry config CSV files are stored. - - **edit_directories** - Add/Edit map directory, driver config directory, and/or CSV config directory. + + - ``edit_directories`` - Add/Edit map directory, driver config directory, and/or CSV config directory. Press if no change is needed. Exits if the directory does not exist. - - **list_device_type_description** - List all device type descriptions in ``maps.yaml``. + - ``list_device_type_description`` - List all device type descriptions in ``maps.yaml``. Option to edit device type descriptions. - - **list_all_device_types** - List all device type information in ``maps.yaml``. Option to add more device types. - - **device_type** - List information for a selected device type. Option to select another device type. - - **add_device_type** - Add a device type to ``maps.yaml``. Option to add more than one device type. + - ``list_all_device_types`` - List all device type information in ``maps.yaml``. Option to add more device types. + - ``device_type`` - List information for a selected device type. Option to select another device type. + - ``add_device_type`` - Add a device type to ``maps.yaml``. Option to add more than one device type. Each device type includes its name, CSV file, description, addressing, and endian, as explained - in **MODBUS-TK Driver Maps**. If an invalid value is entered for addressing or endian, + in ``MODBUS-TK Driver Maps``. If an invalid value is entered for addressing or endian, the default value is used instead. - - **edit_device_type** - Edit an existing device type. If an invalid value is entered for addressing or endian, + - ``edit_device_type`` - Edit an existing device type. If an invalid value is entered for addressing or endian, the previous value is left unchanged. - - **list_drivers** - List all driver config names in ``config_dir``. - - **driver_config ** - Get a driver config from ``config_dir``. + - ``list_drivers`` - List all driver config names in ``config_dir``. + - ``driver_config `` - Get a driver config from ``config_dir``. Option to select the driver if no driver is found with that name. - - **add_driver_config ** - Add/Edit ``/.config``. + - ``add_driver_config `` - Add/Edit ``/.config``. Option to select the driver if no driver is found with that name. Press to exit. - - **load_volttron** - Load a driver config and CSV into VOLTTRON. Option to add the config or CSV file + - ``load_volttron`` - Load a driver config and CSV into VOLTTRON. Option to add the config or CSV file to config_dir or to csv_dir. VOLTTRON must be running when this command is used. - - **delete_volttron_config** - Delete a driver config from VOLTTRON. VOLTTRON must be running + - ``delete_volttron_config`` - Delete a driver config from VOLTTRON. VOLTTRON must be running when this command is used. - - **delete_volttron_csv** - Delete a registry csv config from VOLTTRON. VOLTTRON must be running + - ``delete_volttron_csv`` - Delete a registry csv config from VOLTTRON. VOLTTRON must be running when this command is used. -The ``config_cmd.py`` module is checked into the VOLTTRON repository -as ``services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/config_cmd.py``. +The ``config_cmd.py`` module is checked into the VOLTTRON repository as +``services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/config_cmd.py``. diff --git a/docs/source/core_services/drivers/driver_configuration/obix.rst b/docs/source/driver-framework/obix/obix.rst similarity index 52% rename from docs/source/core_services/drivers/driver_configuration/obix.rst rename to docs/source/driver-framework/obix/obix.rst index 5717703f00..3306023d59 100644 --- a/docs/source/core_services/drivers/driver_configuration/obix.rst +++ b/docs/source/driver-framework/obix/obix.rst @@ -1,21 +1,29 @@ -.. _Obix-config: +.. _Obix-Driver: + +=========== +Obix Driver +=========== + + +.. _Obix-Config: Obix Driver Configuration -------------------------- +========================= + VOLTTRON's uses Obix's restful interface to facilitate communication. -This driver does *not* handle reading data from the history section of the interface. -If the user wants data published from the management systems historical data use -the :ref:`Obix-history` agent. +This driver does *not* handle reading data from the history section of the interface. If the user wants data published +from the management systems historical data use the :ref:`Obix History ` agent. -driver_config -************* -There are three arguments for the **driver_config** section of the device configuration file: +Driver Configuration +-------------------- - - **url** - URL of the interface. - - **username** - User name for site.. - - **password** - Password for username. +There are three arguments for the ``driver_config`` section of the device configuration file: + + - ``url`` - URL of the Obix remote API interface + - ``username`` - User's username for the Obix remote API + - ``password`` - Users' password corresponding to the username Here is an example device configuration file: @@ -31,31 +39,41 @@ Here is an example device configuration file: "timezone": "UTC" } -A sample Obix configuration file can be found in the VOLTTRON repository in ``examples/configurations/drivers/obix.config`` +A sample Obix configuration file can be found in the VOLTTRON repository in +`examples/configurations/drivers/obix.config` -.. _Obix-Driver: +.. _Obix-Registry-Config: Obix Registry Configuration File -******************************** +-------------------------------- -The registry configuration file is a `CSV `_ file. Each row configures a point on the device. +The registry configuration file is a `CSV `_ file. Each row +configures a point on the device. The following columns are required for each row: - - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this point. For instance, if the Volttron Point Name is HeatCall1 then an agent would use ``/HeatCall1`` to refer to the point when using the RPC interface of the actuator agent. - - **Obix Point Name** - Name of the point on the obix interface. Escaping of spaces and dashes for use with the interface is handled internaly. - - **Obix Type** - One of "bool", "int", or "real" without quotes. + - **Volttron Point Name** - The name by which the platform and agents running on the platform will refer to this + point. For instance, if the Volttron Point Name is HeatCall1 then an agent would use `/HeatCall1` + to refer to the point when using the RPC interface of the actuator agent. + - **Obix Point Name** - Name of the point on the Obix interface. Escaping of spaces and dashes for use with the + interface is handled internally. + - **Obix Type** - One of `bool`, `int`, or `real` - **Units** - Used for meta data when creating point information on the historian. - - **Writable** - Either "TRUE" or "FALSE". Determines if the point can be written to. Only points labeled TRUE can be written to through the ActuatorAgent. This can be used to protect points that should not be accessed by the platform. + - **Writable** - Either `TRUE` or `FALSE`. Determines if the point can be written to. Only points labeled + **TRUE** can be written to through the ActuatorAgent. This can be used to protect points that should not be + accessed by the platform. The following column is optional: - - **Default Value** - The default value for the point. When the point is reverted by an agent it will change back to this value. If this value is missing it will revert to the last known value not set by an agent. + - **Default Value** - The default value for the point. When the point is reverted by an agent it will change back to + this value. If this value is missing it will revert to the last known value not set by an agent. -Any additional columns will be ignored. It is common practice to include a **Point Name** or **Reference Point Name** to include the device documentation's name for the point and **Notes** and **Unit Details** for additional information about a point. +Any additional columns will be ignored. It is common practice to include a `Point Name` or `Reference Point Name` to +include the device documentation's name for the point and `Notes` and `Unit Details` for additional information +about a point. -The following is an example of a Obix registry confugration file: +The following is an example of a Obix registry configuration file: .. csv-table:: Obix :header: Volttron Point Name,Obix Point Name,Obix Type,Units,Writable,Notes @@ -76,22 +94,27 @@ The following is an example of a Obix registry confugration file: HomeImportSPFromCDH,HomeImportSPFromCDH,real,kilowatt,FALSE,Precision: 0 ThermalFollowingAlarm,ThermalFollowingAlarm,bool,,FALSE, -A sample Obix configuration can be found in the VOLTTRON repository in ``examples/configurations/drivers/obix.csv`` +A sample Obix configuration can be found in the VOLTTRON repository in `examples/configurations/drivers/obix.csv` + -.. _Obix-AutoConfiguration: +.. _Obix-Auto-Configuration: Automatic Obix Configuration File Creation -****************************************** -A script that will automatically create both a device and register -configuration file for a site is located in the repository at ``scripts/obix/get_obix_driver_config.py``. +------------------------------------------ + +A script that will automatically create both a device and register configuration file for a site is located in the +repository at `scripts/obix/get_obix_driver_config.py`. The utility is invoked with the command: - ``python get_obix_driver_config.py -u -p `` +.. code-block:: bash + + python get_obix_driver_config.py -u -p -If either the registry_file or driver_file is omitted the script will output those files to stdout. +If either the `registry_file` or `driver_file` is omitted the script will output those files to stdout. -If either the username or password arguments are left out the script will ask for them on the command line before proceeding. +If either the username or password arguments are left out the script will ask for them on the command line before +proceeding. -The registry file produced by this script assumes that the `Volttron Point Name` and the `Obix Point Name` have the same value. -Also, it is assumed that all points should be read only. Users are expected to fix this as appropriate. +The registry file produced by this script assumes that the `Volttron Point Name` and the `Obix Point Name` have the same +value. Also, it is assumed that all points should be read only. Users are expected to fix this as appropriate. diff --git a/docs/source/driver-framework/platform-driver/platform-driver.rst b/docs/source/driver-framework/platform-driver/platform-driver.rst new file mode 100644 index 0000000000..716e2b3d1e --- /dev/null +++ b/docs/source/driver-framework/platform-driver/platform-driver.rst @@ -0,0 +1,584 @@ +.. _Platform-Driver: + +=============== +Platform Driver +=============== + +The Platform Driver agent is a special purpose agent a user can install on the platform to manage communication of +the platform with devices. The Platform driver features a number of endpoints for collecting data and sending control +signals using the message bus and automatically publishes data to the bus on a specified interval. + + +How does it work? +================= + +The Platform Driver creates a number of driver instances based on the contents of its config store; for each +combination of driver configuration, registry configuration and other referenced config files, a driver instance is +created by the Platform Driver. When configuration files are removed, the corresponding driver instance is removed by the +Platform Driver. + +Drivers are special-purpose agents for device communication, and unlike most agents, run +as separate threads under the Platform Driver (typically agents are spawned as their own process). While running, the +driver periodically "scrapes" device data and publishes the scrape to the message bus, as well as handling ad-hoc data +collection and control signalling commands issued from the Platform Driver. The actual commands are issued to devices by +the driver's "Interface" class. + +An Interface class is a Python class which serves as the interface between the driver and the device. The Interface +does this by implementing a set of well-defined actions using the communication paradigms and protocols used by the +device. For devices such as BACnet and Modbus devices, interfaces wrap certain protocol functions in Python code to be +used by the driver. In other cases, interfaces interact with web-API's, etc. + + +Device/Driver Communication +--------------------------- + +Device communication with the Platform Driver typically occurs using the following steps: + +#. Platform agents and the user's agents communicate between themselves and the message bus using publish/subscribe or + JSON-RPC +#. The user's agent sends a JSON-RPC request to the Platform Driver to `get_point` +#. And/Or the user's agent sends a JSON-RPC request to the Actuator to `set_point` +#. The Platform Driver forwards the request to the driver instance specified in the request +#. The device driver communicates with the end device +#. The end device returns a response to the driver indicating its current status +#. The driver publishes the device's response to the message bus using a publish + +For more in-depth descriptions and coverage of atypical scenarios, read up on +:ref:`the driver communication patterns `. + + +.. _Platform-Driver-Configuration: + +Configuration and Installation +============================== + +Configuration for each device consists of 3 parts: + +* Platform Driver Agent configuration file - lists all driver configuration files to load +* Driver configuration file - contains the general driver configuration and device settings +* Device Register configuration file - contains the settings for each individual data point on the device + +For each device, you must create a driver configuration file, device register configuration file, and an entry in the +Platform Driver Agent configuration file. + +Once configured, the Platform Driver Agent is configured and deployed in a manner similar to any other agent: + +.. code-block:: bash + + python scripts/install-agent.py -s services/core/PlatformDriverAgent -c + + +Requirements +------------ + +VOLTTRON drivers operated by the platform driver may have additional requirements for installation. +Required libraries: + +:: + + BACnet driver - bacpypes + Modbus driver - pymodbus + Modbus_TK driver - modbus-tk + DNP3 and IEEE 2030.5 drivers - pydnp3 + +The easiest way to install the requirements for drivers included in the VOLTTRON repository is to use ``bootstrap.py`` +(see :ref:`platform installation for more detail `) + + +Platform Driver Configuration +============================= + +The Platform Driver Agent configuration consists of general settings for all devices. The default values of the Master +Driver should be sufficient for most users. The user may optionally change the interval between device scrapes with the +driver_scrape_interval. + +The following example sets the driver_scrape_interval to 0.05 seconds or 20 devices per second: + +.. code-block:: json + + { + "driver_scrape_interval": 0.05, + "publish_breadth_first_all": false, + "publish_depth_first": false, + "publish_breadth_first": false, + "publish_depth_first_all": true, + "group_offset_interval": 0.0 + } + +* **driver_scrape_interval** - Sets the interval between devices scrapes. Defaults to 0.02 or 50 devices per second. + Useful for when the platform scrapes too many devices at once resulting in failed scrapes. +* **group_offset_interval** - Sets the interval between when groups of devices are scraped. Has no effect if all devices + are in the same group. + +In order to improve the scalability of the platform unneeded device state publishes for all devices can be turned off. +All of the following setting are optional and default to `True`. + +* **publish_depth_first_all** - Enable "depth first" publish of all points to a single topic for all devices. +* **publish_breadth_first_all** - Enable "breadth first" publish of all points to a single topic for all devices. +* **publish_depth_first** - Enable "depth first" device state publishes for each register on the device for all devices. +* **publish_breadth_first** - Enable "breadth first" device state publishes for each register on the device for all + devices. + +An example platform driver configuration file can be found in the VOLTTRON repository in +`services/core/PlatformDriverAgent/platform-driver.agent`. + + +.. _Driver-Configuration-File: + +Driver Configuration File +------------------------- + +.. note:: + + The terms `register` and `point` are used interchangeably in the documentation and in the configuration setting + names. They have the same meaning in the context of VOLTTRON drivers. + +Each device configuration has the following form: + +.. code-block:: json + + { + "driver_config": {"device_address": "10.1.1.5", + "device_id": 500}, + "driver_type": "bacnet", + "registry_config":"config://registry_configs/vav.csv", + "interval": 60, + "heart_beat_point": "heartbeat", + "group": 0 + } + +The following settings are required for all device configurations: + + - **driver_config** - Driver specific setting go here. See below for driver specific settings. + - **driver_type** - Type of driver to use for this device: bacnet, modbus, fake, etc. + - **registry_config** - Reference to a configuration file in the configuration store for registers + on the device. See the `Registry-Configuration-File`_ section below or + and the :ref:`Adding Device Configurations to the Configuration Store ` section in + the driver framework docs. + +These settings are optional: + + - **interval** - Period which to scrape the device and publish the results in seconds. Defaults to 60 seconds. + - **heart_beat_point** - A Point which to toggle to indicate a heartbeat to the device. A point with this ``Volttron + Point Name`` must exist in the registry. If this setting is missing the driver will not send a heart beat signal + to the device. Heart beats are triggered by the :ref:`Actuator Agent ` which must be running to + use this feature. + - **group** - Group this device belongs to. Defaults to 0 + +These settings are used to create the topic that this device will be referenced by following the VOLTTRON convention of +``{campus}/{building}/{unit}``. This will also be the topic published on, when the device is periodically scraped for +it's current state. + +The topic used to reference the device is derived from the name of the device configuration in the store. See the +:ref:`Adding Device Configurations to the Configuration Store ` section of the driver +framework docs. + + +Device Grouping +^^^^^^^^^^^^^^^ + +Devices may be placed into groups to separate them logically when they are scraped. This is done by setting the `group` +in the device configuration. `group` is a number greater than or equal to 0. Only number of devices in the same group +and the `group_offset_interval` are considered when determining when to scrape a device. + +This is useful in two cases: + +* If you need to ensure that certain devices are scraped in close proximity to each other you can put them in their own + group. If this causes devices to be scraped too quickly the groups can be separated out time wise using the + `group_offset_interval` setting. +* You may scrape devices on different networks in parallel for performance. For instance BACnet devices behind a single + MSTP router need to be scraped slowly and serially, but devices behind different routers may be scraped in parallel. + Grouping devices by router will do this automatically. + +The `group_offset_interval` is applied by multiplying it by the `group` number. If you intend to use +`group_offset_interval` only use consecutive `group` values that start with 0. + + +.. _Registry-Configuration-File: + +Registry Configuration File +--------------------------- +Registry configuration files setup each individual point on a device. Typically this file will be in CSV format, but the +exact format is driver specific. See the section for a particular driver for the registry configuration format. + +The following is a simple example of a Modbus registry configuration file: + +.. csv-table:: Catalyst 371 + :header: Reference Point Name,Volttron Point Name,Units,Units Details,Modbus Register,Writable,Point Address,Default Value,Notes + + CO2Sensor,ReturnAirCO2,PPM,0.00-2000.00,>f,FALSE,1001,,CO2 Reading 0.00-2000.0 ppm + CO2Stpt,ReturnAirCO2Stpt,PPM,1000.00 (default),>f,TRUE,1011,1000,Setpoint to enable demand control ventilation + HeatCall2,HeatCall2,On / Off,on/off,BOOL,FALSE,1114,,Status indicator of heating stage 2 need + + +.. _Adding-Devices-To-Config-Store: + +Adding Device Configurations to the Configuration Store +------------------------------------------------------- + +Configurations are added to the Configuration Store using the command line: + +.. code-block:: bash + + volttron-ctl config store platform.driver + +* **name** - The name used to refer to the file from the store. +* **file name** - A file containing the contents of the configuration. +* **file type** - ``--raw``, ``--json``, or ``--csv``. Indicates the type of the file. Defaults to ``--json``. + +The main configuration must have the name ``config`` + +Device configuration but **not** registry configurations must have a name prefixed with ``devices/``. Scripts that +automate the process will prefix registry configurations with ``registry_configs/``, but that is not a requirement for +registry files. + +The name of the device's configuration in the store is used to create the topic used to reference the device. For +instance, a configuration named `devices/PNNL/ISB1/vav1` will publish scrape results to `devices/PNNL/ISB1/vav1` and +is accessible with the Actuator Agent via `PNNL/ISB1/vav1`. + +The name of a registry configuration must match the name used to refer to it in the driver configuration. The reference +is not case sensitive. + +If the Platform Driver Agent is running any changes to the configuration store will immediately affect the running devices +according to the changes. + +Example +^^^^^^^ + +Consider the following three configuration files: A platform driver configuration called `platform-driver.agent`, a +Modbus device configuration file called `modbus_driver.config` and corresponding Modbus registry configuration file called +`modbus_registry.csv` + +To store the platform driver configuration run the command: + +.. code-block:: bash + + volttron-ctl config store platform.driver config platform-driver.agent + +To store the registry configuration run the command (note the ``--csv`` option): + +.. code-block:: bash + + volttron-ctl config store platform.driver registry_configs/modbus_registry.csv modbus_registry.csv --csv + +.. Note:: + + The `registry_configs/modbus_registry.csv` argument in the above command must match the reference to the + `registry_config` found in `modbus_driver.config`. + +To store the driver configuration run the command: + +.. code-block:: bash + + volttron-ctl config store platform.driver devices/my_campus/my_building/my_device modbus_config.config + + +Converting Old Style Configuration +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The new Platform Driver no longer supports the old style of device configuration. The old `device_list` setting is +ignored. + +To simplify updating to the new format `scripts/update_platform_driver_config.py` is provide to automatically update to +the new configuration format. + +With the platform running run: + +.. code-block:: bash + + python scripts/update_platform_driver_config.py + +old_configuration`` is the main configuration file in the old format. The script automatically modifies the driver +files to create references to CSV files and adds the CSV files with the appropriate name. + +`output` is the target output directory. + +If the ``--keep-old`` switch is used the old configurations in the output directory (if any) will not be deleted before +new configurations are created. Matching names will still be overwritten. + +The output from `scripts/update_platform_driver_config.py` can be automatically added to the configuration store +for the Platform Driver agent with `scripts/install_platform_driver_configs.py`. + +Creating and naming configuration files in the form needed by `scripts/install_platform_driver_configs.py` can speed up +the process of changing and updating a large number of configurations. See the ``--help`` message for +`scripts/install_platform_driver_configs.py` for more details. + + +Device Scalability Settings +--------------------------- + +In order to improve the scalability of the platform unneeded device state publishes for a device can be turned off. +All of the following setting are optional and will override the value set in the main platform driver configuration. + + - **publish_depth_first_all** - Enable "depth first" publish of all points to a single topic. + - **publish_breadth_first_all** - Enable "breadth first" publish of all points to a single topic. + - **publish_depth_first** - Enable "depth first" device state publishes for each register on the device. + - **publish_breadth_first** - Enable "breadth first" device state publishes for each register on the device. + +It is common practice to set `publish_breadth_first_all`, `publish_depth_first`, and +`publish_breadth_first` to `False` unless they are specifically needed by an agent running on +the platform. + + +.. note:: + + All Historian Agents require `publish_depth_first_all` to be set to `True` in order to capture data. + + +Usage +===== + +After installing the Platform Driver and loading driver configs into the config store, the installed drivers begin +polling and JSON-RPC endpoints become usable. + + +.. _Device-State-Publish: + +Polling +------- + +Once running, the Platform Driver will spawn drivers using the `driver_type` parameter of the +:ref:`driver configuration ` and periodically poll devices for all point data specified in +the :ref:`registry configuration ` (at the interval specified by the interval parameter +of the driver configuration). + +By default, the value of each register on a device is published 4 different ways when the device state is published. +Consider the following settings in a driver configuration stored under the name ``devices/pnnl/isb1/vav1``: + +.. code-block:: json + + { + "driver_config": {"device_address": "10.1.1.5", + "device_id": 500}, + + "driver_type": "bacnet", + "registry_config":"config://registry_configs/vav.csv", + } + +In the `vav.csv` file is a register with the name `temperature`. For these examples the current value of the +register on the device happens to be 75.2 and the meta data is + +.. code-block:: json + + {"units": "F"} + +When the driver publishes the device state the following 2 things will be published for this register: + + A "depth first" publish to the topic `devices/pnnl/isb1/vav1/temperature` with the following message: + + .. code-block:: python + + [75.2, {"units": "F"}] + + A "breadth first" publish to the topic `devices/temperature/vav1/isb1/pnnl` with the following message: + + .. code-block:: python + + [75.2, {"units": "F"}] + + These publishes can be turned off by setting `publish_depth_first` and `publish_breadth_first` to `false` + respectively. + +Also these two publishes happen once for all registers: + + A "depth first" publish to the topic `devices/pnnl/isb1/vav1/all` with the following message: + + .. code-block:: python + + [{"temperature": 75.2, ...}, {"temperature":{"units": "F"}, ...}] + + A "breadth first" publish to the topic `devices/all/vav1/isb1/pnnl` with the following message: + + .. code-block:: python + + [{"temperature": 75.2, ...}, {"temperature":{"units": "F"}, ...}] + + These publishes can be turned off by setting `publish_depth_first_all` and `publish_breadth_first_all` to + ``false`` respectively. + + +JSON-RPC Endpoints +------------------ + +**get_point** - Returns the value of specified device set point + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + - **point_name** - name of device point from registry configuration file + +**set_point** - Set value on specified device set point. If global override is condition is set, raise OverrideError + exception. + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + - **point_name** - name of device point from registry configuration file + - **value** - desired value to set for point on device + + .. warning:: + + It is not recommended to call the `set_point` method directly. It is recommended to instead use the + :ref:`Actuator ` agent to set points on a device, using its scheduling capability. + +**scrape_all** - Returns values for all set points on the specified device. + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + +**get_multiple_points** - return values corresponding to multiple points on the same device + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + - **point_names** - iterable of device point names from registry configuration file + +**set_multiple_points** - Set values on multiple set points at once. If global override is condition is set, raise + OverrideError exception. + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + - **point_names_value** - list of tuples consisting of (point_name, value) pairs for setting a series of + points + +**heart_beat** - Send a heartbeat/keep-alive signal to all devices configured for Platform Driver + +**revert_point** - Revert the set point of a device to its default state/value. If global override is condition is + set, raise OverrideError exception. + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + - **point_name** - name of device point from registry configuration file + +**revert_device** - Revert all the set point values of the device to default state/values. If global override is + condition is set, raise OverrideError exception. + + Parameters + - **path** - device topic string (typical format is devices/campus/building/device) + +**set_override_on** - Turn on override condition on all the devices matching the specified pattern ( + :ref:`override docs `) + + Parameters + - **pattern** - Override pattern to be applied. For example, + - If pattern is `campus/building1/*` - Override condition is applied for all the devices under + `campus/building1/`. + - If pattern is `campus/building1/ahu1` - Override condition is applied for only `campus/building1/ahu1` + The pattern matching is based on bash style filename matching semantics. + - **duration** - Duration in seconds for the override condition to be set on the device (default 0.0, + duration <= 0.0 imply indefinite duration) + - **failsafe_revert** - Flag to indicate if all the devices falling under the override condition must to be + set + to its default state/value immediately. + - **staggered_revert** - + +**set_override_off** - Turn off override condition on all the devices matching the pattern. + + Parameters + - **pattern** - device topic pattern for devices on which the override condition should be removed. + +**get_override_devices** - Get a list of all the devices with override condition. + +**clear_overrides** - Turn off override condition for all points on all devices. + +**get_override_patterns** - Get a list of all override condition patterns currently set. + + +.. _Platform-Driver-Override: + +Driver Override Condition +========================= + +By default, every user is allowed write access to the devices by the platform driver. The override feature will allow the +user (for example, building administrator) to override this default behavior and enable the user to lock the write +access on the devices for a specified duration of time or indefinitely. + + +Set Override On +--------------- + +The Platform Driver's ``set_override_on`` RPC method can be used to set the override condition for all drivers with topic +matching the provided pattern. This can be specific devices, groups of devices, or even all configured devices. The +pattern matching is based on bash style filename matching semantics. + +Parameters: + + - pattern: Override pattern to be applied. For example, + * If the pattern is ``campus/building1/*`` the override condition is applied for all the devices under + `campus/building1/`. + * If the pattern is ``campus/building1/ahu1`` the override condition is applied for only the + `campus/building1/ahu1` device. The pattern matching is based on bash style filename matching semantics. + - duration: Time duration for the override in seconds. If duration <= 0.0, it implies an indefinite duration. + - failsafe_revert: Flag to indicate if all the devices falling under the override condition has to be set to its + default state/value immediately. + - staggered_revert: If this flag is set, reverting of devices will be staggered. + +Example ``set_override_on`` RPC call: + +.. code-block:: python + + self.vip.rpc.call(PLATFORM_DRIVER, "set_override_on", , ) + + +Set Override Off +---------------- + +The override condition can also be toggled off based on a provided pattern using the Platform Driver's +``set_override_off`` RPC call. + +Parameters: + + - pattern: Override pattern to be applied. For example, + * If the pattern is ``campus/building1/*`` the override condition is removed for all the devices under + `campus/building1/`. + * If the pattern is ``campus/building1/ahu1`` the override condition is removed for only for the + `campus/building1/ahu1` device. The pattern matching is based on bash style filename matching semantics. + +Example ``set_override_off`` RPC call: + +.. code-block:: python + + self.vip.rpc.call(PLATFORM_DRIVER, "set_override_off", ) + + +Get Override Devices +-------------------- + +A list of all overridden devices can be obtained with the Platform Driver's ``get_override_devices`` RPC call. + +This method call has no additional parameters. + +Example ``get_override_devices`` RPC call: + +.. code-block:: python + + self.vip.rpc.call(PLATFORM_DRIVER, "get_override_devices") + + +Get Override Patterns +--------------------- + +A list of all patterns which have been requested for override can be obtained with the Platform Driver's +``get_override_patterns`` RPC call. + +This method call has no additional parameters + +Example "get_override_patterns" RPC call: + +.. code-block:: python + + self.vip.rpc.call(PLATFORM_DRIVER, "get_override_patterns") + + +Clear Overrides +--------------- + +All overrides set by RPC calls described above can be toggled off at using a single ``clear_overrides`` RPC call. + +This method call has no additional parameters + +Example "clear_overrides" RPC call: + +.. code-block:: python + + self.vip.rpc.call(PLATFORM_DRIVER, "clear_overrides") + +For information on the global override feature specification, view the +:ref:`Global Override Specification ` doc. diff --git a/docs/source/core_services/drivers/files/ted-spyders.png b/docs/source/driver-framework/ted-driver/files/ted-spyders.png similarity index 100% rename from docs/source/core_services/drivers/files/ted-spyders.png rename to docs/source/driver-framework/ted-driver/files/ted-spyders.png diff --git a/docs/source/core_services/drivers/driver_configuration/the-energy-detective-driver.rst b/docs/source/driver-framework/ted-driver/the-energy-detective-driver.rst similarity index 76% rename from docs/source/core_services/drivers/driver_configuration/the-energy-detective-driver.rst rename to docs/source/driver-framework/ted-driver/the-energy-detective-driver.rst index 82c4e116e5..2d63208fad 100644 --- a/docs/source/core_services/drivers/driver_configuration/the-energy-detective-driver.rst +++ b/docs/source/driver-framework/ted-driver/the-energy-detective-driver.rst @@ -1,25 +1,20 @@ .. _The-Energy-Detective-Driver: +================================= The Energy Detective Meter Driver ------------------------------------- +================================= +The TED-Pro is an energy monitoring system that can measure energy consumption of multiple mains and supports +sub-metering of individual circuits. This driver connects to a TED Pro Energy Control Center (ECC) and can collect +information from multiple Measuring Transmitting Units (MTUs) and Spyder sub-metering devices connected to the ECC. -Introduction ------------- -The TED-Pro is an energy monitoring system that can measure energy consumption -of multiple mains and supports submetering of individual circuits. -This driver connects to a TED Pro Energy Control Center (ECC) and can collect -information from multiple Measuring Transmiting Units (MTUs) and Spyder submetering -devices connected to the ECC. +Configuration +============= -configuration -------------- - -The TED Pro device interface is configured as follows. You'll need the ip address -or hostname of the ECC on a network segment accessible from the Volttron instance, -if configured to use a port other than 80, you can provide it as shown below, -following a colon after the host address. +The TED Pro device interface is configured as follows. You'll need the ip address or hostname of the ECC on a network +segment accessible from the VOLTTRON instance, if configured to use a port other than 80, you can provide it as shown +below, following a colon after the host address. .. code-block:: json @@ -34,39 +29,40 @@ following a colon after the host address. } } + Parameters -********** +---------- - **username** - Username if the TED Pro is configured with Basic Authentication - **password** - Password if the TED Pro is configured with Basic Authentication - **device_address** - Hostname or IP address of the TED Pro ECC, a non-standard port can be included if needed - - **scrape_spyder** - Default true, enables or disables collection of the submetering data from spyder devices - connected to the TED Pro + - **scrape_spyder** - Default true, enables or disables collection of the sub-metering data from spyder devices + connected to the TED Pro - **track_totalizers** - Default true, enables or disables tracking of lifetime totals in the VOLTTRON Driver .. note:: - The TED Pro does not expose its internal lifetime totalized metering, instead offering month to date (MTD) - and daily totals (TDY). Using the "track_totalizers" setting, the ted-meter driver will attempt to maintain - monotonically increasing lifetime totalizers. To do so, it must retain state regarding the running total and - the last read value. The driver makes use of the VOLTTRON Config subsystem to store this state. - To reset these totals, delete the state/ted_meter/ config from the master driver config store and restart the - master driver. + The TED Pro does not expose its internal lifetime "totalized" metering, instead offering month to date (MTD) + and daily totals (TDY). Using the "track_totalizers" setting, the ted-meter driver will attempt to maintain + monotonically increasing lifetime totalizers. To do so, it must retain state regarding the running total and + the last read value. The driver makes use of the VOLTTRON Config subsystem to store this state. To reset these + totals, delete the 1state/ted_meter/1 config from the platform driver config store and restart the + platform driver. .. note:: This driver does not make use of the registry config. Because it is able to determine the configuration of the TED Pro Device via the API, it simply creates registers for each data source on the TED Pro - .. note:: - This driver is internally aware of the appropriate HayStack Tags for its registers, however, the - MasterDriver Framework makes no provision for publishing those tags during a scrape. Therefore, - integration of the tagging data is left to the end user. + This driver is internally aware of the appropriate HayStack Tags for its registers, however, the Platform Driver makes + no provision for publishing those tags during a scrape. Therefore, integration of the tagging data is left to the + end user. + Examples -******** +-------- |TED Pro showing spyder outputs| @@ -137,4 +133,4 @@ The above configuration in the TED will result in the following scrape from the } ] -.. |TED Pro showing spyder outputs| image:: ../files/ted-spyders.png \ No newline at end of file +.. |TED Pro showing spyder outputs| image:: files/ted-spyders.png diff --git a/docs/source/setup/images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png b/docs/source/files/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png similarity index 100% rename from docs/source/setup/images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png rename to docs/source/files/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png diff --git a/docs/source/files/dashboard-blank.png b/docs/source/files/dashboard-blank.png deleted file mode 100755 index 2e62a2331d..0000000000 Binary files a/docs/source/files/dashboard-blank.png and /dev/null differ diff --git a/docs/source/files/volttron_diagram.png b/docs/source/files/volttron_diagram.png new file mode 100755 index 0000000000..310f99dd75 Binary files /dev/null and b/docs/source/files/volttron_diagram.png differ diff --git a/docs/source/images/VOLTTRON_User_Guide.pdf b/docs/source/images/VOLTTRON_User_Guide.pdf deleted file mode 100755 index 4c8b77a890..0000000000 Binary files a/docs/source/images/VOLTTRON_User_Guide.pdf and /dev/null differ diff --git a/docs/source/images/clone-existing.png b/docs/source/images/clone-existing.png deleted file mode 100755 index 465f351bf0..0000000000 Binary files a/docs/source/images/clone-existing.png and /dev/null differ diff --git a/docs/source/images/dashboard-blank.png b/docs/source/images/dashboard-blank.png deleted file mode 100755 index 2e62a2331d..0000000000 Binary files a/docs/source/images/dashboard-blank.png and /dev/null differ diff --git a/docs/source/images/eclipse-marketplace.png b/docs/source/images/eclipse-marketplace.png deleted file mode 100755 index cf6a0136b2..0000000000 Binary files a/docs/source/images/eclipse-marketplace.png and /dev/null differ diff --git a/docs/source/images/eclipse-marketplace2.png b/docs/source/images/eclipse-marketplace2.png deleted file mode 100755 index 91b4b5f706..0000000000 Binary files a/docs/source/images/eclipse-marketplace2.png and /dev/null differ diff --git a/docs/source/images/finish-import.png b/docs/source/images/finish-import.png deleted file mode 100755 index 5a32e44151..0000000000 Binary files a/docs/source/images/finish-import.png and /dev/null differ diff --git a/docs/source/images/general-project.png b/docs/source/images/general-project.png deleted file mode 100755 index 106228a41d..0000000000 Binary files a/docs/source/images/general-project.png and /dev/null differ diff --git a/docs/source/images/git-view.png b/docs/source/images/git-view.png deleted file mode 100755 index 0b59e8fcc3..0000000000 Binary files a/docs/source/images/git-view.png and /dev/null differ diff --git a/docs/source/images/import-project.png b/docs/source/images/import-project.png deleted file mode 100755 index 5ac524f41d..0000000000 Binary files a/docs/source/images/import-project.png and /dev/null differ diff --git a/docs/source/images/listener-all-vars.png b/docs/source/images/listener-all-vars.png deleted file mode 100755 index f4fdde5296..0000000000 Binary files a/docs/source/images/listener-all-vars.png and /dev/null differ diff --git a/docs/source/images/logout-button.png b/docs/source/images/logout-button.png deleted file mode 100755 index 81fab0beff..0000000000 Binary files a/docs/source/images/logout-button.png and /dev/null differ diff --git a/docs/source/images/new-python-run.png b/docs/source/images/new-python-run.png deleted file mode 100755 index 580b462c8f..0000000000 Binary files a/docs/source/images/new-python-run.png and /dev/null differ diff --git a/docs/source/images/pick-python.png b/docs/source/images/pick-python.png deleted file mode 100755 index 2d3e8eafaf..0000000000 Binary files a/docs/source/images/pick-python.png and /dev/null differ diff --git a/docs/source/images/pin-to-dashboard.png b/docs/source/images/pin-to-dashboard.png deleted file mode 100755 index b4041b15df..0000000000 Binary files a/docs/source/images/pin-to-dashboard.png and /dev/null differ diff --git a/docs/source/images/platform-run-config.png b/docs/source/images/platform-run-config.png deleted file mode 100755 index 083e157d83..0000000000 Binary files a/docs/source/images/platform-run-config.png and /dev/null differ diff --git a/docs/source/images/pydev-python.png b/docs/source/images/pydev-python.png deleted file mode 100755 index e20f45bdb2..0000000000 Binary files a/docs/source/images/pydev-python.png and /dev/null differ diff --git a/docs/source/images/register-new-platform-authorization.png b/docs/source/images/register-new-platform-authorization.png deleted file mode 100755 index 5a03488d0b..0000000000 Binary files a/docs/source/images/register-new-platform-authorization.png and /dev/null differ diff --git a/docs/source/images/run-results.png b/docs/source/images/run-results.png deleted file mode 100755 index 5568a59585..0000000000 Binary files a/docs/source/images/run-results.png and /dev/null differ diff --git a/docs/source/images/select-path.png b/docs/source/images/select-path.png deleted file mode 100755 index 7f690f5696..0000000000 Binary files a/docs/source/images/select-path.png and /dev/null differ diff --git a/docs/source/images/select-repo.png b/docs/source/images/select-repo.png deleted file mode 100755 index d74fd20b45..0000000000 Binary files a/docs/source/images/select-repo.png and /dev/null differ diff --git a/docs/source/images/set-as-pydev.png b/docs/source/images/set-as-pydev.png deleted file mode 100755 index 681eeefd60..0000000000 Binary files a/docs/source/images/set-as-pydev.png and /dev/null differ diff --git a/docs/source/images/setup-python.png b/docs/source/images/setup-python.png deleted file mode 100755 index 6e90e4537c..0000000000 Binary files a/docs/source/images/setup-python.png and /dev/null differ diff --git a/docs/source/images/start-agent.png b/docs/source/images/start-agent.png deleted file mode 100755 index 57b3b0006a..0000000000 Binary files a/docs/source/images/start-agent.png and /dev/null differ diff --git a/docs/source/images/volttron-console.png b/docs/source/images/volttron-console.png deleted file mode 100755 index 1a63411b9a..0000000000 Binary files a/docs/source/images/volttron-console.png and /dev/null differ diff --git a/docs/source/images/volttron-main.png b/docs/source/images/volttron-main.png deleted file mode 100755 index 2443671f96..0000000000 Binary files a/docs/source/images/volttron-main.png and /dev/null differ diff --git a/docs/source/images/volttron-pick-main.png b/docs/source/images/volttron-pick-main.png deleted file mode 100755 index 13168ba066..0000000000 Binary files a/docs/source/images/volttron-pick-main.png and /dev/null differ diff --git a/docs/source/index.rst b/docs/source/index.rst index bad977bc7e..82490ca042 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -1,61 +1,170 @@ -.. VOLTTRON documentation master file, created by - sphinx-quickstart on Thu Feb 4 21:15:08 2016. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. +.. VOLTTRON documentation root file ========================== |VOLTTRON| documentation! ========================== +|VOLTTRON Tagline| +|VOLTTRON| is an open source, scalable, and distributed platform that seamlessly integrates data, devices, and +systems for sensing and control applications. It is built on extensible frameworks allowing contributors to easily +expand the capabilities of the platform to meet their use cases. Features are implemented as loosely coupled software +components, called agents, enabling flexible deployment options and easy customization. -|VOLTTRON Tagline| -|VOLTTRON| is an open-source platform for distributed sensing and control. The platform provides services for collecting and storing data from buildings and devices and provides an environment for developing applications -that interact with that data. +Key Use-Cases +============= + +- Developing scalable, reusable applications to deploy in the field without spending development resources on + operational components not specific to the application +- Low-cost data collection deployable on commodity hardware +- Integration hub for connecting a diverse set of devices together in a common interface +- Testbed for developing applications for a simulated environment + +.. image:: files/volttron_diagram.png Features --------- +======== -Out of the box VOLTTRON provides: +- A :ref:`message bus ` allowing connectivity between agents on individual platforms and + between platform instances in large scale deployments +- Integrated security features enabling the management of secure communication between agents and platform instances +- A flexible :ref:`agent framework ` allowing users to adapt the platform to their unique use-cases +- A configurable :ref:`driver framework ` for collecting data from and sending control + signals to buildings and devices +- automatic data capture and retrieval through our :ref:`historian framework ` +- An extensible :ref:`web framework ` allowing users and services to securely connect to the platform + from anywhere +- Capability to interface with simulation engines and applications to evaluate applications prior to deployment -- a secure :ref:`message bus ` allowing agents to subscribe to data sources and publish results and messages. -- secure connectivity between multiple VOLTTRON instances. -- BACnet, ModBus and other device/system protocol connectivity through our :ref:`driver framework ` for collecting data from and sending control actions to buildings and devices. -- automatic data capture and retrieval through our :ref:`historian framework `. -- platform based :ref:`agent lifecycle management `. -- a :ref:`web based management ` tool for managing several instances from a central instance. -- the ability to easily extend the functionality of existing agents or create new ones for your specific purposes. +VOLTTRON™ is publicly available from `GitHub `_. The project +is supported by the U.S. Department of Energy and receives ongoing updates from a team of core developers at PNNL. The +VOLTTRON team encourages and appreciates community involvement including issues and pull requests on Github, meetings +at our bi-weekly office-hours and on Slack. To be invited to office-hours or slack, please `send the team an email +`_. -Background ----------- -|VOLTTRON| is written in Python 3.6 and runs on Linux Operating Systems. For users unfamiliar with those technologies, the following resources are recommended: +.. toctree:: + :caption: Introduction + :hidden: + :titlesonly: + :maxdepth: 1 -- https://docs.python.org/3.6/tutorial/ -- http://ryanstutorials.net/linuxtutorial/ + introduction/platform-install + introduction/definitions + introduction/license -License -------- -The project is :ref:`licensed ` under Apache 2 license. +.. toctree:: + :caption: Developing in VOLTTRON + :hidden: + :titlesonly: + :maxdepth: 1 + developing-volttron/community + developing-volttron/development-environment/index + developing-volttron/developing-agents/agent-development + developing-volttron/developing-drivers/driver-development + developing-volttron/contributing-code + developing-volttron/contributing-documentation + developing-volttron/jupyter/jupyter-notebooks + developing-volttron/python-for-matlab-users -Contents: .. toctree:: + :caption: Deploying VOLTTRON + :hidden: + :titlesonly: + :maxdepth: 1 + + deploying-volttron/bootstrap-process + deploying-volttron/platform-configuration + deploying-volttron/deployment-planning-options + deploying-volttron/single-machine + deploying-volttron/multi-platform/index + deploying-volttron/secure-deployment-considerations + deploying-volttron/linux-system-hardening + deploying-volttron/recipe-deployment + + +.. toctree:: + :caption: Agent Framework + :hidden: + :titlesonly: + :maxdepth: 1 + + agent-framework/agents-overview + agent-framework/core-service-agents/index + agent-framework/operations-agents/index + agent-framework/historian-agents/historian-framework + agent-framework/web-framework + agent-framework/integrating-simulations/index + agent-framework/platform-service-standardization + agent-framework/third-party-agents + + +.. toctree:: + :caption: Driver Framework + :hidden: + :titlesonly: + :maxdepth: 1 + + driver-framework/drivers-overview + driver-framework/platform-driver/platform-driver + driver-framework/actuator/actuator-agent + driver-framework/fake-driver/fake-driver + driver-framework/bacnet/bacnet-driver + driver-framework/chargepoint/chargepoint-driver + driver-framework/dnp3-driver/dnp3-driver + driver-framework/ecobee/ecobee-web-driver + driver-framework/ieee-2030_5/ieee-2030_5-driver + driver-framework/modbus/modbus-driver + driver-framework/modbus/modbus-tk-driver + driver-framework/obix/obix + driver-framework/ted-driver/the-energy-detective-driver + + +.. toctree:: + :caption: Platform Features + :hidden: + :titlesonly: + :maxdepth: 1 + + platform-features/message-bus/index + platform-features/control/index + platform-features/config-store/configuration-store + platform-features/security/volttron-security + +.. toctree:: + :caption: VOLTTRON Core Service Agents + :hidden: + :titlesonly: + :maxdepth: 2 + :glob: + + volttron-api/services/*/modules + +.. toctree:: + :caption: VOLTTRON Core Operations Agents + :hidden: + :titlesonly: :maxdepth: 2 + :glob: + + volttron-api/ops/*/modules + +.. toctree:: + :caption: VOLTTRON Topics + :hidden: + :titlesonly: + :maxdepth: 1 + + volttron-topics/troubleshooting/index + volttron-topics/volttron-applications/index + volttron-topics/change-log/index - overview/index - community_resources/index - setup/index - devguides/index - core_services/index - specifications/index - volttron_applications/index - VOLTTRON Platform API Indices and tables @@ -63,9 +172,6 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` -* :ref:`search` - -.. |VOLTTRON Logo| image:: images/volttron-webimage.jpg .. |VOLTTRON| unicode:: VOLTTRON U+2122 -.. |VOLTTRON Tagline| image:: images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png +.. |VOLTTRON Tagline| image:: files/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png diff --git a/docs/source/introduction/definitions.rst b/docs/source/introduction/definitions.rst new file mode 100644 index 0000000000..4c9ac91507 --- /dev/null +++ b/docs/source/introduction/definitions.rst @@ -0,0 +1,149 @@ +.. _Definitions: + +=================== +Definition of Terms +=================== + +This page lays out a common terminology for discussing the components and underlying technologies used by the platform. +The first section discusses capabilities and industry standards that VOLTTRON conforms to while the latter is specific +to the VOLTTRON domain. + + +Industry Terms +============== + +.. glossary:: + + Agent + Software which acts on behalf of a user to perform a set of tasks. + + BACNet + Building Automation and Control network that leverages ASHRAE, ANSI, and IOS 16484-5 standard protocols + + DNP3 (Distributed Network Protocol 3) + Communications protocol used to coordinate processes in distributed automation systems + + JSON (JavaScript Object Notation) + JavaScript object notation is a text-based, human-readable, open data interchange format, similar to XML but less + verbose + + IEEE 2030.5 + Utilities communication standard for managing energy demand and load (previously Smart Energy Profile version 2, + SEP2) + + JSON-RPC (JSON-Remote Procedure Call) + JSON-encoded Remote Procedure Call + + Modbus + Communications protocol for talking with industrial electronic devices + + PLC (Programmable Logic Controller) + Computer used in industrial applications to manage processes of groups of industrial devices + + Python Virtual Environment + The `Python-VENV` library allows users to create a virtualized copy of the local environment. A virtual environment + allows the user to isolate the dependencies for a project which helps prevent conflicts between dependencies across + projects. + + Publish/Subscribe + A message delivery pattern where senders (publishers) and receivers (subscribers) do not communicate directly nor + necessarily have knowledge of each other, but instead exchange messages through an intermediary based on a mutual + class or topic. + + .. note:: + + The Publish/Subscribe paradigm is often notated as ``pub/sub`` in VOLTTRON documentation. + + RabbitMQ + Open-Source message brokering system used by VOLTTRON for sending messages between services on the platform. + + Remote Procedure Call + Protocol used to request services of another computer located elsewhere on the network or on a different network. + + SSH + `Secure Shell` is a network protocol providing encryption and authentication of data using public-key cryptography. + + SSL + `Secure Sockets Layer` is a technology for encryption and authentication of network traffic based on a chain of + trust. + + TLS + `Transport Layer Security` is the successor to SSL. + + ZeroMQ + (also ØMQ) A library used for inter-process and inter-computer communication. + + +VOLTTRON Terms +============== + +.. glossary:: + + Activated Environment + An activated environment is the environment a VOLTTRON instance is run in. The bootstrap process creates the + environment from the shell. + + AIP (Agent Instantiation and Packaging) + This is the module responsible for creating agent wheels, the agent execution environment and running agents. Found + in the VOLTTRON repository in the `volttron/platform` directory. + + Agent Framework + Framework which provides connectivity to the VOLTTRON platform and subsystems for software agents. + + Bootstrap the Environment + The process by which an operating environment (activated environment) is produced. From the :term:`VOLTTRON_ROOT` + directory, executing `python bootstrap.py` will start the bootstrap process. + + Config Store + Agent data store used by the platform for storing configuration files and automating the management of agent + configuration + + Driver + Module that implements communication paradigms of a device to provide an interface to devices for the VOLTTRON + platform. + + Driver Framework + Framework for implementing communication between the VOLTTRON platform and devices on the network (or a remote + network) + + Historian + Historians in VOLTTRON are special purpose agents for automatically collecting data from the platform message bus + and storing in a persistent data store. + + VIP + VOLTTRON Interconnect Protocol is a secure routing protocol that facilitates communications between agents, + controllers, services, and the supervisory :term:`VOLTTRON_INSTANCE`. + + VIP address + Public address bound to by a VOLTTRON platform instance for communication (Example: ``tcp:///192.168.1.20:22916``). + Communication to external platform instances requires that the address be in range for external communication from + the host. + + VIP Identity + Unique identifier for an agent connected to an instance. Used for messaging, routing and security. + + VOLTTRON Central + VOLTTRON Central (VC) is a special purpose agent for managing multiple platforms in a distributed VOLTTRON + deployment + + VOLTTRON_HOME + The location for a specific :term:`VOLTTRON_INSTANCE` to store its specific information. There can be many + `VOLTTRON_HOME`s on a single computing resource such as a VM, machine, etc. Each `VOLTTRON_HOME` will correspond to + a single instance of VOLTTRON. + + VOLTTRON_INSTANCE + A single VOLTTRON process executing instructions on a computing resource. For each `VOLTTRON_INSTANCE`, there WILL + BE only one :term:`VOLTTRON_HOME` associated with it. For a `VOLTTRON_INSTANCE` to participate outside its computing + resource, it must be bound to an external IP address. + + VOLTTRON_ROOT + The cloned directory from Github. When executing the command: + + .. code-block:: bash + + git clone https://github.com/VOLTTRON/volttron.git + + the top level volttron folder is the VOLTTRON_ROOT. + + Web Framework + Framework used by VOLTTRON agents to implement web services with HTTP and HTTPS diff --git a/docs/source/overview/files/overview.png b/docs/source/introduction/files/overview.png similarity index 100% rename from docs/source/overview/files/overview.png rename to docs/source/introduction/files/overview.png diff --git a/docs/source/overview/license.rst b/docs/source/introduction/license.rst similarity index 98% rename from docs/source/overview/license.rst rename to docs/source/introduction/license.rst index cbeb07bb95..ccbc5801e1 100644 --- a/docs/source/overview/license.rst +++ b/docs/source/introduction/license.rst @@ -1,4 +1,4 @@ -.. _license: +.. _License: ======= License @@ -14,9 +14,9 @@ The patent license grant shall only be applicable to the following patent and pa Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -======= + Terms -======= +===== This material was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor the United States Department of Energy, nor Battelle, nor any of their employees, nor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty, express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or any information, apparatus, product, software, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or any agency thereof, or Battelle Memorial Institute. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof. diff --git a/docs/source/introduction/platform-install.rst b/docs/source/introduction/platform-install.rst new file mode 100644 index 0000000000..8038901335 --- /dev/null +++ b/docs/source/introduction/platform-install.rst @@ -0,0 +1,504 @@ +.. _Platform-Installation: + +.. role:: bash(code) + :language: bash + +======================= +Installing the Platform +======================= + +VOLTTRON is written in Python 3.6+ and runs on Linux Operating Systems. For users unfamiliar with those technologies, +the following resources are recommended: + +- `Python 3.6 Tutorial `_ +- `Linux Tutorial `_ + +This guide will specify commands to use to successfully install the platform on supported Linux distributions, but a +working knowledge of Linux will be helpful for troubleshooting and may improve your ability to get more out of your +deployment. + +.. note:: + + Volttron version 7.0rc1 is currently tested for Ubuntu versions 18.04 and 18.10 as well as Linux Mint version 19.3. + Version 6.x is tested for Ubuntu versions 16.04 and 18.04 as well as Linux Mint version 19.1. + + +.. _Platform-Prerequisites: + +Step 1 - Install prerequisites +============================== + +The following packages will need to be installed on the system: + +* git +* build-essential +* python3.6-dev +* python3.6-venv +* openssl +* libssl-dev +* libevent-dev + +On **Debian-based systems**, these can all be installed with the following command: + +.. code-block:: bash + + sudo apt-get update + sudo apt-get install build-essential python3-dev python3-venv openssl libssl-dev libevent-dev git + +On Ubuntu-based systems, available packages allow you to specify the Python3 version, 3.6 or greater is required +(Debian itself does not provide those packages). + +.. code-block:: bash + + sudo apt-get install build-essential python3.6-dev python3.6-venv openssl libssl-dev libevent-dev git + + +On arm-based systems (including, but not limited to, Raspbian), you must also install libffi-dev, you can do this with: + +.. code-block:: bash + + sudo apt-get install libffi-dev + +.. note:: + + On arm-based systems, the available apt package repositories for Raspbian versions older than buster (10) do not + seem to be able to be fully satisfied. While it may be possible to resolve these dependencies by building from + source, the only recommended usage pattern for VOLTTRON 7 and beyond is on raspberry pi OS 10 or newer. + +On **Redhat or CENTOS systems**, these can all be installed with the following +command: + +.. code-block:: bash + + sudo yum update + sudo yum install make automake gcc gcc-c++ kernel-devel python3-devel openssl openssl-devel libevent-devel git + +.. warning:: + Python 3.6 or greater is required, please ensure you have installed a supported version with :bash:`python3 --version` + +If you have an agent which requires the pyodbc package, install the following additional requirements: + +* freetds-bin +* unixodbc-dev + +On **Debian-based systems** these can be installed with the following command: + +.. code-block:: bash + + sudo apt-get install freetds-bin unixodbc-dev + +On **Redhat or CentOS systems**, these can be installed from the Extra Packages for Enterprise Linux (EPEL) repository: + +.. code-block:: bash + + sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm + sudo yum install freetds unixODBC-devel + +.. note:: + The above command to install the EPEL repository is for Centos/Redhat 8. Change the number to match your OS version. + EPEL packages are included in Fedora repositories, so installing EPEL is not required on Fedora. + +It may be possible to deploy VOLTTRON on a system not listed above but may involve some troubleshooting and dependency +management on the part of the user. + +In order to support historians, the python installation must include the built-in sqlite3 support (a compile time option). +This is included in all of the linux distribution packages referenced above, which is the recommended and supported way of running python. +In cases where a user needs to compile their own python (not an officially supported configuration), make sure that the sqlite3 option is enabled. + +Step 2 - Clone VOLTTRON code +============================ + + +.. _Repository-Structure: + +Repository Structure +-------------------- + +There are several options for using the VOLTTRON code depending on whether you require the most stable version of the +code or want the latest updates as they happen. In order of decreasing stability and increasing currency: + +* `Main` - Most stable release branch, current major release is 7.0. This branch is default. +* `develop` - contains the latest `finished` features as they are developed. When all features are stable, this branch + will be merged into `Main`. + + .. note:: + + This branch can be cloned by those wanting to work from the latest version of the platform but should not be + used in deployments. + +* Features are developed on “feature” branches or developers' forks of the main repository. It is not recommended to + clone these branches except for exploring a new feature. + +.. note:: + + VOLTTRON versions 6.0 and newer support two message buses - ZMQ and RabbitMQ. + +.. code-block:: bash + + git clone https://github.com/VOLTTRON/volttron --branch + + +Step 3 - Setup virtual environment +================================== + +The :ref:`bootstrap.py ` script in the VOLTTRON root directory will create a +`virtual environment `_ and install the package's Python dependencies. +Options exist for upgrading or rebuilding existing environments, and for adding additional dependencies for optional +drivers and agents included in the repository. + +.. note:: + + The :bash:`--help` option for `bootstrap.py` can specified to display all available optional parameters. + + +.. _ZeroMQ-Install: + +Steps for ZeroMQ +---------------- + +Run the following command to install all required packages: + +.. code-block:: bash + + cd + python3 bootstrap.py + +Then activate the Python virtual environment: + +.. code-block:: bash + + source env/bin/activate + +Proceed to step 4. + +.. note:: + + You can deactivate the environment at any time by running `deactivate`. + + +.. _RabbitMQ-Install: + +Steps for RabbitMQ +------------------ + +Step 1 - Install Required Packages and Activate the Virtual Environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Setting up RabbmitMQ requires additional steps; but before running those steps we still need to install the required +packages and activate the virtual environment just as we did in the Steps for ZeroMQ. To do so, see :ref:`ZeroMQ-Install`. +Once finished, proceed to the next step. + + +Step 2 - Install Erlang packages +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +For RabbitMQ based VOLTTRON, some of the RabbitMQ specific software packages have to be installed. + + +On Debian based systems and CentOS 6/7 +"""""""""""""""""""""""""""""""""""""" + +If you are running a Debian or CentOS system, you can install the RabbitMQ dependencies by running the +"rabbit_dependencies.sh" script, passing in the OS name and appropriate distribution as parameters. The +following are supported: + +* `debian bionic` (for Ubuntu 18.04) + +* `debian xenial` (for Ubuntu 16.04 or Linux Mint 18.04) + +* `debian stretch` (for Debian Stretch) + +* `debian buster` (for Debian Buster) + +* `raspbian buster` (for Raspbian/Raspberry Pi OS Buster) + +Example command: + +.. code-block:: bash + + ./scripts/rabbit_dependencies.sh debian xenial + + +Alternatively +""""""""""""" + +You can download and install Erlang from `Erlang Solutions `_. +Please include OTP/components - ssl, public_key, asn1, and crypto. +Also lock your version of Erlang using the `yum-plugin-versionlock `_. + +.. note:: + Currently VOLTTRON only officially supports specific versions of Erlang for each operating system: + * 1:22.1.8.1-1 for Debian + * 1:21.2.6+dfsg-1 for Raspbian + * Specific Erlang 21.x versions correspond to CentOS versions 6, 7, and 8, these can be found + `here `_ + + +Step 3 - Configure hostname +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Make sure that your hostname is correctly configured in /etc/hosts. +See this `StackOverflow post `_. +If you are testing with VMs make please make sure to provide unique host names for each of the VMs you are using. + +The hostname should be resolvable to a valid IP when running on bridged mode. RabbitMQ checks for this during initial +boot. Without this (for example, when running on a VM in NAT mode) RabbitMQ start-up would fail with the error "unable +to connect to empd (port 4369) on ." + +.. note:: + + RabbitMQ startup error would show up in the VM's syslog (/var/log/messages) file and not in RabbitMQ logs + (/var/log/rabbitmq/rabbitmq@hostname.log) + + +Step 4 - Bootstrap the environment +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + cd volttron + python3 bootstrap.py --rabbitmq [optional install directory. defaults to /rabbitmq_server] + +This will build the platform and create a virtual Python environment and dependencies for RabbitMQ. It also installs +RabbitMQ server as the current user. If an install path is provided, that path should exist and the user should have +write permissions. RabbitMQ will be installed under `/rabbitmq_server-3.7.7`. The rest of the +documentation refers to the directory `/rabbitmq_server-3.7.7` as `$RABBITMQ_HOME`. + +.. note:: + + There are many additional :ref:`options for bootstrap.py ` for including dependencies, altering + the environment, etc. + +By bootstrapping the environment for RabbitMQ, an environmental variable $RABBITMQ_HOME is created for your convenience. +Thus, you can use $RABBITMQ_HOME to see if the RabbitMQ server is installed by checking its status: + +.. code-block:: bash + + $RABBITMQ_HOME/sbin/rabbitmqctl status + +.. note:: + + The `RABBITMQ_HOME` environment variable can be set in ~/.bashrc. If doing so, it needs to be set to the RabbitMQ + installation directory (default path is `/rabbitmq_server/rabbitmq_server-3.7.7`) + +.. code-block:: bash + + echo 'export RABBITMQ_HOME=$HOME/rabbitmq_server/rabbitmq_server-3.7.7'|sudo tee --append ~/.bashrc + source ~/.bashrc + $RABBITMQ_HOME/sbin/rabbitmqctl status + + +Step 5 - Configure RabbitMQ setup for VOLTTRON +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. code-block:: bash + + vcfg --rabbitmq single [optional path to rabbitmq_config.yml] + +A sample configuration file can be found in the VOLTTRON repository in **examples/configurations/rabbitmq/rabbitmq_config.yml**. +At a minimum you will need to provide the host name and a unique common-name (under certificate-data) in the configuration file. + +.. note:: + + common-name must be unique and the general convention is to use `-root-ca`. + +Running the above command without the optional configuration file parameter will cause the user user to be prompted for +all the required data in the command prompt. "vcfg" will use that data to generate a rabbitmq_config.yml file in the +:term:`VOLTTRON_HOME` directory. + +.. note:: + + If the above configuration file is being used as a basis for creating your own configuration file, be sure to update + it with the hostname of the deployment (this should be the fully qualified domain name of the system). + +This script creates a new virtual host and creates SSL certificates needed for this VOLTTRON instance. These +certificates get created under the subdirectory "certificates" in your VOLTTRON home (typically in ~/.volttron). It +then creates the main VIP exchange named "volttron" to route message between the platform and agents and alternate +exchange to capture unrouteable messages. + +.. note:: + + We configure the RabbitMQ instance for a single :term:`VOLTTRON_HOME` and :term:`VOLTTRON_INSTANCE`. This script + will confirm with the user the volttron_home to be configured. The VOLTTRON instance name will be read from + `volttron_home/config` if available, if not the user will be prompted for VOLTTRON instance name. To run the + scripts without any prompts, save the the VOLTTRON instance name in volttron_home/config file and pass the VOLTTRON + home directory as a command line argument. For example: + + .. code-block:: bash + + vcfg --vhome /home/vdev/.new_vhome --rabbitmq single + +.. note:: + + The default behavior generates a certificate which is valid for a period of 1 year. + +The Following are the example inputs for `vcfg --rabbitmq single` command. Since no config file is passed the script +prompts for necessary details. + +.. code-block:: console + + Your VOLTTRON_HOME currently set to: /home/vdev/new_vhome2 + + Is this the volttron you are attempting to setup? [Y]: + Creating rmq config yml + RabbitMQ server home: [/home/vdev/rabbitmq_server/rabbitmq_server-3.7.7]: + Fully qualified domain name of the system: [cs_cbox.pnl.gov]: + + Enable SSL Authentication: [Y]: + + Please enter the following details for root CA certificates + Country: [US]: + State: Washington + Location: Richland + Organization: PNNL + Organization Unit: Volttron-Team + Common Name: [volttron1-root-ca]: + Do you want to use default values for RabbitMQ home, ports, and virtual host: [Y]: N + Name of the virtual host under which RabbitMQ VOLTTRON will be running: [volttron]: + AMQP port for RabbitMQ: [5672]: + http port for the RabbitMQ management plugin: [15672]: + AMQPS (SSL) port RabbitMQ address: [5671]: + https port for the RabbitMQ management plugin: [15671]: + INFO:rmq_setup.pyc:Starting rabbitmq server + Warning: PID file not written; -detached was passed. + INFO:rmq_setup.pyc:**Started rmq server at /home/vdev/rabbitmq_server/rabbitmq_server-3.7.7 + INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost + INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost + INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost + INFO:rmq_setup.pyc: + Checking for CA certificate + + INFO:rmq_setup.pyc: + Root CA (/home/vdev/new_vhome2/certificates/certs/volttron1-root-ca.crt) NOT Found. Creating root ca for volttron instance + Created CA cert + INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost + INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost + INFO:rmq_setup.pyc:**Stopped rmq server + Warning: PID file not written; -detached was passed. + INFO:rmq_setup.pyc:**Started rmq server at /home/vdev/rabbitmq_server/rabbitmq_server-3.7.7 + INFO:rmq_setup.pyc: + + ####################### + + Setup complete for volttron home /home/vdev/new_vhome2 with instance name=volttron1 + Notes: + + - Please set environment variable `VOLTTRON_HOME` to `/home/vdev/new_vhome2` before starting volttron + + - On production environments, restrict write access to + /home/vdev/new_vhome2/certificates/certs/volttron1-root-ca.crt to only admin user. For example: sudo chown root /home/vdev/new_vhome2/certificates/certs/volttron1-root-ca.crt + + - A new admin user was created with user name: volttron1-admin and password=default_passwd. + You could change this user's password by logging into Please update /home/vdev/new_vhome2/rabbitmq_config.yml if you change password + + ####################### + + +Test the VOLTTRON Deployment +============================ + +We are now ready to start VOLTTRON instance. If configured with RabbitMQ message bus a config file would have been +generated in `$VOLTTRON_HOME/config` with the entry ``message-bus=rmq``. If you need to revert back to ZeroMQ based +VOLTTRON, you will have to either remove the ``message-bus`` parameter or set it to the default "zmq" in +`$VOLTTRON_HOME/config`. + +The following command starts volttron process in the background: + +.. code-block:: bash + + volttron -vv -l volttron.log& + +This enters the virtual Python environment and then starts the platform in debug (vv) mode with a log file +named volttron.log. Alternatively you can use the utility script start-volttron script that does the same. + +.. code-block:: bash + + ./start-volttron + +To stop the platform, use the `vct` command: + +.. code-block:: bash + + volttron-ctl shutdown --platform + +or use the included `stop-volttron` script: + +.. code-block:: bash + + ./stop-volttron + + +.. warning:: + If you plan on running VOLTTRON in the background and detaching it from the + terminal with the ``disown`` command be sure to redirect stderr and stdout to ``/dev/null``. + Some libraries which VOLTTRON relies on output directly to stdout and stderr. + This will cause problems if those file descriptors are not redirected to ``/dev/null`` + + :: + + #To start the platform in the background and redirect stderr and stdout + #to /dev/null + volttron -vv -l volttron.log > /dev/null 2>&1& + + +Installing and Running Agents +----------------------------- + +VOLTTRON platform comes with several built in services and example agents out of the box. To install a agent +use the script `install-agent.py` + +.. code-block:: bash + + python scripts/install-agent.py -s [-c ] + + +For example, we can use the command to install and start the Listener Agent - a simple agent that periodically publishes +heartbeat message and listens to everything on the message bus. Install and start the Listener agent using the +following command: + +.. code-block:: bash + + python scripts/install-agent.py -s examples/ListenerAgent --start + + +Check volttron.log to ensure that the listener agent is publishing heartbeat messages. + +.. code-block:: bash + + tail volttron.log + +.. code-block:: console + + 2016-10-17 18:17:52,245 (listeneragent-3.2 11367) listener.agent INFO: Peer: 'pubsub', Sender: 'listeneragent-3.2_1':, Bus: u'', Topic: 'heartbeat/listeneragent-3.2_1', Headers: {'Date': '2016-10-18T01:17:52.239724+00:00', 'max_compatible_version': u'', 'min_compatible_version': '3.0'}, Message: {'status': 'GOOD', 'last_updated': '2016-10-18T01:17:47.232972+00:00', 'context': 'hello'} + + +You can also use the `volttron-ctl` (or `vctl`) command to start, stop or check the status of an agent + +.. code-block:: console + + (volttron)volttron@volttron1:~/git/rmq_volttron$ vctl status + AGENT IDENTITY TAG STATUS HEALTH + 6 listeneragent-3.2 listeneragent-3.2_1 running [13125] GOOD + f platform_driveragent-3.2 platform.driver platform_driver + +.. code-block:: bash + + vctl stop + + +.. note:: + + The default working directory is ~/.volttron. The default directory for creation of agent packages is + `~/.volttron/packaged` + + +Next Steps +========== + +There are several walk-throughs and detailed explanations of platform features to explore additional aspects of the +platform: + +* :ref:`Agent Framework ` +* :ref:`Driver Framework ` +* Demonstration of the :ref:`management UI ` +* :ref:`RabbitMQ setup ` with Federation and Shovel plugins diff --git a/docs/source/overview/DefinitionOfTerms.rst b/docs/source/overview/DefinitionOfTerms.rst deleted file mode 100644 index 44298a6b50..0000000000 --- a/docs/source/overview/DefinitionOfTerms.rst +++ /dev/null @@ -1,83 +0,0 @@ -.. _definitions: - -=================== -Definition of Terms -=================== - -This page lays out a common terminology for discussing the components and -underlying technologies used by the platform. The first -section discusses capabilities and industry standards that volttron -conforms to while the latter is specific to the VOLTTRON domain. - -Industry Terms -~~~~~~~~~~~~~~ - -- **BACNet**: Building Automation and Control network, that leverages ASHRAE, ANSI, and IOS 16484-5 standard protocols. -- **JSON-RPC**: JSON-encoded remote procedure call -- **JSON**: JavaScript object notation is a text-based, human-readable, open data interchange format, similar to XML, but less verbose -- **Publish/subscribe**: A message delivery pattern where senders (publishers) and receivers (subscribers) do not communicate directly nor necessarily have knowledge of each other, but instead exchange messages through an intermediary based on a mutual class or topic -- **ZeroMQ or ØMQ**: A library used for inter-process and inter-computer communication -- **Modbus**: Communications protocol for talking with industrial electronic devices -- **SSH**: Secure shell is a network protocol providing encryption and authentication of data using public-key cryptography -- **SSL**: Secure sockets layer is a technology for encryption and authentication of network traffic based on a chain of trust -- **TLS**: Transport layer security is the successor to SSL - - -VOLTTRON Terms -~~~~~~~~~~~~~~ - - .. _activated-environment: - - Activated Environment - An activated environment is the environment a VOLTTRON instance is run in. - The bootstrap process creates the environment from the shell and to activate - it the following command is executed. - - .. code-block:: bash - - user@computer> source env/bin/activate - - # Note once the above command has been run the prompt will have changed - (volttron)user@computer> - - .. _bootstrap-environment: - - Bootstrap Environment - The process by which an operating environment (activated environment) - is produced. From the :ref:`VOLTTRON_ROOT` directory executing - ``python bootstrap.py`` will start the bootstrap process. - - .. _VOLTTRON_HOME: - - VOLTTRON_HOME - The location for a specific :ref:`VOLTTRON_INSTANCE` to store its specific - information. There can be many VOLTTRON_HOMEs on a single computing - resource(VM, machine, etc.) - - .. _VOLTTRON_INSTANCE: - - VOLTTRON_INSTANCE - A single volttron process executing instructions on a computing resource. - For each VOLTTRON_INSTANCE there WILL BE only one :ref:`VOLTTRON_HOME` - associated with it. In order for a VOLTTRON_INSTANCE to be able to - participate outside its computing resource it must be bound to an - external ip address. - - .. _VOLTTRON_ROOT: - - VOLTTRON_ROOT - The cloned directory from github. When executing the command - - .. code-block:: bash - - git clone http://github.com/VOLTTRON/volttron - - the top volttron folder is the VOLTTRON_ROOT - - .. _VIP: - - VIP - VOLTTRON Interconnect Protocol is a secure routing protocol that facilitates - communications between agents, controllers, services and the supervisory - :ref:`VOLTTRON_INSTANCE`. - diff --git a/docs/source/overview/agents-overview.rst b/docs/source/overview/agents-overview.rst deleted file mode 100644 index 7bca29594f..0000000000 --- a/docs/source/overview/agents-overview.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _agents-overview: - -====================== -Agents in the Platform -====================== - -Agents deployed on VOLTTRON can perform one or more roles which can be broadly classified into the following groups: - -- Platform Agents: Agents which are part of the platform and provide a service to other agents. Examples are agents which interface with devices to publish readings and handle control signals from other agents. -- Cloud Agents: These agents represent a remote application which needs access to the messages and data on the platform. This agent would subscribe to topics of interest to the remote application and would also allow it publish data to the platform. -- Control Agents: These agents control the devices of interest and interact with other resources to achieve some goal. - -Platform Services: - -- Message Bus: All agents and services publish and subscribe to topics on the message bus. This provides a single interface that abstracts the details of devices and agents from each other. Components in the platform basically produce and consume events. -- Weather Information: This agent periodically retrieves data from the Weather Underground site. It then reformats it and publishes it out to the platform on a weather topic. -- Modbus-based device interface: The Modbus driver publishes device data onto the message bus. It also handles the locking of devices to prevent multiple conflicting directives. -- Application Scheduling: This service allows the scheduling of agents’ access to devices in order to prevent conflicts. -- Logging service: Agents can publish arbitrary strings to a logging topic and this service will push them to a historian for later analysis. - diff --git a/docs/source/overview/components.rst b/docs/source/overview/components.rst deleted file mode 100644 index 88db8e3743..0000000000 --- a/docs/source/overview/components.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. _components: - -========== -Components -========== - -An overview of the VOLTTRON platform components is illustrated in the figure below. The platform -comprises several components -and agents that provide services to other agents. Of these components, the Information Exchange Bus (IEB), -or :ref:`Message Bus ` is central to the -platform. All other VOLTTRON components communicate through it using the publish/subscribe paradigm over a variety of -topics. - -:ref:`Drivers ` communicate with devices allowing their data to be published on the IEB. -Agents can control devices by interacting with the :ref:`Actuator Agent ` to schedule and send commands. -The :ref:`Historian ` framework takes data published on the messages bus and stores it to a database, -file, or sends it to another location. - -The agent lifecycle is controlled by the Agent Instantiation and Packaging (AIP) component which launches agents in an -Agent Execution Environment. This isolates agents from the platform while allowing them to interact with the IEB. - - -|Overview of the VOLTTRON platform| - -.. |Overview of the VOLTTRON platform| image:: files/overview.png diff --git a/docs/source/overview/index.rst b/docs/source/overview/index.rst deleted file mode 100644 index 36a259fce3..0000000000 --- a/docs/source/overview/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _overview: - -======== -Overview -======== - -VOLTTRON™ is an open-source distributed control and sensing platform for integrating buildings -and the power grid. VOLTTRON connects devices, agents in the platform, agents in the Cloud, and -signals from the power grid. The platform also supports use cases such as demand response and -integration of distributed renewable energy sources. - -VOLTTRON provides an environment for agent execution and serves as a single point of contact for -interfacing with devices (rooftop units, building systems, meters, etc.), external resources, and platform -services such as data archival and retrieval. VOLTTRON applications are referred to as agents since -VOLTTRON provides an agent-based programming paradigm to ease application development and -minimize the lines of code that need to be written by domain experts such as buildings engineers. -VOLTTRON provides a collection of utility and helper classes that simplifies agent development. - -The VOLTTRON white paper provides an overview of the capabilities of the platform: -https://volttron.org/sites/default/files/publications/PNNL-25499_VOLTTRON_2016.pdf - - -.. toctree:: - :glob: - :maxdepth: 1 - - background - components - agents-overview - DefinitionOfTerms - version-history - license - diff --git a/docs/source/overview/version-history.rst b/docs/source/overview/version-history.rst deleted file mode 100644 index db0ce7f199..0000000000 --- a/docs/source/overview/version-history.rst +++ /dev/null @@ -1,96 +0,0 @@ -.. _version-history: - -=============== -Version History -=============== - -VOLTTRON 1.0 – 1.2 - -- Agent execution platform -- Message bus -- Modbus and BACnet drivers -- Historian -- Data logger -- Device scheduling -- Device actuation -- Multi-node communication -- Weather service - -VOLTTRON 2.0 - -- Advanced Security Features -- Guaranteed resource allocation to agents using execution contracts -- Signing and verification of agent packaging -- Agent mobility -- Admin can send agents to another platform -- Agent can request to move -- Enhanced command framework - -VOLTTRON 3.0 - -- Modularize Data Historian -- Modularize Device Drivers -- Secure and accountable communication using the VIP -- Web Console for Monitoring and Administering VOLTTRON Deployments - -VOLTTRON 4.0 - -- Documentation moved to ReadTheDocs -- VOLTTRON Configuration Wizard -- Configuration store to dynamically configure agents -- Aggregator agent for aggregating topics -- More reliable remote install mechanism -- UI for device configuration -- Automatic registration of VOLTTRON instances with management agent - - -VOLTTRON 5.0 - -- Tagging service for attaching metadata to topics for simpler retrieval -- Message bus performance improvement -- Multi-platform publish/subscribe for simpler coordination across platforms -- Drivers contributed back for SEP 2.0 and ChargePoint EV - -VOLTTRON 6.0 - -- Maintained backward compatibility with communication between zmq and rmq deployments. -- Added DarkSky Weather Agent -- Web Based Additions -- Added CSR support for multiplatform communication -- Added SSL support to the platform for secure communication -- Backported SSL support to zmq based deployments. -- Upgraded VC to use the platform login. -- Added docker support to the test environment for easier Rabbitmq testing. -- Updated volttron-config (vcfg) to support both RabbitMQ and ZMQ including https based instances. -- Added test support for RabbitMQ installations of all core agents. -- Added multiplatform (zmq and rmq based platform) testing. -- Integrated RabbitMQ documentation into the core documentation. - -VOLTTRON 7.0rc1 - -Python3 Upgrade - - - Update libraries to appropriate and compatible versions - - String handling efficiency - - Encode/Decode of strings has been simplified and centralized - - Added additional test cases for frame serialization in ZMQ - - Syntax updates such difference in handling exceptions, dictionaries, sorting lists, pytest markers etc. - - Made bootstrap process simpler - - Resolved gevent monkey patch issues when using third party libraries - -RabbitMQ Message Bus - - - Client code for integrating non-VOLTTRON applications with the message bus - available at: https://github.com/VOLTTRON/external-clients-for-rabbitmq - - Includes support for MQTT, non-VOLTTRON Python, and Java-based RabbitMQ - clients - -Config store secured - - - Agents can prevent other agents from modifying their configuration store entry - -Known Issues which will be dealt with for the final release: - - - Python 3.7 has conflicts with some libraries such as gevent - - The VOLTTRON Central agent is not fully integrated into Python3 - - CFFI library has conflicts on the Raspian OS which interferes with bootstrapping diff --git a/docs/source/platform-features/config-store/agent-configuration-store.rst b/docs/source/platform-features/config-store/agent-configuration-store.rst new file mode 100644 index 0000000000..fbf55ecd3c --- /dev/null +++ b/docs/source/platform-features/config-store/agent-configuration-store.rst @@ -0,0 +1,373 @@ +.. _Agent-Configuration-Store: + +========================= +Agent Configuration Store +========================= + +This document describes the configuration store feature and explains how an agent uses it. + +The configuration store enables users to store agent configurations on the platform and allows the agent to +automatically retrieve them during runtime. Users may update the configurations and the agent will automatically be +informed of the changes. + + +Compatibility +============= + +Supporting the configuration store will *not* be required by Agents, however the usage will be strongly encouraged as it +should substantially improve user experience. + +The previous method for configuring an agent will still be available to agents (and in some cases required), however +agents can be created to only work with the configuration store and not support the old method at all. + +It will be possible to create an agent to use the traditional method for configuration to establish defaults if no +configuration exist in the platform configuration store. + + +Configuration Names and Paths +============================= + +Any valid OS file path name is a valid configuration name. Any leading or trailing "/", "\" and whitespace is removed +by the store. + +The canonical name for the main agent configuration is `config`. + +The configuration subsystem remembers the case of configuration names. Name matching is case insensitive both on the +Agent and platform side. Configuration names are reported to agent callbacks in the original case used when adding them +to the configuration. If a new configuration is store with a different case of an existing name the new name case is +used. + + +Configuration Ownership +======================= + +Each configuration belongs to one agent and one agent only. When an agent refers to a configuration file via it's path +it does not need to supply any information about its identity to the platform in the file path. The only configurations +an agent has direct access to are it's own. The platform will only inform the owning agent configuration changes. + + +Configuration File Types +======================== + +Configurations files come in three types: `json`, `csv`, and `raw`. The type of a configuration file is declared when +it is added to or changed in the store. + +The parser assumes the first row of every CSV file is a header. + +Invalid JSON or CSV files are rejected at the time they are added to the store. + +Raw files are unparsed and accepted as is. + +Other parsed types may be added in the future. + + +Configuration File Representation to Agents +=========================================== + +JSON +---- + +A JSON file is parsed and represented as appropriate data types to the requester. + +Consider a file with the following contents: + +.. code-block:: json + + { + "result": "PREEMPTED", + "info": null, + "data": { + "agentID": "my_agent", + "taskID": "my_task" + } + } + +The file will be parsed and presented as a dictionary with 3 values to the requester. + + +CSV +--- + +A CSV file is represented as a list of objects. Each object represents a row in the CSV file. + +For instance this (simplified) CSV file: + +.. csv-table:: Example CSV + :header: Volttron Point Name,Modbus Register,Writable,Point Address + + ReturnAirCO2,>f,FALSE,1001 + ReturnAirCO2Stpt,>f,TRUE,1011 + +will be represented like this: + +.. code-block:: json + + [ + { + "Volttron Point Name": "ReturnAirCO2", + "Modbus Register": ">f", + "Writable": "FALSE", + "Point Address": "1001" + }, + { + "Volttron Point Name": "ReturnAirCO2Stpt", + "Modbus Register": ">f", + "Writable": "TRUE", + "Point Address": "1011" + } + ] + + +Raw +--- + +Raw files are represented as a string containing the contents of the file. + + +File references +=============== + +The `Platform Configuration Store` supports referencing one configuration file from another. If a referenced file +exists the contents of that file will replace the file reference when the file is sent to the owning agent. Otherwise +the reference will be replaced with None. + +Only configurations that are parsed by the platform (currently "json" or "csv") will be examined for references. If the +file referenced is another parsed file type (JSON or CSV, currently) then the replacement will be the parsed contents of +the file. + +In a JSON object the name of a value will never be considered a reference. + +A file reference is any value string that starts with ``config://``. The rest of the string is the path in the config +store to that configuration. The config store path is converted to lower case for comparison purposes. + +Consider the following configuration files named `devices/vav1.config` and `registries/vav.csv`, respectively: + +.. code-block:: json + + { + "driver_config": {"device_address": "10.1.1.5", + "device_id": 500}, + + "driver_type": "bacnet", + "registry_config":"config://registries/vav.csv", + "campus": "pnnl", + "building": "isb1", + "unit": "vav1" + } + +.. csv-table:: vav.csv + :header: Volttron Point Name,Modbus Register,Writable,Point Address + + ReturnAirCO2,>f,FALSE,1001 + ReturnAirCO2Stpt,>f,TRUE,1011 + +The resulting configuration returns when an agent asks for `devices/vav1.config`. The Python object will have the +following configuration: + +.. code-block:: python + + { + "driver_config": {"device_address": "10.1.1.5", + "device_id": 500}, + + "driver_type": "bacnet", + "registry_config":[ + { + "Volttron Point Name": "ReturnAirCO2", + "Modbus Register": ">f", + "Writable": "FALSE", + "Point Address": "1001" + }, + { + "Volttron Point Name": "ReturnAirCO2Stpt", + "Modbus Register": ">f", + "Writable": "TRUE", + "Point Address": "1011" + } + ], + "campus": "pnnl", + "building": "isb1", + "unit": "vav1" + } + +Circular references are not allowed. Adding a file that creates a circular reference will cause that file to be +rejected by the platform. + +If a file is changed in anyway (`NEW`, `UPDATE`, or `DELETE`) and that file is referred to by another file then the +platform considers the referring configuration as changed. The configuration subsystem on the Agent will call every +callback listening to a file or any file referring to that file either directly or indirectly. + + +Agent Configuration Sub System +============================== + +The configuration store shall be implemented on the Agent(client) side in the form of a new subsystem called config. + +The subsystem caches configurations as the platform updates the state to the agent. Changes to the cache triggered by +an RPC call from the platform will trigger callbacks in the agent. + +No callback methods are called until the `onconfig` phase of agent startup. A new phase to agent startup called +`onconfig` will be added to the `Core `class. Originally it was planned to have this run after the `onstart` phase has +completed but that is currently not possible. Ideally if an agent is using the config store feature it will not need +any `onstart` methods. + +When the `onconfig` phase is triggered the subsystem will retrieve the current configuration state from the platform and +call all callbacks registered to a configuration in the store to the `NEW` action. No callbacks are called before this +point in agent startup. + +The first time callbacks are called at agent startup any callbacks subscribed to a configuration called `config` are +called first. + + +Configuration Subsystem Agent Methods +------------------------------------- + +These methods are part of the interface available to the Agent. + + **config.get( config_name="config")** - Get the contents of a configuration. + If no name is provided the contents of the main agent configuration "config" is returned. This may not be called + before `onstart` methods are called. If called during the `onstart` phase it will trigger the subsystem to + initialize early but will not trigger any callbacks. + + **config.subscribe(callback, action=("NEW", "UPDATE", "DELETE"), pattern="*")** - Sets up a callback for handling a + configuration change. The platform will automatically update the agent when a configuration changes ultimately + triggering all callbacks that match the pattern specified. The action argument describes the types of configuration + change action that will trigger the callback. Possible actions are `NEW`, `UPDATE`, and `DELETE` or a tuple of any + combination of actions. If no action is supplied the callback happens for all changes. A list of actions can be + supplied if desired. If no file name pattern is supplied then the callback is called for all configurations. The + pattern is an regex used match the configuration name. + + The callback will also be called if any file referenced by a configuration file is changed. + + The signature of the callback method is ``callback(config_name, action, contents)`` where `file_name` is the file + that triggered the callback, action is the action that triggered the callback, and contents are the new contents of + the configuration. Contents will be ``None`` on a `DELETE` action. All callbacks registered for `NEW` events will + be called at agent startup after all `osntart` methods have been called. Unlike pubsub subscriptions, this may be + called at any point in an agent's lifetime. + + **config.unsubscribe(callback=None, config_name_pattern=None)** - Unsubscribe from configuration changes. + Specifying a callback only will unsubscribe that callback from all config name patterns they have been bound to. + If a pattern only is specified then all callbacks bound to that pattern will be removed. Specifying both will + remove that callback from that pattern. Calling with no arguments will remove all subscriptions. + + **config.unsubscribe_all()** - Unsubscribe from all configuration changes. + + **config.set( config_name, contents, trigger_callback=False )** - Set the contents of a configuration. This may not + be called before `onstart` methods are called. This can be used by an agent to store agent state across agent + installations. This will *NOT* trigger any callbacks unless `trigger_callback` is set to `True`. To prevent + deadlock with the platform this method may not be called from a configuration callback function. Doing so will + raise a `RuntimeError` exception. + + This will not modify the local configuration cache the Agent maintains. It will send the configuration change to + the platform and rely on the subsequent `update_config` call. + + **config.delete( config_name, trigger_callback=False)** - Remove the configuration from the store. This will *NOT* + trigger any callbacks unless trigger_callback is `True`. To prevent deadlock with the platform this method may not + be called from a configuration callback function. Doing so will raise a `RuntimeError` exception. + + **config.list( )** - Returns a list of configuration names. + + **config.set_default(config_name, contents, trigger_callback=False)** - Set a default value for a configuration. + *DOES NOT* modify the platform's configuration store but creates a default configuration that is used for agent + configuration callbacks if the configuration does not exist in the store or the configuration is deleted from the + store. The callback will only be triggered if `trigger_callback` is true and the configuration store subsystem on + the agent is not aware of a configuration with that name from the platform store. + + Typically this will be called in the `__init__` method of an agent with the parsed contents of the packaged + configuration file. This may not be called from a configuration callback. Doing so will raise a `RuntimeError`. + + **config.delete_default(config_name, trigger_callback=False)** - Delete a default value for a configuration. This + method is included for for completeness and is unlikely to be used in agent code. This may not be called from a + configuration callback. Doing so will raise a `RuntimeError`. + + +Configuration Sub System RPC Methods +------------------------------------ + +These methods are made available on each agent to allow the platform to communicate changes to a configuration to the +affected agent. As these methods are not part of the exposed interface they are subject to change. + +**config.update( config_name, action, contents=None, trigger_callback=True)** - called by the platform when a +configuration was changed by some method other than the Agent changing the configuration itself. Trigger callback tells +the agent whether or not to call any callbacks associate with the configuration. + + +Notes on trigger_callback +------------------------- + +As the configuration subsystem calls all callbacks in the `onconfig` phase and none are called beforehand the +`trigger_callback` setting is effectively ignored if an agent sets a configuration or default configuration before the +end of the `onstart` phase. + + +Platform Configuration Store +============================ + +The platform configuration store handles the storage and maintenance of configuration states on the platform. + +As these methods are not part of the exposed interface they are subject to change. + + +Platform RPC Methods +-------------------- + + +Methods for Agents +^^^^^^^^^^^^^^^^^^ + +Agent methods that change configurations do not trigger any callbacks unless trigger_callback is True. + +**set_config(config_name, contents, trigger_callback=False)** - Change/create a configuration file on the platform. + +**get_configs()** - Get all of the configurations for an Agent. + +**delete_config(config_name, trigger_callback=False)** - Delete a configuration. + + +Methods for Management +^^^^^^^^^^^^^^^^^^^^^^ + +**manage_store_config(identity, config_name, contents, config_type="raw")** - Change/create a configuration on the +platform for an agent with the specified identity + +**manage_delete_config(identity, config_name)** - Delete a configuration for an agent with the specified identity. +Calls the agent's update_config with the action `DELETE_ALL` and no configuration name. + +**manage_delete_store(identity)** - Delete all configurations for a :term:`VIP Identity`. + +**manage_list_config(identity)** - Get a list of configurations for an agent with the specified identity. + +**manage_get_config(identity, config_name, raw=True)** - Get the contents of a configuration file. If raw is set to +`True` this function will return the original file, otherwise it will return the parsed representation of the file. + +**manage_list_stores()** - Get a list of all the agents with configurations. + + +Direct Call Methods +^^^^^^^^^^^^^^^^^^^ + +Services local to the platform who wish to use the configuration store may use two helper methods on the agent class +created for this purpose. This allows the auth service to use the config store before the router is started. + +**delete(self, identity, config_name, trigger_callback=False)** - Same as functionality as `delete_config`, but the +caller must specify the identity of the config store. + +**store(self, identity, config_name, contents, trigger_callback=False)** - Same functionality as set_config, but the +caller must specify the identity of the config store. + + +Command Line Interface +^^^^^^^^^^^^^^^^^^^^^^ + +The command line interface will consist of a new commands for the `volttron-ctl` program called `config` with four +sub-commands called `store`, `delete`, `list`, `get`. These commands will map directly to the management RPC functions +in the previous section. + + +Disabling the Configuration Store +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Agents may optionally disable support for the configuration store by passing ``enable_store=False`` to the `__init__` +method of the Agent class. This allows temporary agents to not spin up the subsystem when it is not needed. Platform +service agents that do not yet support the configuration store and the temporary agents used by `volttron-ctl` will set +this value. diff --git a/docs/source/core_services/config_store/Commandline-Interface.rst b/docs/source/platform-features/config-store/commandline-interface.rst similarity index 95% rename from docs/source/core_services/config_store/Commandline-Interface.rst rename to docs/source/platform-features/config-store/commandline-interface.rst index ea9c729027..a8b3d10970 100644 --- a/docs/source/core_services/config_store/Commandline-Interface.rst +++ b/docs/source/platform-features/config-store/commandline-interface.rst @@ -1,8 +1,12 @@ -Configuration Store Command Line Tools -====================================== +.. _Commandline-Interface: + +=============================== +Config Store Command Line Tools +=============================== Command line management of the Configuration Store is done with the `vctl config` sub-commands. + Store Configuration ------------------- @@ -22,6 +26,7 @@ Optionally you may specify the file type of the file. Defaults to ``--json``. - ``--csv`` - Interpret the file as CSV. - ``--raw`` - Interpret the file as raw data. + Delete Configuration -------------------- @@ -58,6 +63,7 @@ By default this command will return the json representation of what is stored. - ``--raw`` - Return the raw version of the file. + List Configurations ------------------- @@ -94,6 +100,3 @@ The configuration must exist in the store to be edited. By default `edit` will try to open the file with the `nano` editor. The `edit` command will respect the `EDITOR` environment variable. You may override this with the `--editor` option. - - - diff --git a/docs/source/core_services/config_store/Configuration-Store.rst b/docs/source/platform-features/config-store/configuration-store.rst similarity index 62% rename from docs/source/core_services/config_store/Configuration-Store.rst rename to docs/source/platform-features/config-store/configuration-store.rst index a3bd520196..99e430abd6 100644 --- a/docs/source/core_services/config_store/Configuration-Store.rst +++ b/docs/source/platform-features/config-store/configuration-store.rst @@ -1,57 +1,60 @@ -============================ -Platform Configuration Store -============================ +.. _Configuration-Store: + +=================== +Configuration Store +=================== The Platform Configuration Store is a mechanism provided by the platform to facilitate the dynamic configuration -of agents. The Platform Configuration Store works by informing agents of changes to their configuration store and +of agents. The Platform Configuration Store works by informing agents of changes to their configuration store and the agent responding to those changes by updating any settings, subscriptions, or processes that are affected by the configuration of the Agent. -**Support for the Configuration Store is not automatically available in existing agents.** An agent must be updated -in order to support this feature. Currently only the Master Driver Agent, the Aggregation Agent, and the Actuator -Agent support the Configuration Store. Configurations and Agents -************************* +========================= Each agent has it's own configuration store (or just store). Agents are not given access to any other agent's store. The existence of a store is not dependent on the existence of an agent installed on the platform. -Each store has a unique identity. Stores are matched to agents at agent runtime via the agent's VIP IDENTITY. +Each store has a unique identity. Stores are matched to agents at agent runtime via the agent's :term:`VIP Identity`. Therefore the store for an agent is the store with the same identity as the agent's VIP IDENTITY. -When a user updates a configuration in the store the platform immediately informs the agent of the change. -The platform will not send another update until the Agent finishes processing the first. The platform -will send updates to the agent, one file at a time, in the order the changes were received. +When a user updates a configuration in the store the platform immediately informs the agent of the change. The platform +will not send another update until the Agent finishes processing the first. The platform will send updates to the +agent, one file at a time, in the order the changes were received. + Configuration Names -******************* +=================== -Every configuration in an agent's store has a unique name. When a configuration is added to an agent's store -with the same name as an existing configuration it will replace the existing configuration. The store will +Every configuration in an agent's store has a unique name. When a configuration is added to an agent's store +with the same name as an existing configuration it will replace the existing configuration. The store will remove any leading or trailing whitespace, "/", and "\\" from the name. + Configuration File Types -************************ +======================== -The configuration store will automatically parse configuration files before presenting them to an agent. Additionally, the -configuration store does support storing raw data and giving to the agent unparsed. Most Agents will require the -configuration to be parsed. Any Agent that requires raw data will specifically mention the requirement in its documentation. +The configuration store will automatically parse configuration files before presenting them to an agent. Additionally, +the configuration store does support storing raw data and giving to the agent unparsed. Most Agents will require the +configuration to be parsed. Any Agent that requires raw data will specifically mention the requirement in its +documentation. -This system removes the requirement that configuration files for an agent be in a specific format. For instance -a registry configuration for a driver may be JSON instead of CSV if that is more convenient for the user. This +This system removes the requirement that configuration files for an agent be in a specific format. For instance +a registry configuration for a driver may be JSON instead of CSV if that is more convenient for the user. This will work as long as the JSON parses into an equivalent set of objects as an appropriate CSV file. Currently the store supports parsing JSON and CSV files with support for more files types to come. + JSON ---- The store uses the same JSON parser that agents use to parse their configuration files. Therefore it supports Python style comments and must create an object or list when parsed. -.. code-block:: json +:: { "result": "PREEMPTED", #This is a comment. @@ -94,23 +97,24 @@ Is the equivalent to this JSON file: } ] + File references -*************** +=============== -The Platform Configuration Store supports referencing one configuration file from another. If a referenced -file exists the contents of that file will replace the file reference when the file is processed by the -agent. Otherwise the reference will be replaced with null (or in Python, None). +The Platform Configuration Store supports referencing one configuration file from another. If a referenced file exists +the contents of that file will replace the file reference when the file is processed by the agent. Otherwise the +reference will be replaced with null (or in Python, ``None``). -Only configurations that are parsed by the platform (currently JSON or CSV) will be examined for -references. If the file referenced is another parsed file type (JSON or CSV, currently) then the replacement -will be the parsed contents of the file, otherwise it will be the raw contents of the file. +Only configurations that are parsed by the platform (currently JSON or CSV) will be examined for references. If the +file referenced is another parsed file type (JSON or CSV, currently) then the replacement will be the parsed contents of +the file, otherwise it will be the raw contents of the file. In a JSON object the name of a value will never be considered a reference. -A file reference is any value string that starts with "config://". The rest of the string is the name -of another configuration. The configuration name is converted to lower case for comparison purposes. +A file reference is any value string that starts with ``config://``. The rest of the string is the name of another +configuration. The configuration name is converted to lower case for comparison purposes. -Consider the following configuration files named "devices/vav1.config" and "registries/vav.csv", respectively: +Consider the following configuration files named `devices/vav1.config` and `registries/vav.csv`, respectively: .. code-block:: json @@ -131,7 +135,7 @@ Consider the following configuration files named "devices/vav1.config" and "regi ReturnAirCO2,>f,FALSE,1001 ReturnAirCO2Stpt,>f,TRUE,1011 -The resulting configuration returns when an agent asks for "devices/vav1.config". +The resulting configuration returns when an agent asks for `devices/vav1.config`. .. code-block:: python @@ -159,17 +163,24 @@ The resulting configuration returns when an agent asks for "devices/vav1.config" "unit": "vav1" } -Circular references are not allowed. Adding a file that creates a circular reference will cause -that file to be rejected by the platform. +Circular references are not allowed. Adding a file that creates a circular reference will cause that file to be rejected +by the platform. If a configuration is changed in any way and that configuration is referred to by another configuration then -the agent considers the referring configuration as changed. Thus a set of configurations with references +the agent considers the referring configuration as changed. Thus a set of configurations with references can be considered one large configuration broken into pieces for the users convenience. -Multiple configurations may all reference a single configuration. For instance, when configuring drivers -in the Master Driver you may have multiple drivers reference the same registry if appropriate. +Multiple configurations may all reference a single configuration. For instance, when configuring drivers +in the Platform Driver you may have multiple drivers reference the same registry if appropriate. + Modifying the Configuration Store -********************************* +================================= + +Currently the configuration store must be modified through the command line. See +:ref:`Commandline Interface `. + +.. toctree:: -Currently the configuration store must be modified through the command line. See :doc:`Commandline-Interface`. + commandline-interface + agent-configuration-store diff --git a/docs/source/platform-features/control/agent-management-control.rst b/docs/source/platform-features/control/agent-management-control.rst new file mode 100644 index 0000000000..e61de3bcac --- /dev/null +++ b/docs/source/platform-features/control/agent-management-control.rst @@ -0,0 +1,243 @@ +.. _Agent-Control-Commands: + +====================== +Agent Control Commands +====================== + +The VOLTTRON platform has several commands for controlling the lifecycle of agents. This page discusses how to use +them, for details of operation please see :ref:`Platform Configuration ` + +.. note:: + + These examples assume the VOLTTRON environment has been activated + + .. code-block:: bash + + . env/bin/activate + + If not activating the VOLTTRON virtual environment, add "bin/" to all commands + + +Agent Packaging +=============== + +The `vpkg` command is used for packaging and configuring agents. It is not necessary to have the platform running to +use this command. The platform uses `Python Wheel `__ for its packaging and follows +the Wheel naming `convention `__. + +To create an agent package, call: + +.. code-block:: bash + + vpkg + +For instance: ``vpkg package examples/ListenerAgent`` + +The ``package`` command uses the `setup.py` in the agent directory to create the package. The name and version number +portion of the Wheel filename come from this. The resulting wheels are created at `~/.volttron/packaged`. For example: +``~/.volttron/packaged/listeneragent-3.0-py2-none-any.whl``. + + +Agent Configuration +=================== + +Agent packages are configured with: + +.. code-block:: bash + + vpkg configure + +It is suggested that this file use JSON formatting but the agent can be written to interpret any format it requires. +The configuration of a particular agent is opaque to the VOLTTRON platform. The location of the agent config file is +passed as an environmental variable `AGENT_CONFIG` which the provided utilities read in and pass to the agent. + +An example config file passing in some parameters: + +.. code-block:: json + + { + + "agentid": "listener1", + "message": "hello" + } + + +Agent Installation and Removal +============================== + +Agents are installed into the platform using: + +.. code-block:: bash + + vctl install + +When agents are installed onto a platform, it creates a uuid for that instance of an agent. This allows multiple +instances of the same agent package to be installed on the platform. + +This allows the user to refer to the agent with ``--tag `` instead of the uuid when issuing commands. This tag can +also distinguish instances of an agent from each other. + +A stopped agent can be removed with: + +- ``vctl remove `` +- ``vctl remove --tag `` +- ``vctl remove --name `` + + +.. _Agent-Tag: + +Tagging Agents +-------------- + +Agents can be tagged as they are installed with: + +``vctl install =`` + +Agents can be tagged after installation with: + +``vctl tag `` + +Agents can be "tagged" to provide a meaningful user defined way to reference the agent instead of the uuid or the name. +This allows users to differentiate between instances of agents which use the same codebase but are configured +differently. + + +Example +^^^^^^^ + +A user installs two instances of the Listener Agent, tagged with `listen1` and `listen2` respectively: + +.. code-block:: bash + + python scripts/install-agent.py -s examples/ListenerAgent --tag listener1 + python scripts/install-agent.py -s examples/ListenerAgent --tag listener2 + +``vctl status`` displays: + +.. code-block:: console + + AGENT IDENTITY TAG STATUS HEALTH + a listeneragent-3.3 listeneragent-3.3_2 listener2 + 6 listeneragent-3.3 listeneragent-3.3_1 listener1 + +Commands which operate off an agent's UUID can optionally operate off the tag by using "--tag ". This can use wildcards +to catch multiple agents at once. For example, ``vctl start --tag listener*`` will start both `listener1` and +`listener2`. + +.. warning:: + + Removal by tag and name potentially allows multiple agents to be removed at once and should be used with caution. A + "-f" option is required to delete more than one agent at a time. + + +Agent Control +============= + +Starting and Stopping an Agent +------------------------------ + +Agent that are installed in the platform can be launched with the `start` command. By default this operates off the +agent's UUID but can be used with ``--tag`` or ``--name`` to launch agents by those attributes. + +This can allow multiple agents to be started at once. For instance: ``vctl start --name myagent-0.1`` would start all +instances of that agent regardless of their uuid, tag, or configuration information. + +After an agent is started, it will show up in :ref:`Agent Status ` as "running" with a process id. + +Similarly, ``volttron-ctl stop `` can also operate off the tag and name of agent(s). After an agent is stopped, +it will show an exit code of 0 in :ref:`Agent Status ` + +Running an agent +---------------- + +For testing purposes, an agent package not installed in the platform can +be run by using: + +.. code-block:: bash + + vctl run + + +.. _Agent-Status: + +Agent Status +============ + +``vctl list`` shows the agents which have been installed on the platform along with their uuid, associated +:ref:`tag ` and :ref:`priority `. + +- `uuid` is the first column of the display and is displayed as the shorted unique portion. Using this portion, agents + can be started, stopped, removed, etc. +- `AGENT` is the "name" of this agent based on the name of the wheel file which was installed. Agents can be + controlled with this using ``--name``. + + .. note:: + + If multiple instances of a wheel are installed they will all have the same name and can be controlled as a group. + +- IDENTITY is the VIP platform identity assigned to the agent which can be used to make RPC calls, etc. with the + platform +- :ref:`TAG ` is a user provided tag which makes it simpler to track and refer to agents. ``--tag `` + can used in most agent control commands instead of the UUID to control that agent or multiple agents with a pattern. +- PRI is the priority for agents which have been "enabled" using the ``vctl enable`` command. When enabled, agents + will be automatically started in priority order along with the platform. + + +.. code-block:: console + + AGENT IDENTITY TAG PRI + a listeneragent-3.3 listeneragent-3.3_2 listener2 + 6 listeneragent-3.3 listeneragent-3.3_1 listener1 + + +The ``vctl status`` command shows the list of installed agents and whether they are running or have exited. + +.. code-block:: console + + AGENT IDENTITY TAG STATUS HEALTH + a listeneragent-3.3 listeneragent-3.3_2 listener2 running [12872] GOOD + 6 listeneragent-3.3 listeneragent-3.3_1 listener1 running [12873] GOOD + +- `AGENT`, `IDENTITY` and `TAG` are the same as in the ``vctl list`` command +- `STATUS` is the current condition of the agent. If the agent is currently executing, it has "running" and the process + id of the agent. If the agent is not running, the exit code is shown. +- `HEALTH` represents the current state of the agent. `GOOD` health is displayed while the agent is operating as + expected. If an agent enters an error state the health will display as `BAD` + +To get more information about a current agents' health one can execute + +.. code-block:: console + + # vctl health agent_uuid + vctl health a + +The above command will output json such as the following: + +.. code-block:: json + + { + "peer": "listeneragent-3.2_1", + "service_agent": false, + "connected": "2020-11-02T14:26:07.749003", + "last_heartbeat": "2020-11-02T14:26:12.762268", + "message": "GOOD" + } + +.. note:: + + When an agent sets its health it can set the message to any serializable string. + + +.. _Agent-Autostart: + +Agent Autostart +=============== + +An agent can be setup to start when the platform is started with the `enable` command. This command also allows a +priority to be set (0-100, default 50) so that agents can be started after any dependencies. This command can also be +used with the ``--tag`` or ``--name`` options. + +.. code-block:: bash + + vctl enable + diff --git a/docs/source/devguides/walkthroughs/Agent-Authentication-Walkthrough.rst b/docs/source/platform-features/control/authentication-commands.rst similarity index 62% rename from docs/source/devguides/walkthroughs/Agent-Authentication-Walkthrough.rst rename to docs/source/platform-features/control/authentication-commands.rst index 76eff66200..5fab8b7133 100644 --- a/docs/source/devguides/walkthroughs/Agent-Authentication-Walkthrough.rst +++ b/docs/source/platform-features/control/authentication-commands.rst @@ -1,10 +1,82 @@ -.. _AgentAuthentication: +.. _Agent-Authentication-Commands: -How to authenticate an agent to communicate with VOLTTRON platform: -=================================================================== +======================= +Authentication Commands +======================= -An administrator can allow an agent to communicate with VOLTTRON platform by creating an authentication record for that agent. -An authentication record is created by using :code:`vctl auth add` command and entering values to asked arguments. +All authentication sub-commands can be viewed by entering following command. + +.. code-block:: console + + vctl auth --help + +.. code-block:: console + + optional arguments: + -h, --help show this help message and exit + -c FILE, --config FILE + read configuration from FILE + --debug show tracebacks for errors rather than a brief message + -t SECS, --timeout SECS + timeout in seconds for remote calls (default: 60) + --msgdebug MSGDEBUG route all messages to an agent while debugging + --vip-address ZMQADDR + ZeroMQ URL to bind for VIP connections + + subcommands: + + add add new authentication record + add-group associate a group name with a set of roles + add-known-host add server public key to known-hosts file + add-role associate a role name with a set of capabilities + keypair generate CurveMQ keys for encrypting VIP connections + list list authentication records + list-groups show list of group names and their sets of roles + list-known-hosts list entries from known-hosts file + list-roles show list of role names and their sets of capabilities + publickey show public key for each agent + remove removes one or more authentication records by indices + remove-group disassociate a group name from a set of roles + remove-known-host remove entry from known-hosts file + remove-role disassociate a role name from a set of capabilities + serverkey show the serverkey for the instance + update updates one authentication record by index + update-group update group to include (or remove) given roles + update-role update role to include (or remove) given capabilities + remote manage pending RMQ certs and ZMQ credentials + + + +Authentication record +--------------------- + +An authentication record consist of following parameters + +.. code-block:: console + + domain []: + address []: Either a single agent identity or an array of agents identities + user_id []: Arbitrary string to identify the agent + capabilities (delimit multiple entries with comma) []: Array of strings referring to authorized capabilities defined by exported RPC methods + roles (delimit multiple entries with comma) []: + groups (delimit multiple entries with comma) []: + mechanism [CURVE]: + credentials []: Public key string for the agent + comments []: + enabled [True]: + +For more details on how to create authentication record, please see section +:ref:`Agent Authentication ` + + +.. _Agent-Authentication: + +How to authenticate an agent to communicate with VOLTTRON platform +================================================================== + +An administrator can allow an agent to communicate with VOLTTRON platform by creating an authentication record for that +agent. An authentication record is created by using :code:`vctl auth add` command and entering values to asked +arguments. .. code-block:: console @@ -206,7 +278,6 @@ auth.json file entry for the above command would be: } - Roles: ------- A role is a name for a set of capabilities. Roles can be used to grant an agent @@ -259,6 +330,7 @@ To remove a capability from a role: vctl auth update-role BUILDING_A_ADMIN TRIGGER_ALARM --remove + Groups: ------- Groups provide one more layer of *grouping*. A group is a named set of roles. @@ -285,10 +357,12 @@ such agents would implicity be granted the following capabilities: ``READ_BUILDING_A_TEMP``, ``SET_BUILDING_A_TEMP``, ``READ_BUILDLING_B_TEMP``, and ``SET_BUILDING_B_TEMP``. + Mechanism: ----------- Mechanism is the authentication method by which the agent will communicate with VOLTTRON platform. Currently VOLTTRON uses only CURVE mechanism to authenticate agents. + Credentials: ------------- @@ -310,5 +384,81 @@ TRUE of FALSE value to enable or disable the authentication record. Record will only be used if this value is True +Remote Agent Management +======================= + +The remote sub-parser allows the user to manage connections to remote platforms and agents. +This functionality is comparable to that provided by the admin webpage, and requires the +volttron instance to be web enabled. In addition, when working with RMQ based CSRs, the RMQ messagebus must be used. + +All remote sub-commands can be viewed by entering following command: + +.. code-block:: console + + vctl auth remote --help + +.. code-block:: console + + optional arguments: + -h, --help show this help message and exit + -c FILE, --config FILE + read configuration from FILE + --debug show tracebacks for errors rather than a brief message + -t SECS, --timeout SECS + timeout in seconds for remote calls (default: 60) + --msgdebug MSGDEBUG route all messages to an agent while debugging + --vip-address ZMQADDR + ZeroMQ URL to bind for VIP connections + + remote subcommands: + + list lists approved, denied, and pending certs and + credentials + approve approves pending or denied remote connection + deny denies pending or denied remote connection + delete approves pending or denied remote connection + + +The four primary actions are list, approve, deny, and delete. +List displays all remote CSRs and ZMQ credentials, their address, +and current status, either APPROVED, DENIED, or PENDING. + +.. code-block:: console + + USER_ID ADDRESS STATUS + volttron1.volttron1.platform.agent 192.168.56.101 PENDING + 917a5da0-5a85-4201-b7d8-cd8c3959f391 127.0.0.1 PENDING + +To accept a pending cert/credential, use: + +.. code-block:: console + + vctl auth remote approve + +The USER_ID can be taken directly from vctl auth remote list. + +To deny a pending cert/credential, use: + +.. code-block:: console + + vctl auth remote deny + + +Once a cert/credential has been approved or denied, the status will change. + +.. code-block:: + + USER_ID ADDRESS STATUS + volttron1.volttron1.platform.agent 192.168.56.101 APPROVED + 917a5da0-5a85-4201-b7d8-cd8c3959f391 127.0.0.1 DENIED + + +The status of an approved or denied cert is persistent. A user may deny a previously approved cert/credential, +or approve a previously denied cert/credential. However, if a cert or credential is deleted, then the remote instance +must resend the request. + +A request can be deleted using the following command: +.. code-block:: + vctl auth remote delete diff --git a/docs/source/platform-features/control/index.rst b/docs/source/platform-features/control/index.rst new file mode 100644 index 0000000000..2e2e47dd71 --- /dev/null +++ b/docs/source/platform-features/control/index.rst @@ -0,0 +1,15 @@ +.. _Control: + +================ +VOLTTRON Control +================ + +The base platform functionality focuses on the agent lifecycle, management of the platform itself, and security. This +section describes how to use the commands included with VOLTTRON to configure and control the platform, agents and +drivers. + +.. toctree:: + + platform-commands + agent-management-control + authentication-commands diff --git a/docs/source/platform-features/control/platform-commands.rst b/docs/source/platform-features/control/platform-commands.rst new file mode 100644 index 0000000000..8075b81bac --- /dev/null +++ b/docs/source/platform-features/control/platform-commands.rst @@ -0,0 +1,347 @@ +.. _Platform-Commands: + +================= +Platform Commands +================= + +VOLTTRON files for a platform instance are stored under a single directory known as the VOLTTRON home. This home +directory is set via the :term:`VOLTTRON_HOME` environment variable and defaults to ``~/.volttron``. Multiple instances +of the platform may exist under the same account on a system by setting the `VOLTTRON_HOME` environment variable +appropriately before executing VOLTTRON commands. + +VOLTTRON's configuration file uses a modified INI format where section names are command names for which the settings in +the section apply. Settings before the first section are considered global and will be used by all commands for which +the settings are valid. Settings keys are long options (with or without the opening "--") and are followed by a colon +(``:``) or equal (``=``) and then the value. Boolean options need not include the separator or value, but may specify a +value of ``1``, ``yes``, or ``true`` for `true` or ``0``, ``no``, or ``false`` for `false`. + +It is best practice to use the :ref:`vcfg command ` prior to starting VOLTTRON for the first time to +populate the configuration file for your deployment. If VOLTTRON is started without having run `vcfg`, a default config +will be created in `$VOLTTRON_HOME/config`. The following is an example configuration after running `vcfg`: + +.. code-block:: + + [volttron] + message-bus = rmq + instance-name = volttron1 + vip-address = tcp://127.0.0.1:22916 + bind-web-address = https://:8443 + volttron-central-address = https://:8443 + +where: + +* **message-bus** - Indicates message bus to be used. Valid values are ``zmq`` and ``rmq`` +* **instance-name** - Name of the VOLTTRON instance. This has to be unique if multiple instances need to be connected + together +* **vip-address** - :term:`VIP address` of the VOLTTRON instance. It contains the IP address and port number (default + port number is 22916) +* **bind-web-address** - Optional parameter, only needed if VOLTTRON instance needs a web interface +* **volttron-central-address** - Optional parameter. Web address of VOLTTRON Central agent + +.. note:: + + + + .. code-block:: bash + + env/bin/volttron -c -l volttron.log & + +Below is a compendium of commands which can be used to operate the VOLTTRON Platform from the command line interface. + + +VOLTTRON Platform Command +========================= + +The main VOLTTRON platform command is ``volttron``, however this command is seldom run as-is. In most cases the user +will want to run the platform in the background. In a limited number of cases, the user will wish to enable verbose +logging. A typical command to start the platform is: + +.. note:: + + * All commands and sub-commands have help available with ``-h`` or ``--help`` + * Additional configuration files may be specified with ``-c`` or ``-config`` + * To specify a log file, use ``-l`` or ``--log`` + * The ampersand (``&``) can be added to then end of the command to run the platform in the background, freeing the + open shell to be used for additional commands. + +.. code-block:: bash + + volttron -vv -l volttron.log & + + +volttron Optional Arguments +--------------------------- + +- **-c FILE, --config FILE** - Start the platform using the configuration from the provided FILE +- **-l FILE, --log FILE** - send log output to FILE instead of standard output/error +- **-L FILE, --log-config FILE** - Use the configuration from FILE for VOLTTRON platform logging +- **--log-level LOGGER:LEVEL** - override default logger logging level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`, + `NOTSET`) +- **--monitor** - monitor and log connections (implies verbose logging mode ``-v``) +- **-q, --quiet** - decrease logger verboseness; may be used multiple times to further reduce logging (i.e. ``-qq``) +- **-v, --verbose** - increase logger verboseness; may be used multiple times (i.e. ``-vv``) +- **--verboseness LEVEL** - set logger verboseness level +- **-h, --help** - show this help message and exit +- **--version** - show program's version number and exit +- **--message-bus MESSAGE_BUS** - set message bus to be used. valid values are ``zmq`` and ``rmq`` + +.. note:: + + Visit the Python 3 logging documentation for more information about + `logging and verboseness levels `_. + + +Agent Options +------------- + +- **--autostart** - automatically start enabled agents and services after platform startup +- **--vip-address ZMQADDR** - ZeroMQ URL to bind for VIP connections +- **--vip-local-address ZMQADDR** - ZeroMQ URL to bind for local agent VIP connections +- **--bind-web-address BINDWEBADDR** - Bind a web server to the specified ip:port passed +- **--web-ca-cert CAFILE** - If using self-signed certificates, this variable will be set globally to allow requests to + be able to correctly reach the webserver without having to specify verify in all calls. +- **--web-secret-key WEB_SECRET_KEY** - Secret key to be used instead of HTTPS based authentication. +- **--web-ssl-key KEYFILE** - SSL key file for using https with the VOLTTRON server +- **--web-ssl-cert CERTFILE** - SSL certificate file for using https with the VOLTTRON server +- **--volttron-central-address VOLTTRON_CENTRAL_ADDRESS** - The web address of a VOLTTRON Central install instance. +- **--volttron-central-serverkey VOLTTRON_CENTRAL_SERVERKEY** - The server key of the VOLTTRON Central being connected + to. +- **--instance-name INSTANCE_NAME** - The name of the instance that will be reported to VOLTTRON Central. +- **--msgdebug** - Route all messages to an instance of the MessageDebug agent while debugging. +- **--setup-mode** - Setup mode flag for setting up authorization of external platforms. +- **--volttron-central-rmq-address VOLTTRON_CENTRAL_RMQ_ADDRESS** - The AMQP address of a VOLTTRON Central install + instance +- **--agent-monitor-frequency AGENT_MONITOR_FREQUENCY** - How often should the platform check for crashed agents + and attempt to restart. Units=seconds. Default=600 +- **--secure-agent-users SECURE_AGENT_USERS** - Require that agents run with their own users (this requires running + scripts/secure_user_permissions.sh as sudo) + +.. warning:: + + Certain options alter some basic behaviors of the platform, such as `--secure-agent-users` which causes the platform + to run each agent using its own Unix user to spawn the process. Please view the documentation for each feature to + understand its implications before choosing to run the platform in that fashion. + + +volttron-ctl Commands +===================== + +`volttron-ctl` is used to issue commands to the platform from the command line. Through `volttron-ctl` it is possible +to install and removed agents, start and stop agents, manage the configuration store, get the platform status, and +shutdown the platform. + +In more recent versions of VOLTTRON, the commands `vctl`, `vpkg`, and `vcfg` have been added to be used as a stand-in +for `volttron-ctl`, `volttron-pkg`, and `volttron-cfg` in the CLI. The VOLTTRON documentation will often use this +convention. + +.. warning:: + + `vctl` creates a special temporary agent to communicate with the platform with a specific :term:`VIP Identity`, thus + multiple instances of `vctl` cannot run at the same time. Attempting to do so will result in a conflicting + identity error. + +Use `vctl` with one or more of the following arguments, or below sub-commands: + + +vctl Optional Arguments +----------------------- + +- **-c FILE, --config FILE** - Start the platform using the configuration from the provided FILE +- **--debug** - show tracebacks for errors rather than a brief message +- **-t SECS, --timeout SECS** - timeout in seconds for remote calls (default: 60) +- **--msgdebug MSGDEBUG** - route all messages to an agent while debugging +- **--vip-address ZMQADDR** - ZeroMQ URL to bind for VIP connections +- **-l FILE, --log FILE** - send log output to FILE instead of standard output/error +- **-L FILE, --log-config FILE** - Use the configuration from FILE for VOLTTRON platform logging +- **-q, --quiet** - decrease logger verboseness; may be used multiple times to further reduce logging (i.e. ``-qq``) +- **-v, --verbose** - increase logger verboseness; may be used multiple times (i.e. ``-vv``) +- **--verboseness LEVEL** - set logger verboseness level (this level is a numeric level co +- **--json** - format output to json +- **-h, --help** - show this help message and exit + + +Commands +-------- + +- **install** - install an agent from wheel or from an agent package directory + + .. note:: + + If --agent-config is not specified then a default config, config.json or config.yml file in the agent + directory will be used as configuration. If none present then no configuration file will be loaded. + +- **tag AGENT TAG** - set, show, or remove agent tag for a particular agent +- **remove AGENT** - disconnect specified agent from the platform and remove its installed agent package from `VOLTTRON_HOME` +- **peerlist** - list the peers connected to the platform +- **list** - list installed agents +- **status** - show status of installed agents +- **health AGENT** - show agent health as JSON +- **clear** - clear status of defunct agents +- **enable AGENT** - enable agent to start automatically +- **disable AGENT** - prevent agent from start automatically +- **start AGENT** - start installed agent +- **stop AGENT** - stop agent +- **restart AGENT** - restart agent +- **run PATH** - start any agent by path +- **upgrade AGENT WHEEL** - upgrade agent from wheel file + + .. note:: + + Does *NOT* upgrade agents from the agent's code directory, requires agent wheel file. + +- **rpc** - rpc controls +- **certs OPTIONS** - manage certificate creation +- **auth OPTIONS** - manage authorization entries and encryption keys +- **config OPTIONS** - manage the platform configuration store +- **shutdown** - stop all agents (providing the `--platform` optional argument causes the platform to be shutdown) +- **send WHEEL** - send agent and start on a remote platform +- **stats** - manage router message statistics tracking +- **rabbitmq OPTIONS** - manage rabbitmq + +.. note:: + + For each command with `OPTIONS` in the description, additional options are required to make use of the command. For + each, please visit the corresponding section of documentation. + + * :ref:`Auth ` + * :ref:`Certs ` + * :ref:`Config ` + * :ref:`RPC ` + +.. note:: + + Visit the Python 3 logging documentation for more information about + `logging and verboseness levels `_. + + +.. _VCTL-Auth-Commands: + +vctl auth Subcommands +^^^^^^^^^^^^^^^^^^^^^ + +- **add** - add new authentication record +- **add-group** - associate a group name with a set of roles +- **add-known-host** - add server public key to known-hosts file +- **add-role** - associate a role name with a set of capabilities +- **keypair** - generate CurveMQ keys for encrypting VIP connections +- **list** - list authentication records +- **list-groups** - show list of group names and their sets of roles +- **list-known-hosts** - list entries from known-hosts file +- **list-roles** - show list of role names and their sets of capabilities +- **publickey** - show public key for each agent +- **remove** - removes one or more authentication records by indices +- **remove-group** - disassociate a group name from a set of roles +- **remove-known-host** - remove entry from known-hosts file +- **remove-role** - disassociate a role name from a set of capabilities +- **serverkey** - show the serverkey for the instance +- **update** - updates one authentication record by index +- **update-group** - update group to include (or remove) given roles +- **update-role** - update role to include (or remove) given capabilities + + +.. _VCTL-Certs-Commands: + +vctl certs Subcommands +^^^^^^^^^^^^^^^^^^^^^^ + +- **create-ssl-keypair** - create a SSL keypair +- **export-pkcs12** - create a PKCS12 encoded file containing private and public key from an agent. This function is + may also be used to create a Java key store using a p12 file. + + +.. _VCTL-Config-Commands: + +vctl config Subcommands +^^^^^^^^^^^^^^^^^^^^^^^ + +- **store AGENT CONFIG_NAME CONFIG PATH** - store a configuration file in agent's config store (parses JSON by default, + use `--csv` for CSV files) +- **edit AGENT CONFIG_NAME** - edit a configuration. (opens nano by default, respects EDITOR env variable) +- **delete AGENT CONFIG_NAME** - delete a configuration from agent's config store (`--all` removes all configs for the + agent) +- **list AGENT** - list stores or configurations in a store +- **get AGENT CONFIG_NAME** - get the contents of a configuration + + +.. _VCTL-RPC-Commands: + +vctl rpc Subcommands +^^^^^^^^^^^^^^^^^^^^ + +- **code** - shows how to use RPC call in other agents +- **list** - lists all agents and their RPC methods + + +vpkg Commands +============= + +`vpkg` is the VOLTTRON command used to manage agent packages (code directories and wheel files) including creating +initializing new agent code directories, creating agent wheels, etc. + + +vpkg Optional Arguments +----------------------- + +- **-h, --help** - show this help message and exit +- **-l FILE, --log FILE** - send log output to FILE instead of standard output/error +- **-L FILE, --log-config FILE** - Use the configuration from FILE for VOLTTRON platform logging +- **-q, --quiet** - decrease logger verboseness; may be used multiple times to further reduce logging (i.e. ``-qq``) +- **-v, --verbose** - increase logger verboseness; may be used multiple times (i.e. ``-vv``) +- **--verboseness LEVEL** - set logger verboseness level + + +Subcommands +----------- + +- **package** - Create agent package (whl) from a directory +- **init** - Create new agent code package from a template. Will prompt for additional metadata. +- **repackage** - Creates agent package from a currently installed agent. +- **configure** - Add a configuration file to an agent package + + +volttron-cfg Commands +===================== + +`volttron-cfg` (`vcfg`) is a tool aimed at making it easier to get up and running with VOLTTRON and a handful of agents. +Running the tool without any arguments will start a *wizard* with a walk through for setting up instance configuration +options and available agents. If only individual agents need to be configured they can be listed at the command line. + +.. note:: + + For a detailed description of the VOLTTRON configuration file and `vcfg` wizard, as well as example usage, view the + :ref:`platform configuration ` docs. + +vcfg Optional Arguments +----------------------- + +- **-h, --help** - show this help message and exit +- **-v, --verbose** - increase logger verboseness; may be used multiple times (i.e. ``-vv``) +- **--vhome VHOME** Path to volttron home +- **--instance-name INSTANCE_NAME** + Name of this volttron instance +- **--list-agents** - list configurable agents + + .. code-block:: console + + Agents available to configure: + listener + platform_driver + platform_historian + vc + vcp + +- **--agent AGENT [AGENT ...]** - configure listed agents +- **--rabbitmq RABBITMQ [RABBITMQ ...]** - Configure RabbitMQ for single instance, federation, or shovel either based on + configuration file in YML format or providing details when prompted. Usage: + + .. code-block:: bash + + vcfg --rabbitmq single|federation|shovel [rabbitmq config file] + +- **--secure-agent-users** Require that agents run with their own users (this requires running + scripts/secure_user_permissions.sh as sudo) + + .. warning:: + + The secure agent users significantly changes the operation of agents on the platform, please read the + :ref:`secure agent users ` documentation before using this feature. diff --git a/docs/source/platform-features/message-bus/index.rst b/docs/source/platform-features/message-bus/index.rst new file mode 100644 index 0000000000..12bfdcc795 --- /dev/null +++ b/docs/source/platform-features/message-bus/index.rst @@ -0,0 +1,26 @@ +.. _Message-Bus: + +=========== +Message Bus +=========== + +The VOLTTRON message bus is the mechanism responsible for enabling communication between agents, drivers, and platform +instances. The message bus supports communication using the :ref:`Publish/Subscribe Paradigm ` and +:ref:`JSON RPC `. +Currently VOLTTRON may be configured to use either Zero MQ or RabbitMQ messaging software to perform messaging. + +To standardize message bus communication, VOLTTRON implements VIP - VOLTTRON Interconnect Protocol. VIP defines +patterns for pub/sub communication as well as JSON-RPC, and allows for the creation of agent communication subsystems. + +For more information on messaging, VIP, multi-platform communication and more, please explore the message bus +documentation linked below: + + +.. toctree:: + :caption: Message Bus Topics + + topics + vip/vip-overview + rabbitmq/rabbitmq-overview + multi-platform/multi-platform-communication + diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-approve.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-approve.png new file mode 100644 index 0000000000..fbc359a255 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-approve.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-forwarder-approved.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-forwarder-approved.png new file mode 100644 index 0000000000..a4eeb35997 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-forwarder-approved.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-forwarder-request.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-forwarder-request.png new file mode 100644 index 0000000000..5d38932525 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-forwarder-request.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-vcp-approve.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-vcp-approve.png new file mode 100644 index 0000000000..5e4a414da6 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-vcp-approve.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-vcp-request.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-vcp-request.png new file mode 100644 index 0000000000..423283bee1 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-collector-vcp-request.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-no-requests-page.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-no-requests-page.png new file mode 100644 index 0000000000..0178e6f7ba Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-no-requests-page.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/images/csr-request.png b/docs/source/platform-features/message-bus/multi-platform/images/csr-request.png new file mode 100644 index 0000000000..9bbc516fec Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/csr-request.png differ diff --git a/docs/source/setup/RabbitMQ/images/node-rmq-central-vcfg.png b/docs/source/platform-features/message-bus/multi-platform/images/node-rmq-central-vcfg.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/node-rmq-central-vcfg.png rename to docs/source/platform-features/message-bus/multi-platform/images/node-rmq-central-vcfg.png diff --git a/docs/source/setup/RabbitMQ/images/node-rmq-collector2-vcfg.png b/docs/source/platform-features/message-bus/multi-platform/images/node-rmq-collector2-vcfg.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/node-rmq-collector2-vcfg.png rename to docs/source/platform-features/message-bus/multi-platform/images/node-rmq-collector2-vcfg.png diff --git a/docs/source/setup/RabbitMQ/images/node-zmq-collector1-vcfg.png b/docs/source/platform-features/message-bus/multi-platform/images/node-zmq-collector1-vcfg.png similarity index 100% rename from docs/source/setup/RabbitMQ/images/node-zmq-collector1-vcfg.png rename to docs/source/platform-features/message-bus/multi-platform/images/node-zmq-collector1-vcfg.png diff --git a/docs/source/platform-features/message-bus/multi-platform/images/zmq_pending_credential_1_approved.png b/docs/source/platform-features/message-bus/multi-platform/images/zmq_pending_credential_1_approved.png new file mode 100644 index 0000000000..443aa3f755 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/images/zmq_pending_credential_1_approved.png differ diff --git a/docs/source/core_services/multiplatform/Multiplatform-Communication.rst b/docs/source/platform-features/message-bus/multi-platform/multi-platform-communication.rst similarity index 62% rename from docs/source/core_services/multiplatform/Multiplatform-Communication.rst rename to docs/source/platform-features/message-bus/multi-platform/multi-platform-communication.rst index 51eda65b2f..a6c8d55df4 100644 --- a/docs/source/core_services/multiplatform/Multiplatform-Communication.rst +++ b/docs/source/platform-features/message-bus/multi-platform/multi-platform-communication.rst @@ -5,36 +5,40 @@ Multi-Platform Communication ============================ To connect to remote VOLTTRON platforms, we would need platform discovery information of the remote platforms. This -information contains the platform name, VIP address and serverkey of the remote platforms and we need to provide this -as part of Multiplatform configuration. +information contains the platform name, :term:`VIP` address and `serverkey` of the remote platforms and we need to +provide this as part of multi-platform configuration. + Configuration ************* The configuration and authentication for multi-platform connection can be setup either manually or by running the -platforms in set up mode. Both the setups are described below. +platforms in set up mode. Both the setups are described below. + Setup Mode For Automatic Authentication *************************************** -.. note:: It is necessary for each platform to have a web server if running in setup mode. +.. note:: + + It is necessary for each platform to have a web server if running in setup mode. For ease of use and to support multi-scale deployment, the process of obtaining the platform discovery information and -authenticating the new platform connection is automated. We can now bypass the manual process of adding auth keys (i.e., -either by using the volttron-ctl utility or directly updating the auth.json config file). +authenticating the new platform connection is automated. We can now bypass the manual process of adding auth keys +(i.e., either by using the `volttron-ctl` utility or directly updating the `auth.json` config file). -A config file containing list of web addresses (one for each platform) need to be made available in VOLTTRON_HOME +A config file containing list of web addresses (one for each platform) need to be made available in :term:`VOLTTRON_HOME` directory. -Name of the file: external_address.json +Name of the file: `external_address.json` Directory path: Each platform’s VOLTTRON_HOME directory. -For example: /home/volttron/.volttron1 +For example: `/home/volttron/.volttron1` Contents of the file: -.. code-block:: json +:: [ "http://:", @@ -46,24 +50,23 @@ Contents of the file: We then start each VOLTTRON platform with setup mode option in this way. - :: - - volttron -vv -l volttron.log --setup-mode& +.. code-block:: bash + volttron -vv -l volttron.log --setup-mode& Each platform will obtain the platform discovery information of the remote platform that it is trying to connect through a HTTP discovery request and store the information in a configuration file -($VOLTTRON_HOME/external_platform_discovery.json). It will then use the VIP address and serverkey to connect to the -remote platform. The remote platform shall authenticate the new connection and store the auth keys (public key) of the -connecting platform for future use. +(`$VOLTTRON_HOME/external_platform_discovery.json`). It will then use the :term:`VIP address` and `serverkey` to connect +to the remote platform. The remote platform shall authenticate the new connection and store the auth keys (public key) +of the connecting platform for future use. -The platform discovery information will be stored in VOLTTRON_HOME directory and looks like below: +The platform discovery information will be stored in `VOLTTRON_HOME` directory and looks like below: -Name of config file: external_platform_discovery.json +Name of config file: `external_platform_discovery.json` Contents of the file: -.. code-block:: json +:: {"": {"vip-address":"tcp://:", "instance-name":"", @@ -85,24 +88,23 @@ Each platform will use this information for future connections. Once the keys have been exchanged and stored in the auth module, we can restart all the VOLTTRON platforms in normal mode. - :: - - ./stop-volttron +.. code-block:: bash - ./start-volttron + ./stop-volttron + ./start-volttron Manual Configuration of External Platform Information ***************************************************** -Platform discovery configuration file can also be built manually and it needs to be added inside VOLTTRON_HOME directory -of each platform. +Platform discovery configuration file can also be built manually and it needs to be added inside `VOLTTRON_HOME` +directory of each platform. -Name of config file: external_platform_discovery.json +Name of config file: `external_platform_discovery.json` Contents of the file: -.. code-block:: json +:: {"": {"vip-address":"tcp://:", "instance-name":"", @@ -120,12 +122,24 @@ Contents of the file: } With this configuration, platforms can be started in normal mode. - :: - ./start-volttron +.. code-block:: bash + + ./start-volttron For external platform connections to be authenticated, we would need to add the credentials of the connecting platforms -in each platform using the volttron-ctl auth utility. For more details -:ref:`Agent authentication walkthrough `. +in each platform using the `volttron-ctl auth` utility. For more details +:ref:`Agent authentication walk-through `. + +.. seealso:: + + :ref:`Multi-Platform Walk-through ` + + +.. toctree:: + :caption: Multi-platform Message Bus Topics -.. seealso:: :ref:`Multi-Platform Walkthrough ` + pubsub-remote-platforms + multi-platform-rpc + multi-platform-rabbit/multi-platform-rabbitmq + multi-platform-rabbit/agent-communication-rabbitmq diff --git a/docs/source/setup/RabbitMQ/remote_agent.rst b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/agent-communication-rabbitmq.rst similarity index 67% rename from docs/source/setup/RabbitMQ/remote_agent.rst rename to docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/agent-communication-rabbitmq.rst index 15480ed1e7..30d9f4a0cf 100644 --- a/docs/source/setup/RabbitMQ/remote_agent.rst +++ b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/agent-communication-rabbitmq.rst @@ -1,4 +1,4 @@ -.. _Connecting_to_remote_RMQ: +.. _Agent-Communication-to-Remote-RabbitMQ: =============================================== Agent communication to Remote RabbitMQ instance @@ -6,10 +6,10 @@ Agent communication to Remote RabbitMQ instance Communication between two RabbitMQ based VOLTTRON instances must be done using SSL certificate based authentication. Non SSL based authentication will not be supported for communication to remote RabbitMQ based VOLTTRON instances. -An volttron instance that wants to communicate with a remote instance should first request a SSL certificate that is -signed by the remote instance. To facilitate this process there will be a web based server api for requesting, listing, -approving and denying certificate requests. This api will be exposed via the MasterWebService and will be available -to any RabbitMQ based VOLTTRON instance with ssl enabled. This api will be tested and used in the following agents: +A VOLTTORN instance that wants to communicate with a remote instance should first request a SSL certificate that is +signed by the remote instance. To facilitate this process there will be a web based server API for requesting, listing, +approving and denying certificate requests. This api will be exposed via the PlatformWebService and will be available +to any RabbitMQ based VOLTTRON instance with SSL enabled. This API will be tested and used in the following agents: - ForwarderAgent - DataPuller @@ -17,15 +17,15 @@ to any RabbitMQ based VOLTTRON instance with ssl enabled. This api will be test For the following document we will assume we have two instances a local-instance and remote-volttron-instance. The remote-volttron-instance will be configured to allow certificate requests to be sent to it from the -local-instance. A remote-agent running in local-instance will attempt to establish a connection to the +local-instance. A remote-agent running in local-instance will attempt to establish a connection to the remote-volttron-instance Configuration -------------- +============= Both volttron-server and volttron-client must be configured for RabbitMQ message bus with SSL using the step described -at :ref:`Installing Volttron`. +at :ref:`Installing Volttron `. In addition the remote-volttron-instance configuration file must have a https bind-web-address specified in the instance config file. Below is an example config file with bind-web-address. Restart volttron after editing the config @@ -39,7 +39,7 @@ file bind-web-address = https://volttron1:8443 instance-name = volttron1 -By default the `bind-web-address` parameter will use the `MasterWebService` agent's certificate and private key. +By default the `bind-web-address` parameter will use the PlatformWebService agent's certificate and private key. Both private and public key are necessary in order to bind the port to the socket for incoming connections. This key pair is auto generated for RabbitMQ based VOLTTRON at the time of platform startup. Users can provide a different certificate and private key to be used for the bind-web-address by specifying web-ssl-cert and web-ssl-key in the @@ -60,9 +60,9 @@ config file. Below is an example config file with the additional entries - The `/etc/hosts` file should be modified in order for the dns name to be used for the bound address. remote-agent on local-instance ------------------------------- +============================== -The `auth` subsystem of the volttron architecture is how a remote-agent on local instnace will connect to the remote +The `auth` subsystem of the volttron architecture is how a remote-agent on local instance will connect to the remote volttron instance. The following is a code snippet from the remote-agent to connect to the remote volttron instance. @@ -73,31 +73,33 @@ The following is a code snippet from the remote-agent to connect to the remote v value = self.vip.auth.connect_remote_platform(address) The above function call will return an agent that connects to the remote instance only after the request is approved -by an adminstrator of the remote instance. It is up to the agent to repeat calling `connect_remote_platform` +by an administrator of the remote instance. It is up to the agent to repeat calling `connect_remote_platform` periodically until an agent object is obtained. + Approving a CSR Request -~~~~~~~~~~~~~~~~~~~~~~~ +----------------------- The following diagram shows the sequence of events when an access request is approved by the administrator of remote -volttron instance. In this case, the volttron-client agent will get a Agent object that is connected to the -remote instance. The diagram shows the client agent repeating the call to connect_remote_platform until the return value -is not None. +volttron instance. In this case, the volttron-client agent will get a Agent object that is connected to the +remote instance. The diagram shows the client agent repeating the call to connect_remote_platform until the return +value is not None. |CSR Approval| + Denying a CSR Request -~~~~~~~~~~~~~~~~~~~~~~~~ +--------------------- The following diagram shows the sequence of events when an access request is denied by the administrator. The client -agent repeats the call to connect_remote_platform until the return value is not None. When the remote instance's +agent repeats the call to connect_remote_platform until the return value is not None. When the remote instance's administrator denies a access request, the auth subsystem will raise an alert and shutdown the agent. |CSR Denied| -.. |CSR Approval| image:: images/csr-sequence-approval.png -.. |CSR Denied| image:: images/csr-sequence-deny.png +.. |CSR Approval| image:: files/csr-sequence-approval.png +.. |CSR Denied| image:: files/csr-sequence-deny.png -Follow walk-through in :ref:`Multi-Platform Multi-Bus Walk-through <_Multi_Platform_Walkthrough>` for setting up different -combinations of multi-bus multi-platform setup using CSR. +Follow walk-through in :ref:`Multi-Platform Multi-Bus Walk-through ` for setting up +different combinations of multi-bus multi-platform setup using CSR. diff --git a/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/csr-sequence-approval.png b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/csr-sequence-approval.png new file mode 100644 index 0000000000..cf32895d02 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/csr-sequence-approval.png differ diff --git a/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/csr-sequence-deny.png b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/csr-sequence-deny.png new file mode 100644 index 0000000000..6f7a5d4396 Binary files /dev/null and b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/csr-sequence-deny.png differ diff --git a/docs/source/core_services/messagebus_refactor/files/federation.png b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/federation.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/federation.png rename to docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/files/federation.png diff --git a/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/multi-platform-rabbitmq.rst b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/multi-platform-rabbitmq.rst new file mode 100644 index 0000000000..1bfb954a24 --- /dev/null +++ b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rabbit/multi-platform-rabbitmq.rst @@ -0,0 +1,139 @@ +.. _Multi-Platform-RabbitMQ: + +============================ +Distributed RabbitMQ Brokers +============================ + +RabbitMQ allows multiple distributed RabbitMQ brokers to be connected in three different ways - with clustering, with +federation and using shovel. We take advantage of these built-in plugins for multi-platform VOLTTRON communication. For +more information about the differences between clustering, federation, and shovel, please refer to the RabbitMQ +documentation on `Distributed RabbitMQ brokers `_. + +Clustering +---------- + +Clustering connects multiple brokers residing in multiple machines to form a single logical broker. It is used in +applications where tight coupling is necessary i.e, where each node shares the data and knows the state of all other +nodes in the cluster. A new node can connect to the cluster through a peer discovery mechanism if configured to do so +in the RabbitMQ config file. For all the nodes to be connected together in a cluster, it is necessary for them to share +the same Erlang cookie and be reachable through it's DNS hostname. A client can connect to any one of the nodes in the +cluster and perform any operation (to send/receive messages from other nodes etc.), the nodes will route the operation +internally. In case of a node failure, clients should be able to reconnect to a different node, recover their topology +and continue operation. + +.. note:: + + This feature is not integrated into VOLTTRON, but we hope to support it in the future. For more detailed + information about clustering, please refer to RabbitMQ documentation on the + `Clustering plugin `_. + + +.. _RabbitMQ-Federation: + +Federation +---------- +Federation plugin is used in applications that does not require as much of tight coupling as clustering. Federation has +several useful features: + +* Loose coupling - The federation plugin can transmit messages between brokers (or clusters) in different administrative + domains: + + * they may have different users and virtual hosts; + * they may run on different versions of RabbitMQ and Erlang. + +* WAN friendliness - They can tolerate network intermittent connectivity. + +* Specificity - Not everything needs to be federated ( made available to other brokers ); There can be local-only + components. + +* Scalability - Federation does not require O(n2) connections for *n* brokers, so it scales better. + +The federation plugin allows you to make exchanges and queues *federated*. A federated exchange or queue can receive +messages from one or more upstreams (remote exchanges and queues on other brokers). A federated exchange can route +messages published upstream to a local queue. A federated queue lets a local consumer receive messages from an upstream +queue. + +Before we move forward, let's define upstream and downstream servers. + +* Upstream server - The node that is publishing some message of interest +* Downstream server - The node connected to a different broker that wants to receive messages from the upstream server + +A federation link needs to be established from downstream server to the upstream server. The data flows in single +direction from upstream server to downstream server. For bi-directional data flow, we would need to create federation +links on both the nodes. + +We can receive messages from upstream server to downstream server by either making an exchange or a queue *federated*. + +For more detailed information about federation, please refer to RabbitMQ documentation +`Federation plugin `_. + + +Federated Exchange +------------------ + +When we make an exchange on the downstream server *federated*, the messages published to the upstream exchanges are +copied to the federated exchange, as though they were published directly to it. + +.. image:: files/federation.png + +The above figure explains message transfer using federated exchange. The box on the right acts as the downstream server +and the box on the left acts as the upstream server. A federation/upstream link is established between the downstream +server and the upstream server by using the federation management plugin. + +An exchange on the downstream server is made *federated* using federation policy configuration. The federated exchange +only receives the messages for which it has subscribed. An upstream queue is created on the upstream server with a +binding key same as subscription made on the federated exchange. For example, if an upstream server is publishing +messages with binding key "foo" and a client on the downstream server is interested in receiving messages of the +binding key "foo", then it creates a queue and binds the queue to the federated with the same binding key. This binding +is sent to the upstream and the upstream queue binds to the upstream exchange with that key. + +Publications to either exchange may be received by queues bound to the federated exchange, but publications +directly to the federated exchange cannot be received by queues bound to the upstream exchange. + +For more information about federated exchanges and different federation topologies, please read about +`Federated Exchanges `_. + + +Federated Queue +--------------- + +Federated queue provides a way of balancing load of a single queue across nodes or clusters. A federated queue lets a +local consumer receive messages from an upstream queue. A typical use would be to have the same "logical" queue +distributed over many brokers. Such a logical distributed queue is capable of having higher capacity than a single +queue. A federated queue links to other upstream queues. + +A federation or upstream link needs to be created like before and a federated queue needs to be setup on the downstream +server using federation policy configuration. The federated queue will only retrieve messages when it has run out of +messages locally, it has consumers that need messages, and the upstream queue has "spare" messages that are not being +consumed. + +For more information about federated queues, please read about +`Federated Queues `_. + + +.. _RabbitMQ-Shovel: + +Shovel +------ +The Shovel plugin allows you to reliably and continually move messages from a source in one +broker to destination in another broker. A shovel behaves like a well-written client application in that it: + +* connects to it's source and destination broker +* consumes messages from the source queue +* re-publishes messages to the destination if the messages match the routing key. + +The Shovel plugin uses an Erlang client under the hood. In the case of shovel, apart from configuring the hostname, +port and virtual host of the remote node, we will also have to provide a list of routing keys that we want to forward to +the remote node. The primary advantages of shovels are: + +* Loose coupling - A shovel can move messages between brokers (or clusters) in different administrative domains: + * they may have different users and virtual hosts; + * they may run on different versions of RabbitMQ and Erlang. +* WAN friendliness - They can tolerate network intermittent connectivity. + +Shovels are also useful in cases where one of the nodes is behind NAT. We can setup shovel on the node behind NAT to +forward messages to the node outside NAT. Shovels do not allow you to adapt to subscriptions like a federation link and +we need to a create a new shovel per subscription. + +For more detailed information about shovel, please refer to RabbitMQ documentation on the +`Shovel plugin `_. diff --git a/docs/source/core_services/multiplatform/Multiplatform-RPC.rst b/docs/source/platform-features/message-bus/multi-platform/multi-platform-rpc.rst similarity index 100% rename from docs/source/core_services/multiplatform/Multiplatform-RPC.rst rename to docs/source/platform-features/message-bus/multi-platform/multi-platform-rpc.rst diff --git a/docs/source/platform-features/message-bus/multi-platform/pubsub-remote-platforms.rst b/docs/source/platform-features/message-bus/multi-platform/pubsub-remote-platforms.rst new file mode 100644 index 0000000000..897de2b134 --- /dev/null +++ b/docs/source/platform-features/message-bus/multi-platform/pubsub-remote-platforms.rst @@ -0,0 +1,293 @@ +.. _PubSub-Between-Remote-Platforms: + +============================================= +PubSub Communication Between Remote Platforms +============================================= + +This document describes pubsub communication between different platforms. The goal of this specification is to improve +forward historians forwarding local PubSub messages to remote platforms. Agents interested in receiving PubSub +messages from external platforms will not need to have a forward historian running on the source platform to forward +PubSub messages to the interested destination platforms; The VIP router will now do all the work. It shall use the +Routing Service to internally manage connections with external VOLTTRON platforms and use the PubSubService for the +actual inter-platform PubSub communication. + +For future: + +This specification will need to be extended to support PubSub communication between platforms that are +multiple hops away. The VIP router of each platform shall need to maintain a routing table and use it to forward pubsub +messages to subscribed platforms that are multiple hops away. The routing table shall contain shortest path to each +destination platform. + + +Functional Capabilities +======================= + +1. Each VOLTTRON platform shall have a list of other VOLTTRON platforms that it has to connect to in a config file. + +2. Routing Service of each platform connects to other platforms on startup. + +3. The Routing Service in each platform is responsible for connecting to (and also initiating reconnection if required), + monitoring and disconnecting from each external platform. The function of the Routing Service is explained in detail + in the Routing Service section. + +4. Platform to platform PubSub communication shall be using VIP protocol with the subsystem frame set to "pubsub". + +5. The PubSubService of each VOLTTRON platform shall maintain a list of local and external subscriptions. + +6. Each VIP router sends its list of external subscriptions to other connected platforms in the following cases: + + a. On startup + + b. When a new subscription is added + + c. When an existing subscription is removed + + d. When a new platform gets connected + +7. When a remote platform disconnection is detected, all stale subscriptions related to that platform shall be removed. + +8. Whenever an agent publishes a message to a specific topic, the PubSubService on the local platform first checks the + topic against its list of local subscriptions. If a local subscription exists, it sends the publish message to + corresponding local subscribers. + +9. The PubSubService shall also check the topic against list of external subscriptions. If an external subscription + exists, it shall use the Routing Service to send the publish message to the corresponding external platform. + +10. Whenever a router receives messages from other platform, it shall check the destination platform in the incoming + message. + + a. If the destination platform is the local platform, it hand overs the publish message to the PubSubService which + checks the topic against list of external subscriptions. If an external subscription matches, the PubSubService + forwards the message to all the local subscribers subscribed to that topic. + + b. If the destination platform is not the local platform, it discards the message. + + +Routing Service +--------------- + +1. The Routing Service shall maintain connection status (CONNECTING, CONNECTED, DISCONNECTED etc.) for each external + platform. + +2. In order to establish connection with an external VOLTTRON platform, the server key of the remote platform is needed. + The Routing Service shall connect to an external platform once it obtains the server key for that platform from the + KeyDiscoveryService. + +3. The Routing Service shall exchange "hello"/"welcome" handshake messages with the newly connected remote platform to + confirm the connection. It shall use VIP protocol with the subsystem frame set to “routing_table” for the handshake + messages. + +4. Routing Service shall monitor the connection status and inform the PubSubService whenever a remote platform gets + connected/disconnected. + + +For Future: + +1. Each VIP router shall exchange its routing table with its connected platforms on startup and whenever a new platform + gets connected or disconnected. + +2. The router shall go through each entry in the routing table that it received from other platforms and calculate the + shortest, most stable path to each remote platform. It then sends the updated routing table to other platforms for + adjustments in the forwarding paths (in their local routing table) if any. + +3. Whenever a VIP router detects a new connection, it adds an entry into the routing table and sends updated routing + table to its neighboring platforms. Each router in the other platforms shall update and re-calculate the forwarding + paths in its local routing table and forward to rest of the platforms. + +4. Similarly, whenever a VIP router detects a remote platform disconnection, it deletes the entry in the routing table + for that platform and forwards the routing table to other platforms to do the same. + + +KeyDiscovery Service +-------------------- + +1. Each platform tries to obtain the platform discovery information - platform name, :term:`VIP address` and server key + of remote VOLTTRON platforms through HTTP discovery service at startup. + +2. If unsuccessful, it shall make regular attempts to obtain discovery information until successful. + +3. The platform discovery information shall then be sent to the Routing Service using VIP protocol with subsystem + frame set to "routing_table". + + +Messages for Routing Service +============================ + +Below are example messages that are applicable to the Routing Service. + +* Message sent by KeyDiscovery Service containing the platform discovery information (platform name, VIP address and + server key) of a remote platform + + :: + + +-+ + | | Empty recipient frame + +-+----+ + | VIP1 | Signature frame + +-+----+ + | | Empty user ID frame + +-+----+ + | 0001 | Request ID, for example "0001" + +---------------+ + | routing_table | Subsystem, "routing_table" + +---------------+----------------+ + | normalmode_platform_connection | Type of operation, "normalmode_platform_connection" + +--------------------------------+ + | platform discovery information | + | of external platform | platform name, VIP address and server key of external platform + +--------------------------------+ + | platform name | Remote platform for which the server key belongs to. + +---------------------+ + + +Handshake messages between two newly connected external VOLTTRON platform to confirm successful connection. + +* Message from initiating platform + + :: + + +-+ + | | Empty recipient frame + +-+----+ + | VIP1 | Signature frame + +-+----+ + | | Empty user ID frame + +-+----+ + | 0001 | Request ID, for example "0001" + +--------------++ + | routing_table | Subsystem, "routing_table" + +---------------+ + | hello | Operation, "hello" + +--------+ + | hello | Hello handshake request frame + +--------+------+ + | platform name | Platform initiating a "hello" + +---------------+ + + +* Reply message from the destination platform + + :: + + +-+ + | | Empty recipient frame + +-+----+ + | VIP1 | Signature frame + +-+----+ + | | Empty user ID frame + +-+----+ + | 0001 | Request ID, for example "0001" + +--------------++ + | routing_table | Subsystem, "routing_table" + +--------+------+ + | hello | Operation, "hello" + +--------++ + | welcome | Welcome handshake reply frame + +---------+-----+ + | platform name | Platform sending reply to "hello" + +---------------+ + +Messages for PubSub communication +================================= + +The VIP routers of each platform shall send PubSub messages between platforms using VIP protocol message semantics. +Below is an example of external subscription list message sent by VOLTTRON platform `V1` router to VOLTTRON platform +`V2`. + +:: + + +-+ + | | Empty recipient frame + +-+----+ + | VIP1 | Signature frame + +-+---------+ + |V1 user id | Empty user ID frame + +-+---------+ + | 0001 | Request ID, for example "0001" + +-------++ + | pubsub | Subsystem, "pubsub" + +-------------+-+ + | external_list | Operation, "external_list" in this case + +---------------+ + | List of | + | subscriptions | Subscriptions dictionary consisting of VOLTTRON platform id and list of topics as + +---------------+ key - value pairings, for example: { "V1": ["devices/rtu3"]} + + +This shows an example of an external publish message sent by the router of VOLTTRON platform `V2` to VOLTTRON platform +`V1`. + +:: + + + +-+ + | | Empty recipient frame + +-+----+ + | VIP1 | Signature frame + +-+---------+ + |V1 user id | Empty user ID frame + +-+---------+ + | 0001 | Request ID, for example "0001" + +-------++ + | pubsub | Subsystem, "pubsub" + +------------------+ + | external_publish | Operation, "external_publish" in this case + +------------------+ + | topic | Message topic + +------------------+ + | publish message | Actual publish message frame + +------------------+ + + +API +=== + + +Methods for Routing Service +--------------------------- + +- *external_route( )* - This method receives message frames from external platforms, checks the subsystem frame and + redirects to appropriate subsystem (routing table, pubsub) handler. It shall run within a separate thread and get + executed whenever there is a new incoming message from other platforms. +- *setup( )* - This method initiates socket connections with all the external VOLTTRON platforms configured in the config + file. It also starts monitor thread to monitor connections with external platforms. +- *handle_subsystem( frames )* - Routing Service subsytem handler to handle serverkey message from KeyDiscoveryService and + "hello/welcome" handshake message from external platforms. +- *send_external( instance_name, frames )* - This method sends input message to specified VOLTTRON platform/instance. +- *register( type, handler )* - Register method for PubSubService to register for connection and disconnection events. +- *disconnect_external_instances( instance_name )* - Disconnect from specified VOLTTRON platform. +- *close_external_connections( )* - Disconnect from all external VOLTTRON platforms. +- *get_connected_platforms( )* - Return list of connected platforms. + + +Methods for PubSubService +------------------------- + +- *external_platform_add( instance_name )* - Send external subscription list to newly connected external VOLTTRON + platform. +- *external_platform_drop( instance_name )* - Remove all subscriptions for the specified VOLTTRON platform +- *update_external_subscriptions( frames )* - Store/Update list of external subscriptions as per the subscription list + provided in the message frame. +- *_distribute_external( frames )* - Publish the message all the external platforms that have subscribed to the topic. It + uses send_external_pubsub_message() of router to send out the message. +- *external_to_local_publish( frames )* - This method retrieves actual message from the message frame, checks the message + topic against list of external subscriptions and sends the message to corresponding subscribed agents. + + +Methods for agent pubsub subsystem +---------------------------------- + +To subscribe to topics from a remote platform, the subscribing agent has to add an additional input parameter - +``all_platforms`` to the pubsub subscribe method. + +- *subscribe(peer, prefix, callback, bus='', all_platforms=False)* - The existing 'subscribe' method is modified to + include optional keyword argument - 'all_platforms'. If 'all_platforms' is set to True, the agent is subscribing to + topic from local publisher and from external platform publishers. + +.. code:: python + + self.vip.pubsub.subscribe('pubsub', 'foo', self.on_match, all_platforms=True) + +There is no change in the publish method pf PubSub subsystem. If all the configurations are correct and the publisher +agent on the remote platform is publishing message to topic=``foo``, then the subscriber agent will start receiving +those messages. diff --git a/docs/source/platform-features/message-bus/rabbitmq/files/csr-sequence-approval.png b/docs/source/platform-features/message-bus/rabbitmq/files/csr-sequence-approval.png new file mode 100644 index 0000000000..cf32895d02 Binary files /dev/null and b/docs/source/platform-features/message-bus/rabbitmq/files/csr-sequence-approval.png differ diff --git a/docs/source/platform-features/message-bus/rabbitmq/files/csr-sequence-deny.png b/docs/source/platform-features/message-bus/rabbitmq/files/csr-sequence-deny.png new file mode 100644 index 0000000000..6f7a5d4396 Binary files /dev/null and b/docs/source/platform-features/message-bus/rabbitmq/files/csr-sequence-deny.png differ diff --git a/docs/source/core_services/messagebus_refactor/files/proxy_router.png b/docs/source/platform-features/message-bus/rabbitmq/files/proxy_router.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/proxy_router.png rename to docs/source/platform-features/message-bus/rabbitmq/files/proxy_router.png diff --git a/docs/source/core_services/messagebus_refactor/files/pubsub.png b/docs/source/platform-features/message-bus/rabbitmq/files/pubsub.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/pubsub.png rename to docs/source/platform-features/message-bus/rabbitmq/files/pubsub.png diff --git a/docs/source/core_services/messagebus_refactor/files/rabbitmq_exchange.png b/docs/source/platform-features/message-bus/rabbitmq/files/rabbitmq_exchange.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/rabbitmq_exchange.png rename to docs/source/platform-features/message-bus/rabbitmq/files/rabbitmq_exchange.png diff --git a/docs/source/core_services/messagebus_refactor/files/rmq_server_ssl_certs.png b/docs/source/platform-features/message-bus/rabbitmq/files/rmq_server_ssl_certs.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/rmq_server_ssl_certs.png rename to docs/source/platform-features/message-bus/rabbitmq/files/rmq_server_ssl_certs.png diff --git a/docs/source/core_services/messagebus_refactor/files/rpc.png b/docs/source/platform-features/message-bus/rabbitmq/files/rpc.png similarity index 100% rename from docs/source/core_services/messagebus_refactor/files/rpc.png rename to docs/source/platform-features/message-bus/rabbitmq/files/rpc.png diff --git a/docs/source/core_services/messagebus_refactor/Messagebus-Plugin.rst b/docs/source/platform-features/message-bus/rabbitmq/message-bus-plugin.rst similarity index 99% rename from docs/source/core_services/messagebus_refactor/Messagebus-Plugin.rst rename to docs/source/platform-features/message-bus/rabbitmq/message-bus-plugin.rst index b6a191ee18..461313bad5 100644 --- a/docs/source/core_services/messagebus_refactor/Messagebus-Plugin.rst +++ b/docs/source/platform-features/message-bus/rabbitmq/message-bus-plugin.rst @@ -1,8 +1,9 @@ - .. _Messagebus-Plugin: + .. _Message-Bus-Plugin: ============================ Message Bus Plugin Framework ============================ + The message bus plugin framework aims to decouple the VOLTTRON specific code from the message bus implementation without compromising the existing features of the platform. The concept of the plugin framework is similar to that used in historian diff --git a/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-overview.rst b/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-overview.rst new file mode 100644 index 0000000000..08b2879734 --- /dev/null +++ b/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-overview.rst @@ -0,0 +1,144 @@ + .. _RabbitMQ-Overview: + +================= +RabbitMQ Overview +================= + +RabbitMQ is a new message bus that was integrated with VOLTTRON in VOLTTRON 6 version. RabbitMQ provides many of the +features off the shelf that had to be custom built for ZeroMQ based message bus. VOLTTRON leverages many of these +features and take advantage of large pre-existing and growing industry support for RabbitMQ development. + +.. NOTE:: + + Some of the RabbitMQ summary/overview documentation and supporting images added here are taken from the + `RabbitMQ official documentation `_. + + +RabbitMQ Library +================ + +RabbitMQ is the most popular messaging library with over 35,000 production deployments. It is highly scalable, easy to +deploy, runs on many operating systems and cloud environments. It supports many kinds of distributed deployment +methodologies such as clusters, federation and shovels. + +RabbitMQ uses `Advanced Message Queueing Protocol` (AMQP) and works on the basic producer consumer model. A consumer is +a program that consumes/receives messages and producer is a program that sends the messages. Following are some +important definitions that we need to know before we proceed. + +* Queue - Queues can be considered like a post box that stores messages until consumed by the consumer. Each consumer + must create a queue to receives messages that it is interested in receiving. We can set properties to the queue + during it's declaration. The queue properties are: + + * Name - Name of the queue + * Durable - Flag to indicate if the queue should survive broker restart. + * Exclusive - Used only for one connection and it will be removed when connection is closed. + * Auto-queue - Flag to indicate if auto-delete is needed. The queue is deleted when last consumer un-subscribes from + it. + * Arguments - Optional, can be used to set message TTL (Time To Live), queue limit etc. + +* Bindings - Consumers bind the queue to an exchange with binding keys or routing patterns. Producers send messages and + associate them with a routing key. Messages are routed to one or many queues based on a pattern matching between a + message routing key and binding key. + +* Exchanges - Exchanges are entities that are responsible for routing messages to the queues based on the routing + pattern/binding key used. They look at the routing key in the message when deciding how to route messages to queues. + There are different types of exchanges and one must choose the type of exchange depending on the application design + requirements + + #. Fanout - It blindly broadcasts the message it receives to all the queues it knows. + + #. Direct - Here, the message is routed to a queue if the routing key of the message exactly matches the binding key + of the queue. + + #. Topic - Here, the message is routed to a queue based on pattern matching of the routing key with the binding key. + The binding key and the routing key pattern must be a list of words delimited by dots, for example, + "car.subaru.outback" or "car.subaru.*", "car.#". A message sent with a particular routing key will be delivered + to all the queues that are bound with a matching binding key with some special rules as + + '*' (star) - can match exactly one word in that position. + '#' (hash) - can match zero or more words + + #. Headers - If we need more complex matching then we can add a header to the message with all the attributes set to + the values that need to be matched. The message is considered matching if the values of the attributes in the + header is equal to that of the binding. The Header exchange ignores the routing key. + + We can set some properties of the exchange during it's declaration. + + * Name - Name of the exchange + * Durable - Flag to indicate if the exchange should survive broker restart. + * Auto-delete - Flag indicates if auto-delete is needed. If set to true, the exchange is deleted when the last queue + is unbound from it. + * Arguments - Optional, used by plugins and broker-specific features + +Lets use an example to understand how they all fit together. Consider an example where there are four consumers +(Consumer 1 - 4) interested in receiving messages matching the pattern "green", "red" or "yellow". In this example, we +are using a direct exchange that will route the messages to the queues only when there is an exact match of the routing +key of the message with the binding key of the queues. Each of the consumers declare a queue and bind the queue to the +exchange with a binding key of interest. Lastly, we have a producer that is continuously sending messages to exchange +with routing key "green". The exchange will check for an exact match and route the messages to only Consumer 1 and +Consumer 3. + +.. image:: files/rabbitmq_exchange.png + + +For more information about queues, bindings, exchanges, please refer to the +`RabbitMQ tutorial `_. + + +Authentication in RabbitMQ +========================== + +By default RabbitMQ supports SASL PLAIN authentication with username and password. RabbitMQ supports other SASL +authentication mechanisms using plugins. In VOLTTRON we use one such external plugin based on x509 certificates +(``_). This authentication is based on a technique called +public key cryptography which consists of a key pair - a public key and a private key. Data that has been encrypted +with a public key can only be decrypted with the corresponding private key and vice versa. The owner of key pair makes +the public key available and keeps the private confidential. To send a secure data to a receiver, a sender encrypts the +data with the receiver's public key. Since only the receiver has access to his own private key only the receiver can +decrypted. This ensures that others, even if they can get access to the encrypted data, cannot decrypt it. This is how +public key cryptography achieves confidentiality. + +A digital certificate is a digital file that is used to prove ownership of a public key. Certificates act like +identification cards for the owner/entity. Certificates are therefore crucial to determine that a sender is using the +right public key to encrypt the data in the first place. Digital Certificates are issued by Certification +Authorities(CA). Certification Authorities fulfill the role of the `Trusted Third Party` by accepting Certificate +applications from entities, authenticating applications, issuing Certificates and maintaining status information about +the Certificates issued. Each CA has its own public private key pair and its public key certificate is called a root CA +certificate. The CA attests to the identity of a Certificate applicant when it signs the Digital Certificate using its +private key. + +In x509 based authentication, a signed certificate is presented instead of username/password for authentication and if +the server recognizes the the signer of the certificate as a trusted CA, accepts and allows the connection. Each +server/system can maintain its own list of trusted CAs (i.e. list of public certificates of CAs). Certificates signed +by any of the trusted CA would be considered trusted. Certificates can also be signed by intermediate CAs that are in +turn signed by a trusted. + +This section only provides a brief overview about the SSL based authentication. Please refer to the vast material +available online for detailed description. Some useful links to start: + + * ``_ + * ``_ + + +Management Plugin +================= + +The RabbitMQ-management plugin provides an HTTP-based API for management and monitoring of RabbitMQ nodes and clusters, +along with a browser-based UI and a command line tool, *rabbitmqadmin*. The management interface allows you to: + +* Create, Monitor the status and delete resources such as virtual hosts, users, exchanges, queues etc. +* Monitor queue length, message rates and connection information and more +* Manage users and add permissions (read, write and configure) to use the resources +* Manage policies and runtime parameters +* Send and receive messages (for trouble shooting) + +For more detailed information about the management plugin, please refer to RabbitMQ documentation on the +`Management Plugin `_. + + +.. toctree:: + :caption: RabbitMQ + + message-bus-plugin + rabbitmq-volttron + rabbitmq-ssl-auth diff --git a/docs/source/core_services/messagebus_refactor/RabbitMQ-SSL-Auth.rst b/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-ssl-auth.rst similarity index 67% rename from docs/source/core_services/messagebus_refactor/RabbitMQ-SSL-Auth.rst rename to docs/source/platform-features/message-bus/rabbitmq/rabbitmq-ssl-auth.rst index 3e684d06e8..3d9b1b6d58 100644 --- a/docs/source/core_services/messagebus_refactor/RabbitMQ-SSL-Auth.rst +++ b/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-ssl-auth.rst @@ -7,11 +7,14 @@ Authentication And Authorization With RabbitMQ Message Bus Authentication In RabbitMQ VOLTTRON *********************************** -RabbitMQ VOLTTRON uses SSL based authentication, rather than the default username and password authentication. VOLTTRON -adds SSL based configuration entries into the 'rabbitmq.conf' file during the setup process. The necessary SSL + +RabbitMQ VOLTTRON uses SSL based authentication, rather than the default username and password authentication. VOLTTRON +adds SSL based configuration entries into the `rabbitmq.conf` file during the setup process. The necessary SSL configurations can be seen by running the following command: -``cat ~/rabbitmq_server/rabbitmq_server-3.7.7/etc/rabbitmq/rabbitmq.conf`` +.. code-block:: bash + + cat ~/rabbitmq_server/rabbitmq_server-3.7.7/etc/rabbitmq/rabbitmq.conf The configurations required to enable SSL: @@ -31,17 +34,19 @@ The configurations required to enable SSL: - ssl_options.certfile: path to server public certificate - ssl_options.keyfile: path to server's private key - ssl_options.verify: whether verification is enabled -- ssl_options.fail_if_no_peer_cert: upon client's failure to provide certificate, SSL connection either rejected (true) or accepted (false) +- ssl_options.fail_if_no_peer_cert: upon client's failure to provide certificate, SSL connection either rejected (true) + or accepted (false) - auth_mechanisms.1: type of authentication mechanism. EXTERNAL means SSL authentication is used SSL in RabbitMQ VOLTTRON ------------------------ + To configure RabbitMQ-VOLTTRON to use SSL based authentication, we need to add SSL configuration in rabbitmq_config.yml. .. code-block:: yaml - #host parameter is mandatory parameter. fully qualified domain name + # mandatory. fully qualified domain name for the system host: mymachine.pnl.gov # mandatory. certificate data used to create root ca certificate. Each volttron @@ -52,7 +57,7 @@ To configure RabbitMQ-VOLTTRON to use SSL based authentication, we need to add S location: 'Richland' organization: 'PNNL' organization-unit: 'VOLTTRON Team' - # volttron1 has to be replaced with actual instance name of the VOLTTRON + # volttron1 has to be replaced with actual instance name of the VOLTTRON instance common-name: 'volttron1_root_ca' virtual-host: 'volttron' # defaults to volttron @@ -83,34 +88,28 @@ The parameters of interest for SSL based configuration are - amqp-port-ssl: Port number for SSL connection (defaults to 5671) - mgmt-port-ssl: Port number for HTTPS management connection (defaults to 15671) - -We can then configure the VOLTTRON instance to use SSL based authentication with the below command. +We can then configure the VOLTTRON instance to use SSL based authentication with the below command: vcfg --rabbitmq single -When one creates a single instance of RabbitMQ, the following is created / re-created in the VOLTTRON_HOME/certificates directory: +When one creates a single instance of RabbitMQ, the following is created / re-created in the VOLTTRON_HOME/certificates +directory: - Public and private certificates of root Certificate Authority (CA) - - Public and private (automatically signed by the CA) server certificates needed by RabbitMQ broker - - Admin certificate for the RabbitMQ instance - - Public and private (automatically signed by the CA) certificates for VOLTTRON platform service agents. - - Trusted CA certificate The public files can be found at ``VOLTTRON_HOME/certificates/certs`` and the private files can be found -at ``VOLTTRON_HOME/certificates/private``. The trusted-cas.crt file is used to store -the root CAs of all VOLTTRON instances that the RabbitMQ server has to connected to. The trusted ca is only created -once, but can be updated. Initially, the trusted ca is a copy of the the root CA file, -but when an external VOLTTRON instance needs to be connected to an instance, then external VOLTTRON instance's root CA -have to be appended to this file in order for RabbitMQ broker to trust the new connection. - +at ``VOLTTRON_HOME/certificates/private``. The `trusted-cas.crt` file is used to store +the root CAs of all VOLTTRON instances that the RabbitMQ server has to connected to. The trusted CA is only created +once, but can be updated. Initially, the trusted CA is a copy of the the root CA file, +but when an external VOLTTRON instance needs to be connected to an instance, the external VOLTTRON instance's root CA +will be appended to this file in order for the RabbitMQ broker to trust the new connection. .. image:: files/rmq_server_ssl_certs.png - Every RabbitMQ has a single self signed root ca and server certificate signed by the root CA. This is created during VOLTTRON setup and the RabbitMQ server is configured and started with these two certificates. Every time an agent is started, the platform automatically creates a pair of public-private certificates for that agent that is signed by the @@ -119,29 +118,14 @@ key to the server and the server validates if it is signed by a root CA it trust started with. Since there is only a single root CA for one VOLTTRON instance, all the agents in this instance can communicate with the message bus over SSL. -Multi-Platform Communication With RabbitMQ SSL -============================================== - -For multi-platform communication over federation and shovel, we need connecting instances to trust each other. - -.. image:: files/multiplatform_ssl.png - -Suppose there are two VMs (VOLTTRON1 and VOLTTRON2) running single instances of RabbitMQ, and VOLTTRON1 and VOLTTRON2 -want to talk to each other via either the federation or shovel plugins. In order for VOLTTRON1 to talk to VOLTTRON2, -VOLTTRON1's root certificate must be appended to VOLTTRON's trusted CA certificate, so that when VOLTTRON1 presents it's -root certificate during connection, VOLTTRON2's RabbitMQ server can trust the connection. VOLTTRON2's root CA must be -appended to VOLTTRON1's root CA and it must in turn present its root certificate during connection, so that VOLTTRON1 will -know it is safe to talk to VOLTTRON2. - -Agents trying to connect to remote instance directly, need to have a public certificate signed by the remote -instance for authenticated SSL based connection. To facilitate this process, the VOLTTRON platform exposes a web based server -api for requesting, listing, approving and denying certificate requests. For more detailed description, refer to -:ref:`Agent communication to Remote RabbitMQ instance<_Connecting_to_remote_RMQ>` +For information about using SSL with multi-platform RabbitMQ deployments, view the +:ref:`docs ` Authorization in RabbitMQ VOLTTRON ================================== -To be implemented in VOLTTRON + +To be implemented in VOLTTRON at a later date. For more detailed information about access control, please refer to RabbitMQ documentation `Access Control `_. diff --git a/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-volttron.rst b/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-volttron.rst new file mode 100644 index 0000000000..c7370c488f --- /dev/null +++ b/docs/source/platform-features/message-bus/rabbitmq/rabbitmq-volttron.rst @@ -0,0 +1,253 @@ +.. _RabbitMQ-VOLTTRON: + +======================= +RabbitMQ Based VOLTTRON +======================= + +RabbitMQ VOLTTRON uses the `Pika` library for the RabbitMQ message bus implementation. To install Pika, it is +recommended to use the VOLTTRON :ref:`bootstrap.py ` script: + +.. code-block:: bash + + python3 bootstrap.py --rabbitmq + + +Configuration +============= + +To setup a VOLTTRON instance to use the RabbitMQ message bus, we need to first configure VOLTTRON to use the RabbitMQ +message library. The contents of the RabbitMQ configuration file should follow the pattern below. + +Path: `$VOLTTRON_HOME/rabbitmq_config.yml` + +.. code-block:: yaml + + #host parameter is mandatory parameter. fully qualified domain name + host: mymachine.pnl.gov + + # mandatory. certificate data used to create root ca certificate. Each volttron + # instance must have unique common-name for root ca certificate + certificate-data: + country: 'US' + state: 'Washington' + location: 'Richland' + organization: 'PNNL' + organization-unit: 'VOLTTRON Team' + # volttron1 has to be replaced with actual instance name of the VOLTTRON + common-name: 'volttron1_root_ca' + # + # optional parameters for single instance setup + # + virtual-host: 'volttron' # defaults to volttron + + # use the below four port variables if using custom rabbitmq ports + # defaults to 5672 + amqp-port: '5672' + + # defaults to 5671 + amqp-port-ssl: '5671' + + # defaults to 15672 + mgmt-port: '15672' + + # defaults to 15671 + mgmt-port-ssl: '15671' + + # defaults to true + ssl: 'true' + + # defaults to ~/rabbitmq_server/rabbbitmq_server-3.7.7 + rmq-home: "~/rabbitmq_server/rabbitmq_server-3.7.7" + +Each VOLTTRON instance resides within a RabbitMQ virtual host. The name of the virtual host needs to be unique per +VOLTTRON instance if there are multiple virtual instances within a single host/machine. The hostname needs to be able +to resolve to a valid IP. The default port of an AMQP port without authentication is `5672` and with authentication +it is `5671`. The default management HTTP port without authentication is `15672` and with authentication is `15671`. +These needs to be set appropriately if the default ports are not used. + +The 'ssl' flag indicates if SSL based authentication is required or not. If set to `True`, information regarding SSL +certificates needs to be also provided. SSL based authentication is described in detail in +`Authentication And Authorization With RabbitMQ Message Bus `_. + +To configure the VOLTTRON instance to use RabbitMQ message bus, run the following command: + +.. code-block:: bash + + vcfg --rabbitmq single [optional path to rabbitmq_config.yml] + +At the end of the setup process, a RabbitMQ broker is setup to use the configuration provided. A new topic exchange for +the VOLTTRON instance is created within the configured virtual host. + +On platform startup, VOLTTRON checks for the type of message bus to be used. If using the RabbitMQ message bus, the +RabbitMQ platform router is instantiated. The RabbitMQ platform router: + +* Connects to RabbitMQ broker (with or without authentication) +* Creates a VIP queue and binds itself to the "VOLTTRON" exchange with binding key `.router`. This + binding key makes it unique across multiple VOLTTRON instances in a single machine as long as each instance has a + unique instance name. +* Handles messages intended for router module such as `hello`, `peerlist`, `query` etc. +* Handles "unrouteable" messages - Messages which cannot be routed to any destination agent are captured and an error + message indicating "Host Unreachable" error is sent back to the caller. +* Disconnects from the broker when the platform shuts down. + +When any agent is installed and started, the Agent Core checks for the type of message bus used. If it is RabbitMQ +message bus then: + +* It creates a RabbitMQ user for the agent +* If SSL based authentication is enabled, client certificates for the agent is created +* Connect to the RabbitQM broker with appropriate connection parameters +* Creates a VIP queue and binds itself to the "VOLTTRON" exchange with binding key `.` +* Sends and receives messages using Pika library methods. +* Checks for the type of subsystem in the message packet that it receives and calls the appropriate subsystem message + handler. +* Disconnects from the broker when the agent stops or platform shuts down. + + +RPC In RabbitMQ VOLTTRON +======================== + +The agent functionality remain unchanged regardless of the underlying message bus used, meaning they can continue to use +the same RPC interfaces without any change. + +.. image:: files/rpc.png + +Consider two agents with VIP identities "agent_a" and "agent_b" connected to VOLTTRON platform +with instance name "volttron1". Agent A and B each have a VIP queue with binding key volttron1.agent_a" +and "volttron1.agent_b". Following is the sequence of operation when Agent A wants to make RPC +call to Agent B: + +1. Agent A makes a RPC call to Agent B. + +.. code-block:: python + + agent_a.vip.rpc.call("agent_b", set_point, "point_name", 2.5) + +2. RPC subsystem wraps this call into a VIP message object and sends it to Agent B. +3. The VOLTTRON exchange routes the message to Agent B as the destination routing in the VIP message object matches with + the binding key of Agent B. +4. Agent Core on Agent B receives the message, unwraps the message to find the subsystem type and calls the RPC + subsystem handler. +5. RPC subsystem makes the actual RPC call `set_point()` and gets the result. It then wraps into VIP message object and + sends it back to the caller. +6. The VOLTTRON exchange routes it to back to Agent A. +7. Agent Core on Agent A calls the RPC subsystem handler which in turn hands over the RPC result to Agent A application. + + +PUBSUB In RabbitMQ VOLTTRON +=========================== + +The agent functionality remains unchanged irrespective of the platform using ZeroMQ based pubsub or +RabbitMQ based pubsub, i.e. agents continue to use the same PubSub interfaces and use the same topic +format delimited by “/”. Since RabbitMQ expects binding key to be delimited by '.', RabbitMQ PUBSUB +internally replaces '/' with ".". Additionally, all agent topics are converted to +``_pubsub__..`` to differentiate them from the main Agent VIP queue binding. + +.. image:: files/pubsub.png + +Consider two agents with VIP identities "agent_a" and "agent_b" connected to VOLTTRON platform +with instance name "volttron1". Agent A and B each have a VIP queue with binding key "volttron1.agent_a" +and "volttron1.agent_b". Following is the sequence of operation when Agent A subscribes to a topic and Agent B +publishes to same the topic: + +1. Agent B makes subscribe call for topic "devices". + +.. code-block:: python + + agent_b.vip.pubsub.subscribe("pubsub", prefix="devices", callback=self.onmessage) + +2. Pubsub subsystem creates binding key from the topic ``__pubsub__.volttron1.devices.#`` + +3. It creates a queue internally and binds the queue to the VOLTTRON exchange with the above binding key. + +4. Agent B is publishing messages with topic: "devices/hvac1". + +.. code-block:: python + + agent_b.vip.pubsub.publish("pubsub", topic="devices/hvac1", headers={}, message="foo"). + +5. PubSub subsystem internally creates a VIP message object and publishes on the VOLTTRON exchange. + +6. RabbitMQ broker routes the message to Agent B as routing key in the message matches with the binding key of the topic + subscription. + +7. The pubsub subsystem unwraps the message and calls the appropriate callback method of Agent A. + +If agent wants to subscribe to topic from remote instances, it uses: + +.. code-block:: python + + agent.vip.subscribe('pubsub', 'devices.hvac1', all_platforms=True) + +It is internally set to ``__pubsub__.*.`` + + +Further Work +------------ + +The Pubsub subsystem for the ZeroMQ message bus performs O(N) comparisons where N is the number of unique subscriptions. +The RabbitMQ Topic Exchange was enhanced in version 2.6.0 to reduce the overhead of additional unique subscriptions to +almost nothing in most cases. We speculate they are using a tree structure to store the binding keys which would reduce +the search time to O(1) in most cases and O(ln) in the worst case. The VOLTTRON PubSub with ZeroMQ could be updated to +match this performance scalability with some effort. + + +RabbitMQ Management Tool Integrated Into VOLTTRON +================================================= + +Some of the important native RabbitMQ control and management commands are now integrated with the +:ref`volttron-ctl ` (vctl) utility. Using `volttron-ctl`'s RabbitMQ management utility, we can +control and monitor the status of RabbitMQ message bus: + +.. code-block:: console + + vctl rabbitmq --help + usage: vctl command [OPTIONS] ... rabbitmq [-h] [-c FILE] [--debug] + [-t SECS] + [--msgdebug MSGDEBUG] + [--vip-address ZMQADDR] + ... + subcommands: + + add-vhost add a new virtual host + add-user Add a new user. User will have admin privileges + i.e,configure, read and write + add-exchange add a new exchange + add-queue add a new queue + list-vhosts List virtual hosts + list-users List users + list-user-properties + List users + list-exchanges add a new user + list-exchange-properties + list exchanges with properties + list-queues list all queues + list-queue-properties + list queues with properties + list-bindings list all bindings with exchange + list-federation-parameters + list all federation parameters + list-shovel-parameters + list all shovel parameters + list-policies list all policies + remove-vhosts Remove virtual host/s + remove-users Remove virtual user/s + remove-exchanges Remove exchange/s + remove-queues Remove queue/s + remove-federation-parameters + Remove federation parameter + remove-shovel-parameters + Remove shovel parameter + remove-policies Remove policy + +For information about using RabbitMQ in multi-platform deployments, view the :ref:`docs +` + + +Deployments +=========== + +The :ref:`platform installation ` docs describe performing first time setup for single machine +RabbitMQ deployments. + +See the :ref:`multi-platform RabbitMQ ` docs for setting up shovel or federation in +multi-platform RabbitMQ deployments. diff --git a/docs/source/platform-features/message-bus/topics.rst b/docs/source/platform-features/message-bus/topics.rst new file mode 100644 index 0000000000..afed381984 --- /dev/null +++ b/docs/source/platform-features/message-bus/topics.rst @@ -0,0 +1,55 @@ +.. _Messaging-Topics: + +==================== +Messaging and Topics +==================== + + +Introduction +============ + +Agents in |VOLTTRON| communicate with each other using a publish/subscribe mechanism built on the Zero MQ or RabbitMQ +Python libraries. This allows for great flexibility as topics can be created dynamically and the messages sent can be +any format as long as the sender and receiver understand it. An agent with data to share publishes to a topic, then +any agents interested in that data subscribe to that topic. + +While this flexibility is powerful, it also could also lead to confusion if some standard is not followed. The current +conventions for communicating in the VOLTTRON are: + +- Topics and subtopics follow the format: ``topic/subtopic/subtopic`` +- Subscribers can subscribe to any and all levels. Subscriptions to `topic` will include messages for the base topic + and all subtopics. Subscriptions to ``topic/subtopic1`` will only receive messages for that subtopic and any + children subtopics. Subscriptions to empty string ("") will receive ALL messages. This is not recommended. + +Agents should set the `From` header. This will allow agents to filter on the `To` message sent back. + + +Topics +====== + + +In VOLTTRON +----------- + +- **alerts** - Base topic for alerts published by agents and subsystems, such as agent health alerts +- **analysis** - Base topic for analytics being used with building data +- **config** - Base topic for managing agent configuration +- **devices** - Base topic for data being published by drivers +- **datalogger** - Base topic for agents wishing to record time series data +- **heartbeat** - Topic for publishing periodic "heartbeat" or "keep-alive" +- **market** - Base topics for market agent communication +- **record** - Base topic for agents to record data in an arbitrary format +- **weather** - Base topic for polling publishes of weather service agents + +.. note:: + + Other more specific topics may exist for specific agents or purposes. Please review the documentation for the + specific feature for more information. + + +Controller Agent Topics +----------------------- + +See the documentation for the :ref:`Actuator Agent `. + +.. |VOLTTRON| unicode:: VOLTTRON U+2122 diff --git a/docs/source/platform-features/message-bus/vip/agent-vip-id.rst b/docs/source/platform-features/message-bus/vip/agent-vip-id.rst new file mode 100644 index 0000000000..f0d63948d4 --- /dev/null +++ b/docs/source/platform-features/message-bus/vip/agent-vip-id.rst @@ -0,0 +1,144 @@ +.. _Agent-Identity-Specification: + +=========================================== +Agent VIP IDENTITY Assignment Specification +=========================================== + +This document explains how an agent obtains it's :term:`VIP IDENTITY `, how the platform sets an agent's +VIP IDENTITY at startup, and what mechanisms are available to the user to set the VIP IDENTITY for any agent. + + +What is a VIP IDENTITY +====================== + +A VIP IDENTITY is a platform instance unique identifier for agents. The IDENTITY is used to route messages from one +Agent through the VOLTTRON router to the recipient Agent. The VIP IDENTITY provides a consistent, user defined, and +human readable character set to build a VIP IDENTITY. VIP IDENTITIES should be composed of both upper and lowercase +letters, numbers and the following special characters. + + +Runtime +======= + +The primary interface for obtaining a VIP IDENTITY *at runtime* is via the runtime environment of the agent. At startup +the utility function `vip_main` shall check for the environment variable **AGENT_VIP_IDENTITY**. If the +**AGENT_VIP_IDENTITY** environment variable is not set then the `vip_main` function will fall back to a supplied +identity argument. `vip_main` will pass the appropriate identity argument to the agent constructor. If no identity is +set the Agent class will create a random VIP IDENTITY using python's `uuid4` function. + +An agent that inherits from the platform's base Agent class can get it's current VIP IDENTITY by retrieving the value of +``self.core.identity``. + +The primary use of the 'identity' argument to `vip_main` is for agent development. For development it allows agents to +specify a default VIP IDENTITY when run outside the platform. As platform Agents are not started via `vip_main` they +will simply receive their VIP IDENTITY via the identity argument when they are instantiated. Using the identity +argument of the Agent constructor to set the VIP IDENTITY via agent configuration is no longer supported. + +At runtime the platform will set the environment variable **AGENT_VIP_IDENTITY** to the value set at installation time. + +Agents not based on the platform's base Agent should set their VIP IDENTITY by setting the identity of the ZMQ socket +before the socket connects to the platform. If the agent fails to set it's VIP IDENTITY via the ZMQ socket it will be +selected automatically by the platform. This platform chosen ID is currently not discoverable to the agent. + + +Agent Implementation +==================== + +If an Agent has a preferred VIP IDENTITY (for example the Platform Driver Agent prefers to use "platform.driver") it may +specify this as a default packed value. This is done by including a file named IDENTITY containing only the desired VIP +IDENTITY in ASCII plain text in the same directory at the `setup.py` file for the Agent. This will cause the packaged +agent wheel to include an instruction to set the VIP IDENTITY at installation time. + +This value may be overridden at packaging or installation time. + + +Packaging +========= + +An Agent may have it's VIP IDENTITY configured when it is packaged. The packaged value may be used by the platform to +set the **AGENT_VIP_IDENTITY** environment variable for the agent process. + +The packaged VIP IDENTITY may be overridden at installation time. This overrides any preferred VIP IDENTITY of the +agent. This will cause the packaged agent wheel to include an instruction to set the VIP IDENTITY at installation time. + +To specify the VIP IDENTITY when packaging use the ``--vip-identity`` option when running `volttron-pkg package`. + + +Installation +============ + +An agent may have it's VIP IDENTITY configured when it is installed. This overrides any VIP IDENTITY specified when the +agent was packaged. + +To specify the VIP IDENTITY when packaging use the ``--vip-identity`` option when running `volttron-ctl install`. + + +Installation Default VIP IDENTITY +--------------------------------- + +If no VIP IDENTITY has been specified by installation time the platform will assign one automatically. + +The platform uses the following template to generate a VIP IDENTITY: + +.. code-block:: python + + "{agent_name}_{n}" + +``{agent_name}`` is substituted with the name of the actual agent such as ``listeneragent-0.1`` + +``{n}`` is a number to make VIP IDENTITY unique. ``{n}`` is set to the first unused number (starting from 1) for all +installed instances of an agent. e.g. If there are 2 listener agents installed and the first (VIP IDENTITY +listeneragent-0.1_1) is uninstalled leaving the second (VIP IDENTITY "listeneragent-0.1_2"), a new listener agent will +receive the VIP IDENTITY "listeneragent-0.1_1" when installed. The next installed listener will receive a VIP IDENTITY +of "listeneragent-0.1_3". + +The ``#`` sign is used to prevent confusing the agent version number with the installed instance number. + +If an agent is repackaged with a new version number it is treated as a new agent and the number will start again from 1. + + +VIP IDENTITY Conflicts During Installation +------------------------------------------ + +If an agent is assigned a VIP IDENTITY besides the default value given to it by the platform it is possible for VIP IDENTITY conflicts to exist between installed agents. In this case the platform rejects the installation of an agent with a conflicting VIP IDENTITY and reports an error to the user. + + +VIP IDENTITY Conflicts During Runtime +------------------------------------- + +In the case where agents are not started through the platform (usually during development or when running standalone +agents) it is possible to encounter a VIP IDENTITY conflict during runtime. In this case the first agent to use a VIP +IDENTITY will function as normal. Subsequent agents will still connect to the ZMQ socket but will be silently rejected +by the platform router. The router will not route any message to that Agent. Agents using the platforms base Agent +will detect this automatically during the initial handshake with the platform. This condition will shutdown the Agent +with an error indicating a VIP IDENTITY conflict as the most likely cause of the problem. + +Auto Numbering With Non-Default VIP IDENTITYs +============================================= + +It is possible to use the auto numbering mechanism that the default VIP IDENTITY scheme uses. Simply include the string +``{n}`` somewhere in the requested VIP IDENTITY and it will be replaced with a number in the same manner as the default +VIP IDENTITY is. Python `string.format()` escaping rules apply. `See this question on StackOverflow. +`_ + + +Script Features +=============== + +The `scripts/install-agent.py` script supports specifying the desired VIP IDENTITY using the ``-i`` (or +``--vip-identity``) ```` option + + +Security/Privacy +================ + +Currently, much like the `TAG` file in an installed agent, there is nothing to stop someone from modifying the +`IDENTITY` file in the installed agent. + + +Constraints and Limitations +=========================== + +Currently there is no way for an agent based on the platform base Agent class to recover from a VIP IDENTITY conflict. +This case only affects developers and a very tiny minority of users and is reported via an error message, there +are currently no plans to fix it. diff --git a/docs/source/core_services/messagebus/VIP/VIP-Authentication.rst b/docs/source/platform-features/message-bus/vip/vip-authentication.rst similarity index 96% rename from docs/source/core_services/messagebus/VIP/VIP-Authentication.rst rename to docs/source/platform-features/message-bus/vip/vip-authentication.rst index 8166eea3bc..b31feda632 100644 --- a/docs/source/core_services/messagebus/VIP/VIP-Authentication.rst +++ b/docs/source/platform-features/message-bus/vip/vip-authentication.rst @@ -55,7 +55,7 @@ The auth file should not be modified directly. To change the auth file, use ``vctl auth`` subcommands: ``add``, ``list``, ``remove``, and ``update``. (Run ``vctl auth --help`` for more details and see the -:ref:`authentication commands documentation<_AuthenticationCommands>`.) +:ref:`authentication commands documentation `.) Here are some example entries:: @@ -145,7 +145,7 @@ Platform Configuration By default, the platform only listens on the local IPC VIP socket. Additional addresses may be bound using the ``--vip-address`` option, which can be provided multiple times to bind multiple addresses. Each -VIP address should follow the standard ZeroMQ convention of prefixing +:term:`VIP address` should follow the standard ZeroMQ convention of prefixing with the socket type (*ipc://* or *tcp://*) and may include any of the following additional URL parameters: @@ -210,4 +210,4 @@ Now if agent ``A`` can successfully connect to platform ``B``, and platform 2016-10-19 14:26:16,446 () volttron.platform.auth INFO: authentication success: domain='vip', address='127.0.0.1', mechanism='CURVE', credentials=['HOVXfTspZWcpHQcYT_xGcqypBHzQHTgqEzVb4iXrcDg'], user_id='Agent-A' -For a more details see the :ref:`authentication walkthrough`. +For a more details see the :ref:`authentication walk-through `. diff --git a/docs/source/platform-features/message-bus/vip/vip-authorization.rst b/docs/source/platform-features/message-bus/vip/vip-authorization.rst new file mode 100644 index 0000000000..97386ff798 --- /dev/null +++ b/docs/source/platform-features/message-bus/vip/vip-authorization.rst @@ -0,0 +1,274 @@ +.. _VIP-Authorization: + +================= +VIP Authorization +================= + +VIP :ref:`authentication ` and authorization go hand in hand. When an agent authenticates to a +VOLTTRON platform that agent proves its identity to the platform. Once authenticated, an agent is allowed to connect to +the :ref:`message bus `. VIP authorization is about giving a platform owner the ability to limit +the capabilities of authenticated agents. + +There are two parts to authorization: + +#. Required capabilities (specified in agent's code) +#. Authorization entries (specified via ``volttron-ctl auth`` commands) + +The following example will walk through how to specify required capabilities and grant those capabilities in +authorization entries. + + +Single Capability +----------------- +For this example suppose there is a temperature agent that can read and set the temperature of a particular room. The +agent author anticipates that building managers will want to limit which agents can set the temperature. + +In the temperature agent, a required capability is specified by using the ``RPC.allow`` decorator: + +.. code:: Python + + @RPC.export + def get_temperature(): + ... + + @RPC.allow('CAP_SET_TEMP') + @RPC.export + def set_temperature(temp): + ... + +In the code above, any agent can call the ``get_temperature`` method, but only agents with the ``CAP_SET_TEMP`` +capability can call ``set_temperature``. + +.. Note:: + + Capabilities are arbitrary strings. This example follows the general style used for Linux capabilities, but it is + up to the agent author. + +Now that a required capability has been specified, suppose a VOLTTRON platform owner wants to allow a specific agent, +say `Alice Agent`, to set the temperature. + +The platform owner runs ``vctl auth add`` to add new authorization entries or ``vctl auth update`` to update an existing +entry. If `Alice Agent` is installed on the platform, then it already has an authorization entry. Running +``vctl auth list`` shows the existing entries: + +:: + + ... + INDEX: 3 + { + "domain": null, + "user_id": "AliceAgent", + "roles": [], + "enabled": true, + "mechanism": "CURVE", + "capabilities": [], + "groups": [], + "address": null, + "credentials": "JydrFRRv-kdSejL6Ldxy978pOf8HkWC9fRHUWKmJfxc", + "comments": null + } + ... + +Currently AliceAgent cannot set the temperature because it does not have the ``CAP_SET_TEMP`` capability. To grant this +capability the platform owner runs ``vctl auth update 3``: + +.. code:: Bash + + (For any field type "clear" to clear the value.) + domain []: + address []: + user_id [AliceAgent]: + capabilities (delimit multiple entries with comma) []: CAP_SET_TEMP + roles (delimit multiple entries with comma) []: + groups (delimit multiple entries with comma) []: + mechanism [CURVE]: + credentials [JydrFRRv-kdSejL6Ldxy978pOf8HkWC9fRHUWKmJfxc]: + comments []: + enabled [True]: + updated entry at index 3 + + +Now `Alice Agent` can call ``set_temperature`` via RPC. If other agents try to call that method they will get the +following exception: + +.. code-block:: console + + error: method "set_temperature" requires capabilities set(['CAP_SET_TEMP']), + but capability list [] was provided + + +Multiple Capabilities +--------------------- + +Expanding on the temperature-agent example, the ``set_temperature`` method can require agents to have multiple +capabilities: + +.. code:: Python + + @RPC.allow(['CAP_SET_TEMP', 'CAP_FOO_BAR']) + @RPC.export + def set_temperature(): + ... + +This requires an agent to have both the ``CAP_SET_TEMP`` and the ``CAP_FOO_BAR`` capabilities. Multiple capabilities can +also be specified by using multiple ``RPC.allow`` decorators: + +.. code:: Python + + @RPC.allow('CAP_SET_TEMP') + @RPC.allow('CAN_FOO_BAR') + @RPC.export + def temperature(): + ... + + +Capability with parameter restriction +------------------------------------- + +Capabilities can also be used to restrict access to a rpc method only with certain parameter values. For example, if +`Agent A` exposes a method bar which accepts parameter `x`. + +AgentA's capability enabled exported RPC method: + +.. code-block:: python + + @RPC.export + @RPC.allow('can_call_bar') + def bar(self, x): + return 'If you can see this, then you have the required capabilities' + +You can restrict access to `Agent A`'s `bar` method to `Agent B` with ``x=1``. To add this auth entry use the +``vctl auth add`` command as show below: + +.. code-block:: bash + + vctl auth add --capabilities '{"test1_cap2":{"x":1}}' --user_id AgentB --credential vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0 + + +The auth.json file entry for the above command would be: + +.. code-block:: json + + { + "domain": null, + "user_id": "AgentB", + "roles": [], + "enabled": true, + "mechanism": "CURVE", + "capabilities": { + "test1_cap2": { + "x": 1 + } + }, + "groups": [], + "address": null, + "credentials": "vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0", + "comments": null + } + + +Parameter values can also be regular expressions: + +.. code-block:: console + + (volttron)volttron@volttron1:~/git/myvolttron$ vctl auth add + domain []: + address []: + user_id []: + capabilities (delimit multiple entries with comma) []: {'test1_cap2':{'x':'/.*'}} + roles (delimit multiple entries with comma) []: + groups (delimit multiple entries with comma) []: + mechanism [CURVE]: + credentials []: vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0 + comments []: + enabled [True]: + added entry domain=None, address=None, mechanism='CURVE', credentials=u'vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0', user_id='b22e041d-ec21-4f78-b32e-ab7138c22373' + + +The auth.json file entry for the above command would be: + +.. code-block:: json + + { + "domain": null, + "user_id": "90f8ef35-4407-49d8-8863-4220e95974c7", + "roles": [], + "enabled": true, + "mechanism": "CURVE", + "capabilities": { + "test1_cap2": { + "x": "/.*" + } + }, + "groups": [], + "address": null, + "credentials": "vELQORgWOUcXo69DsSmHiCCLesJPa4-CtVfvoNHwIR0", + "comments": null + } + + +.. _Protected-Topics: + +Protecting Pub/Sub Topics +========================= + +VIP :ref:`authorization ` enables VOLTTRON platform owners to protect pub/sub topics. More +specifically, a platform owner can limit who can publish to a given topic. This protects subscribers on that platform +from receiving messages (on the protected topic) from unauthorized agents. + + +Example +------- + +To protect a topic, add the topic name to ``$VOLTTRON_HOME/protected_topics.json``. For example, the following +protected-topics file declares that the topic ``foo`` is protected: + +.. code:: JSON + + { + "write-protect": [ + {"topic": "foo", "capabilities": ["can_publish_to_foo"]} + ] + } + +.. note:: + + The capability name ``can_publish_to_foo`` is not special; It can be any string, but it is easier to manage + capabilities with meaningful names. + +Now only agents with the capability ``can_publish_to_foo`` can publish to the topic ``foo``. To add this capability to +authenticated agents, run ``vctl auth update`` (or ``volttron-ctl auth add`` for new authentication entries), and enter +``can_publish_to_foo`` in the capabilities field: + +.. code:: Bash + + capabilities (delimit multiple entries with comma) []: can_publish_to_foo + +Agents that have the ``can_publish_to_foo`` capabilities can publish to topic ``foo``. That is, such agents can call: + +.. code:: Python + + self.vip.pubsub.publish('pubsub', 'foo', message='Here is a message') + +If unauthorized agents try to publish to topic ``foo`` they will get an exception: + +.. code-block:: console + + to publish to topic "foo" requires capabilities ['can_publish_to_foo'], but capability list [] was provided + + +Regular Expressions +------------------- + +Topic names in ``$VOLTTRON_HOME/protected_topics.json`` can be specified as regular expressions. In order to use a +regular expression, the topic name must begin and end with a "/". For example: + +.. code:: JSON + + { + "write-protect": [ + {"topic": "/foo/*.*/", "capabilities": ["can_publish_to_foo"]} + ] + } + +This protects topics such as ``foo/bar`` and ``foo/anything``. diff --git a/docs/source/platform-features/message-bus/vip/vip-enhancements.rst b/docs/source/platform-features/message-bus/vip/vip-enhancements.rst new file mode 100644 index 0000000000..7e2967dfc7 --- /dev/null +++ b/docs/source/platform-features/message-bus/vip/vip-enhancements.rst @@ -0,0 +1,85 @@ +.. _VIP-Enhancements: + +================ +VIP Enhancements +================ + +When creating VIP for VOLTTRON 3.0 we wanted to address two security concerns and one user request: + +- Security Concern 1: Agents can spoof each other on the VOLTTRON message bus and fake messages. +- Security Concern 2: Agents can subscribe to topics that they are not authorized to subscribe to. +- User Request 1: Several users requested means to transfer large + amounts of data between agents without using the message bus. + +VOLTTRON Interconnect Protocol (VIP) was created to address these issues but unfortunately, it broke the easy to use +pub-sub messaging model of VOLTTRON. Additionally to use the security features of VOLTTRON in 3.0 code has become an +ordeal especially when multiple platforms are concerned. Finally, VIP has introduced the requirement for knowledge of +specific other platforms to agents written by users in order to be able to communicate. The rest of this memo focuses +on defining the way VOLTTRON message bus will work going forward indefinitely and should be used as the guiding +principles for any future work on VIP and VOLTTRON. + + +VOLTTRON Message Bus Guiding Principles: +---------------------------------------- + +#. | All communications between two or more different VOLTTRON platforms MUST go through the VIP Router. Said another + way, a user agent (application) should have *NO* capability to reach out to an agent on a different VOLTTRON + platform directly. + + | All communications between two or more VOLTTRON platforms must be in the form of topics on the message bus. Agents + *MUST* not use a distinct platform address or name to communicate via a direct connection between two platforms. + +#. VOLTTRON will use two TCP ports. One port is used to extend VIP across platforms. A second port is used for the + VOLTTRON discovery protocol (more on this to come on a different document). VIP will establish bi-directional + communication via a single TCP port. + +#. In order to solve the bootstrapping problem that CurveMQ has punted on, we will modify VIP to operate similar + (behaviorally) to SSH. + +A. On a single VOLTTRON platform, the platform’s public key will be made available via an API so that all agents will be + able to communicate with the platform. Additionally, the behavior of the platform will be changed so that agents on + the same platform will automatically be added to the `auth.json` file. No more need for user to add the agents + manually to the file. The desired behavior is similar to how SSH handles `known_hosts`. + + .. Note:: + + This behavior still addresses the security request 1 & 2. + +B. When connecting VOLTTRON platforms, VOLTTRON Discovery Protocol (VDP) will be used to discover the other platforms + public key to establish the router to router connection. Note that since we *BANNED* agent to agent communication + between two platforms, we have prevented an "O(N^2)" communication pattern and key bootstrapping problem. + +#. Authorization determines what agents are allowed to access what topics. Authorization MUST be managed by the + VOLTTRON Central platform on a per organization basis. It is not recommended to have different authorization + profiles on different VOLTTRON instances belonging to the same organization. + +#. VOLTTRON message bus uses topics such as and will adopt an information model agreed upon by the VOLTTRON community + going forward. Our initial information model is based on the OpenEIS schema going forward. A different document + will describe the information model we have adopted going forward. All agents are free to create their own topics + but the VOLTTRON team (going forward) will support the common VOLTTRON information model and all agents developed by + PNNL will be converted to use the new information model. + +#. Two connected VOLTTRON systems will exchange a list of available topics via the message router. This will allow each + VIP router to know what topics are available at what VOLTTRON platform. + +#. Even though each VOLTTRON platform will have knowledge of what topics are available around itself, no actual messages + will be forwarded between VOLTTRON platforms until an agent on a specific platform subscribes to a topic. When an + agent subscribes to a topic that has a publisher on a different VOLTTRON platform, the VIP router will send a request + to its peer routers so that the messages sent to that topic will be forwarded. There will be cases (such as clean + energy transactive project) where the publisher to a topic may be multiple hops away. In this case, the subscribe + request will be sent towards the publisher through other VIP routers. In order to find the most efficient path, we + may need to keep track of the total number of hops (in terms of number of VIP routers). + +#. The model described in steps 5/6/7 applies to data collection. For control applications, VOLTTRON team only allows + control actions to be originated from the VOLTTRON instance that is directly connected to that controlled device. + This decision is made to increase the robustness of the control agent and to encourage truly distributed applications + to be developed. + +#. Direct agent to agent communication will be supported by creation of an ephemeral topic under the topic hierarchy. + Our measurements have shown repeatedly that the overhead of using the ZeroMQ message pub/sub is minimal and has zero + impact on communications throughput. + +In summary, by making small changes to the way VIP operates, I believe that we can significantly increase the usability +of the platform and also correct the mixing of two communication platforms into VIP. VOLTTRON message bus will return +to being a pub/sub messaging system going forward. Direct agent to agent communication will be supported through the +message bus. diff --git a/docs/source/platform-features/message-bus/vip/vip-json-rpc.rst b/docs/source/platform-features/message-bus/vip/vip-json-rpc.rst new file mode 100644 index 0000000000..8bd4684e26 --- /dev/null +++ b/docs/source/platform-features/message-bus/vip/vip-json-rpc.rst @@ -0,0 +1,305 @@ +.. _Remote-Procedure-Calls: + +====================== +Remote Procedure Calls +====================== + +Remote procedure calls (RPC) is a feature of VOLTTRON Interconnect Protocol :ref:`VIP `. VIP includes the +ability to create new point-to-point protocols, called subsystems, enabling the implementation of +`JSON-RPC 2.0 `_. This provides a simple method for agent authors to write +methods and expose or export them to other agents, making request-reply or notify communications patterns as +simple as writing and calling methods. + + +Exporting Methods +================= + +The ``export()`` method, defined on the RPC subsystem class, is used to mark a method as remotely accessible. This +``export()`` method has a dual use: + +* The class method can be used as a decorator to statically mark methods when the agent class is defined. +* The instance method dynamically exports methods, and can be used with methods not defined on the agent + class. + +Each take an optional export name argument, which defaults to the method name. Here are the two export method +signatures: + +Instance method: + +.. code-block:: python + + RPC.export(method, name=None) + +Class method: + +.. code-block:: python + + RPC.export(name=None) + +And here is an example agent definition using both methods: + +.. code-block:: python + + from volttron.platform.vip import Agent, Core, RPC + + def add(a, b): + '''Add two numbers and return the result''' + return a + b + + + class ExampleAgent(Agent): + @RPC.export + def say_hello(self, name): + '''Build and return a hello string''' + return 'Hello, %s!' % (name,) + + @RPC.export('say_bye') + def bye(self, name): + '''Build and return a goodbye string''' + return 'Goodbye, %s.' % (name,) + + @Core.receiver('setup') + def onsetup(self, sender, **kwargs): + self.vip.rpc.export('add') + + +Calling exported methods +======================== + +The RPC subsystem provides three methods for calling exported RPC methods: + +.. code-block:: python + + RPC.call(peer, method, *args, **kwargs) + +Call the remote ``method`` exported by ``peer`` with the given arguments. Returns a `gevent` `AsyncResult` object. + +.. code-block:: python + + RPC.batch(peer, requests) + +Batch call remote methods exported by `peer`. `requests` must be an iterable of 4-tuples +``(notify, method, args, kwargs)``, where ``notify`` is a boolean indicating whether this is a notification or standard +call, ``method`` is the method name, ``args`` is a list and ``kwargs`` is a dictionary. Returns a list of `AsyncResult` +objects for any standard calls. Returns ``None`` if all requests were notifications. + +.. code-block:: python + + RPC.notify(peer, method, *args, **kwargs) + +Send a one-way notification message to `peer` by calling `method` without returning a result. + +Here are some examples: + +.. code-block:: python + + self.vip.rpc.call(peer, 'say_hello', 'Bob').get() + results = self.vip.rpc.batch(peer, [(False, 'say_bye', 'Alice', {}), (True, 'later', [], {})]) + self.vip.rpc.notify(peer, 'ready') + + +Inspection +---------- + +A list of methods is available by calling the `inspect` method. Additional information can be returned for any method +by appending ``.inspect`` to the method name. Here are a couple examples: + +.. code-block:: python + + self.vip.rpc.call(peer, 'inspect') # Returns a list of exported methods + self.vip.rpc.call(peer, 'say_hello.inspect') # Return metadata on say_hello method + + +VCTL RPC Commands +~~~~~~~~~~~~~~~~~ + +There are two rpc subcommands available through vctl, *list* and *code*. + +The list subcommand displays all of the agents that have a peer +connection to the instance and which methods are available from +each of these agents. + +.. code-block:: console + + vctl rpc list + config.store + delete_config + get_configs + manage_delete_config + manage_delete_store + manage_get + manage_get_metadata + manage_list_configs + manage_list_stores + manage_store + set_config + . + . + . + + platform.historian + get_aggregate_topics + get_topic_list + get_topics_by_pattern + get_topics_metadata + get_version + insert + query + volttron.central + get_publickey + is_registered + +If a single agent is specified, it will list all methods available for that agent. + +.. code-block:: console + + vctl rpc list platform.historian + platform.historian + get_aggregate_topics + get_topic_list + get_topics_by_pattern + get_topics_metadata + get_version + insert + query + +If the -v option is selected, all agent subsystem rpc methods will be displayed +for each selected agent as well. + +.. code-block:: console + + vctl rpc list -v platform.historian + platform.historian + get_aggregate_topics + get_topic_list + get_topics_by_pattern + get_topics_metadata + get_version + insert + query + agent.version + health.set_status + health.get_status + health.get_status_json + health.send_alert + heartbeat.start + heartbeat.start_with_period + heartbeat.stop + heartbeat.restart + heartbeat.set_period + config.update + config.initial_update + auth.update + +If an agent is specified, and then a method (or methods) are specified, +all parameters associated with the method(s) will be output. + +.. code-block:: console + + vctl rpc list platform.historian get_version query + platform.historian + get_version + Parameters: + query + Parameters: + topic: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': None} + start: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': None} + end: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': None} + agg_type: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': None} + agg_period: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': None} + skip: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': 0} + count: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': None} + order: + {'kind': 'POSITIONAL_OR_KEYWORD', 'default': 'FIRST_TO_LAST'} + + +By adding the '-v' option to this stage, the doc-string description +of the method will be displayed along with the method and parameters if available. + +.. code-block:: console + + vctl rpc list -v platform.historian get_version + platform.historian + get_version + Documentation: + RPC call to get the version of the historian + + :return: version number of the historian used + :rtype: string + + Parameters: + + vctl rpc code + vctl rpc list + vctl rpc list + vctl rpc list -v + vctl rpc list -v + vctl rpc code -v + vctl rpc code + vctl rpc code + +The code subcommand functions similarly to list, except that it will output the code +to be used in an agent when writing an rpc call. Any available parameters are included +as a list in the line of code where the parameters will need to be provided. These will +need to be modified based on the use case. + +.. code-block:: console + + vctl rpc code + self.vip.rpc.call(config.store, delete_config, ['config_name', 'trigger_callback', 'send_update']).get() + self.vip.rpc.call(config.store, get_configs).get() + self.vip.rpc.call(config.store, manage_delete_config, ['args', 'kwargs']).get() + self.vip.rpc.call(config.store, manage_delete_store, ['args', 'kwargs']).get() + self.vip.rpc.call(config.store, manage_get, ['identity', 'config_name', 'raw']).get() + self.vip.rpc.call(config.store, manage_get_metadata, ['identity', 'config_name']).get() + self.vip.rpc.call(config.store, manage_list_configs, ['identity']).get() + self.vip.rpc.call(config.store, manage_list_stores).get() + self.vip.rpc.call(config.store, manage_store, ['args', 'kwargs']).get() + self.vip.rpc.call(config.store, set_config, ['config_name', 'contents', 'trigger_callback', 'send_update']).get() + . + . + . + self.vip.rpc.call(platform.historian, get_aggregate_topics).get() + self.vip.rpc.call(platform.historian, get_topic_list).get() + self.vip.rpc.call(platform.historian, get_topics_by_pattern, ['topic_pattern']).get() + self.vip.rpc.call(platform.historian, get_topics_metadata, ['topics']).get() + self.vip.rpc.call(platform.historian, get_version).get() + self.vip.rpc.call(platform.historian, insert, ['records']).get() + self.vip.rpc.call(platform.historian, query, ['topic', 'start', 'end', 'agg_type', 'agg_period', 'skip', 'count', 'order']).get() + self.vip.rpc.call(volttron.central, get_publickey).get() + self.vip.rpc.call(volttron.central, is_registered, ['address_hash', 'address']).get() + +As with rpc list, the code subcommand can be filtered based on the :term`VIP identity` and/or the method(s). + +.. code-block:: console + + vctl rpc code platform.historian + self.vip.rpc.call(platform.historian, get_aggregate_topics).get() + self.vip.rpc.call(platform.historian, get_topic_list).get() + self.vip.rpc.call(platform.historian, get_topics_by_pattern, ['topic_pattern']).get() + self.vip.rpc.call(platform.historian, get_topics_metadata, ['topics']).get() + self.vip.rpc.call(platform.historian, get_version).get() + self.vip.rpc.call(platform.historian, insert, ['records']).get() + self.vip.rpc.call(platform.historian, query, ['topic', 'start', 'end', 'agg_type', 'agg_period', 'skip', 'count', 'order']).get() + +.. code-block:: console + + vctl rpc code platform.historian query + self.vip.rpc.call(platform.historian, query, ['topic', 'start', 'end', 'agg_type', 'agg_period', 'skip', 'count', 'order']).get() + + +Implementation +-------------- + +See the `RPC module `_ +for implementation details. + +Also see :ref:`Multi-Platform RPC Communication ` and :ref:`RPC in RabbitMQ ` for +additional resources. diff --git a/docs/source/platform-features/message-bus/vip/vip-known-identities.rst b/docs/source/platform-features/message-bus/vip/vip-known-identities.rst new file mode 100644 index 0000000000..fbe2846e09 --- /dev/null +++ b/docs/source/platform-features/message-bus/vip/vip-known-identities.rst @@ -0,0 +1,31 @@ +.. _VIP-Known-Identities: + +==================== +VIP Known Identities +==================== + +It is critical for systems to have known locations for receiving resources and services from in a networked environment. +The following table details the vip identities that are reserved for VOLTTRON specific usage. + +.. csv-table:: Known Identities + :header: "VIP Identity","Agent/Feature","Notes" + + "platform","","" + "platform.agent","Platform Agent","Used to allow the VolttronCentralAgent to control and individual platform" + "platform.auth","Platform Auth","The identity of VolttronCentralAgent" + "volttron.central","VOLTTRON Central","The identity of VolttronCentralAgent" + "platform.historian","User-Selected Historian","An individual platform may have many historians available to it, however this is one available through Volttron Central. Note that this does not require a specific type of historian, just that it's :term:`VIP Identity`" + "platform.topic_watcher","TopicWatcher","Agent which publishes alerts for topics based on timing thresholds" + "platform.sysmon","Sysmon","Agent which publishes System Monitoring statistics" + "platform.emailer","Emailer","Agent used by other agents on the platform to send email notifications" + "platform.health","Platform Health","Agent health service" + "platform.market","Market Services","The default identity for Market Service agents" + "control","Platform Control","Control service facilitates the starting, stopping, removal, and installation of the agents on an instance. This agent is executing within the main volttron process" + "control.connection","Platform Control","Short lived identity used by all of the volttron-ctl (`vctl`) commands" + "pubsub","Pub/Sub Router","Pub/Sub subsystem router. Allows backward compatibility with version 4.1" + "platform_web","Platform Web Service","Facilitates HTTP/HTTPS requests from browsers and routes them to the corresponding agent for processing (will be renamed to platform.web in future update)" + "keydiscovery","Server Key Discovery","Agent that enables discovery of server keys of remote platforms in a multi-platform setup" + "platform.actuator","Actuator","Agent which coordinates sending control commands to devices" + "config.store","Configuration Store","The configuration subsystem service agent on the platform. Includes scheduling" + "platform.driver","Platform Driver","The default identity for the Platform Driver Agent (will be renamed Platform Driver Agent) which is responsible for coordinating device communication" + "zmq.proxy.router","Zero MQ Proxy","ZeroMQ's proxy service for Pub/Sub subsystem router. Allows backward compatibility between rmq and zmq instances of VOLTTRON" diff --git a/docs/source/core_services/messagebus/VIP/VIP-Overview.rst b/docs/source/platform-features/message-bus/vip/vip-overview.rst similarity index 65% rename from docs/source/core_services/messagebus/VIP/VIP-Overview.rst rename to docs/source/platform-features/message-bus/vip/vip-overview.rst index 32fc98f9bb..8bc24271b8 100644 --- a/docs/source/core_services/messagebus/VIP/VIP-Overview.rst +++ b/docs/source/platform-features/message-bus/vip/vip-overview.rst @@ -1,9 +1,13 @@ .. _VIP-Overview: -VIP - VOLTTRON™ Interconnect Protocol -+++++++++++++++++++++++++++++++++++++ +=============================== +VOLTTRON™ Interconnect Protocol +=============================== -This document specifies VIP, the VOLTTRON™ Interconnect Protocol. The use case for VIP is to provide communications between *agents*, *controllers*, *services*, and the supervisory *platform* in an abstract fashion so that additional protocols can be built and used above VIP. VIP defines how *peers* connect to the *router* and the messages they exchange. +This document specifies VIP, the VOLTTRON™ Interconnect Protocol. The use case for VIP is to provide communications +between *agents*, *controllers*, *services*, and the supervisory *platform* in an abstract fashion so that additional +protocols can be built and used above VIP. VIP defines how *peers* connect to the *router* and the messages they +exchange. * Name: github.com/VOLTTRON/volttron/wiki/VOLTTRON-Interconnect-Protocol * Editor: Brandon Carpenter @@ -16,57 +20,66 @@ This document specifies VIP, the VOLTTRON™ Interconnect Protocol. The use case .. _ZAP: http://rfc.zeromq.org/spec:27/ZAP. -Preamble -======== +.. toctree:: + :caption: VIP Topics -Copyright 2019, Battelle Memorial Institute. + vip-json-rpc + vip-known-identities + vip-authentication + vip-authorization + vip-enhancements + agent-vip-id -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at -http://www.apache.org/licenses/LICENSE-2.0 - -The patent license grant shall only be applicable to the following patent and patent application (Battelle IPID 17008-E), as assigned to the Battelle Memorial Institute, as used in conjunction with this Work: • US Patent No. 9,094,385, issued 7/28/15 • USPTO Patent App. No. 14/746,577, filed 6/22/15, published as US 2016-0006569. - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - -The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in `RFC 2119`_. - -.. _RFC 2119: http://tools.ietf.org/html/rfc2119 - - -Overall Design -============== +Design Overview +=============== What Problems does VIP Address? ------------------------------- -When VOLTTRON agents, controllers, or other entities needed to exchange data, they previously used the first generation pub/sub messaging mechanism and ad-hoc methods to set up direct connections. While the pub/sub messaging is easy to implement and use, it suffers from several limitations: +When VOLTTRON agents, controllers, or other entities needed to exchange data, they previously used the first generation +pub/sub messaging mechanism and ad-hoc methods to set up direct connections. While the pub/sub messaging is easy to +implement and use, it suffers from several limitations: * It requires opening two listening sockets: one each for publishing and subscribing. * There is no trivial way to prevent message spoofing. * There is no trivial way to enable private messaging * It is not ideal for peer-to-peer communications. -These limitations have severe security implications. For improved security in VOLTTRON, the communications protocol must provide a method for secure data exchange that is intuitive and simple to implement and use. +These limitations have severe security implications. For improved security in VOLTTRON, the communications protocol +must provide a method for secure data exchange that is intuitive and simple to implement and use. + +Many messaging platforms already provides many of the building blocks to implement encrypted and authenticated +communications over a shared socket. They include a socket type implementing the router pattern. What remains is a +protocol built on the ZeroMQ and/or RabbitMQ to provide a single connection point, secure message passing, and retain +the ability for entities to come and go as they please. -ZeroMQ already provides many of the building blocks to implement encrypted and authenticated communications over a shared socket. It already includes a socket type implementing the router pattern. What remains is a protocol built on ZeroMQ to provide a single connection point, secure message passing, and retain the ability for entities to come and go as they please. +VIP is VOLTTRON protocol implementation targeting the limitations above. -VIP is just that protocol, specifically targeting the limitations above. +ZeroMQ +====== Why ZeroMQ? ----------- -Rather than reinvent the wheel, VIP makes use of many features already implemented in ZeroMQ, including ZAP and CurveMQ. While VIP doesn't require the use of ZAP or CurveMQ, their use substantially improves security by encrypting traffic over public networks and limiting connections to authenticated peers. +Rather than reinvent the wheel, VIP makes use of many features already implemented in ZeroMQ, including ZAP and CurveMQ. +While VIP doesn't require the use of ZAP or CurveMQ, their use substantially improves security by encrypting traffic +over public networks and limiting connections to authenticated peers. -ZeroMQ also provides reliable transports with built-in framing, automatic reconnection, in-process zero-copy message passing, abstractions for underlying protocols, and so much more. While some of these features create other pain points, they are minimal compared with the effort of either reimplementing or cobbling together libraries. +ZeroMQ also provides reliable transports with built-in framing, automatic reconnection, in-process zero-copy message +passing, abstractions for underlying protocols, and so much more. While some of these features create other pain +points, they are minimal compared with the effort of either reimplementing or cobbling together libraries. VIP is a routing protocol ------------------------- -VIP uses the ZeroMQ router pattern. Specifically, the router binds a ROUTER socket and peers connect using a DEALER or ROUTER socket. Unless the peer is connecting a single socket to multiple routers, using the DEALER socket is easiest, but there are instances where using a ROUTER is more appropriate. One must just exercise care to include the proper address envelope to ensure proper routing. +VIP uses the ZeroMQ router pattern. Specifically, the router binds a ROUTER socket and peers connect using a DEALER or +ROUTER socket. Unless the peer is connecting a single socket to multiple routers, using the DEALER socket is easiest, +but there are instances where using a ROUTER is more appropriate. One must just exercise care to include the proper +address envelope to ensure proper routing. Extensible Security @@ -78,7 +91,8 @@ VIP makes no assumptions about the security mechanisms used. It works equally we ZeroMQ Compatibility -------------------- -For enhanced security, VOLTTRON recommends libzmq version 4.1 or greater, however, most features of VIP are available with older versions. The following is an incomplete list of core features available with recent versions of libzmq. +For enhanced security, VOLTTRON recommends libzmq version 4.1 or greater, however, most features of VIP are available +with older versions. The following is an incomplete list of core features available with recent versions of libzmq. * Version 3.2: @@ -97,7 +111,11 @@ For enhanced security, VOLTTRON recommends libzmq version 4.1 or greater, howeve Message Format and Version Detection ------------------------------------ -VIP uses a simple, multi-frame format for its messages. The first one (for peers) or two (for router) frames contain the delivery address(es) and are follow immediately by the VIP signature ``VIP1``. The first characters of the signature are used to match the protocol and the last character digit indicates the protocol version, which will be incremented as the protocol is revised. This allows for fail-fast behavior and backward compatibility while being simple to implement in any language supported by ZeroMQ. +VIP uses a simple, multi-frame format for its messages. The first one (for peers) or two (for router) frames contain +the delivery address(es) and are follow immediately by the VIP signature ``VIP1``. The first characters of the +signature are used to match the protocol and the last character digit indicates the protocol version, which will be +incremented as the protocol is revised. This allows for fail-fast behavior and backward compatibility while being +simple to implement in any language supported by ZeroMQ. Formal Specification @@ -107,7 +125,8 @@ Formal Specification Architecture ------------ -VIP defines a message-based dialog between a *router* that transfers data between *peers*. The *router* and *peers* SHALL communicate using the following socket types and transports: +VIP defines a message-based dialog between a *router* that transfers data between *peers*. The *router* and *peers* +SHALL communicate using the following socket types and transports: * The router SHALL use a ROUTER socket. * Peers SHALL use a DEALER or ROUTER socket. @@ -119,7 +138,8 @@ VIP defines a message-based dialog between a *router* that transfers data betwee Message Format -------------- -A routing exchange SHALL consist of a peer sending a message to the router followed by the router receiving the message and sending it to the destination peer. +A routing exchange SHALL consist of a peer sending a message to the router followed by the router receiving the message +and sending it to the destination peer. Messages sent to the router by peers SHALL consist of the following message frames: @@ -130,7 +150,11 @@ Messages sent to the router by peers SHALL consist of the following message fram * The *subsystem*, which SHALL contain a string. * The *data*, which SHALL be zero or more subsystem-specific opaque frames. -Messages received from a peer by the router will automatically have a *sender* frame prepended to the message by the ROUTER socket. When the router forwards the message, the sender and recipient fields are swapped so that the *recipient* is in the first frame and the *sender* is in the second frame. The *recipient* frame is automatically stripped by the ROUTER socket during delivery. Peers using ROUTER sockets must prepend the message with an *intermediary* frame, which SHALL contain the identity of a router socket. +Messages received from a peer by the router will automatically have a *sender* frame prepended to the message by the +ROUTER socket. When the router forwards the message, the sender and recipient fields are swapped so that the *recipient* +is in the first frame and the *sender* is in the second frame. The *recipient* frame is automatically stripped by the +ROUTER socket during delivery. Peers using ROUTER sockets must prepend the message with an *intermediary* frame, which +SHALL contain the identity of a router socket. Messages received from the router by peers SHALL consist of the following message frames: @@ -146,22 +170,33 @@ The various fields have these meanings: * sender: the ZeroMQ DEALER or ROUTER identity of the sending (source) peer. * recipient: the ZeroMQ DEALER or ROUTER identity of the recipient (destination) peer. * intermediary: the ZeroMQ ROUTER identity of the intermediary router. -* user id: VIP authentication metadata set in the authenticator. See the discussion below for more information on this value. -* request id: the meaning of this field is defined by the sending peer. Replies SHALL echo the request id without modifying it. -* subsystem: this specifies the peer subsystem the data is intended for. The length of a subsystem name SHALL NOT exceed 255 characters and MUST only contain ASCII characters. -* data: provides the data for the given subsystem. The number of frames required is defined by each subsystem. +* user id: VIP authentication metadata set in the authenticator. See the discussion below for more information on this + value. +* request id: the meaning of this field is defined by the sending peer. Replies SHALL echo the request id without + modifying it. +* subsystem: this specifies the peer subsystem the data is intended for. The length of a subsystem name SHALL NOT + exceed 255 characters and MUST only contain ASCII characters. +* data: provides the data for the given subsystem. The number of frames required is defined by each subsystem. User ID ------- -The value in the *user id* frame depends on the implementation and the version of ZeroMQ. If ZAP is used with libzmq 4.1.0 or newer, peers should send an empty string for the user id and the ZAP authenticator will replace it with an authentication token which receiving peers may use to authorize access. If ZAP is not used or a version of libzmq is used which lacks support for retrieving the user id metadata, an authentication subsystem may be used to authenticate peers. The authentication subsystem SHALL provide peers with private tokens that must be sent with each message in the user id frame and which the router will substitute with a public token before forwarding. If the message cannot be authenticated, the user id received by peers SHALL be a zero-length string. +The value in the *user id* frame depends on the implementation and the version of ZeroMQ. If `ZAP` is used with libzmq +4.1.0 or newer, peers should send an empty string for the user id and the ZAP authenticator will replace it with an +authentication token which receiving peers may use to authorize access. If ZAP is not used or a version of libzmq is +used which lacks support for retrieving the user id metadata, an authentication subsystem may be used to authenticate +peers. The authentication subsystem SHALL provide peers with private tokens that must be sent with each message in the +user id frame and which the router will substitute with a public token before forwarding. If the message cannot be +authenticated, the user id received by peers SHALL be a zero-length string. Socket Types ------------ -Peers communicating via the router will typically use DEALER sockets and should not require additional handling. However, a DEALER peer may only connect to a single router. Peers may use ROUTER sockets to connect to multiple endpoints, but must prepend the routing ID of the destination. +Peers communicating via the router will typically use DEALER sockets and should not require additional handling. +However, a DEALER peer may only connect to a single router. Peers may use ROUTER sockets to connect to multiple +endpoints, but must prepend the routing ID of the destination. When using a DEALER socket: @@ -177,12 +212,14 @@ When using a ROUTER socket: Routing Identities ------------------ -Routing identities are set on a socket using the ZMQ_IDENTITY socket option and MUST be set on both ROUTER and DEALER sockets. The following additional requirements are placed on the use of peer identities: +Routing identities are set on a socket using the ZMQ_IDENTITY socket option and MUST be set on both ROUTER and DEALER +sockets. The following additional requirements are placed on the use of peer identities: * Peers SHALL set a valid identity rather than rely on automatic identity generation. * The router MAY drop messages with automatically generated identities, which begin with the zero byte ('\0'). -A zero length identity is invalid for peers and is, therefore, unroutable. It is used instead to address the router itself. +A zero length identity is invalid for peers and is, therefore, unroutable. It is used instead to address the router +itself. * Peers SHALL use a zero length recipient to address the router. * Messages sent from the router SHALL have a zero length sender address. @@ -191,7 +228,10 @@ A zero length identity is invalid for peers and is, therefore, unroutable. It is Error Handling ============== -The documented default behavior of ZeroMQ ROUTER sockets when entering the mute state (when the send buffer is full) is to silently discard messages without blocking. This behavior, however, is not consistently observed. Quietly discarding messages is not the desired behavior anyway because it prevents peers from taking appropriate action to the error condition. +The documented default behavior of ZeroMQ ROUTER sockets when entering the mute state (when the send buffer is full) is +to silently discard messages without blocking. This behavior, however, is not consistently observed. Quietly discarding +messages is not the desired behavior anyway because it prevents peers from taking appropriate action to the error +condition. * Routers SHALL set the ZMQ_SNDTIMEO socket option to 0. * Routers SHALL forward EAGAIN errors to sending peers. @@ -222,7 +262,9 @@ An error message must contain the following: Subsystems ========== -Peers may support any number of communications protocols or subsystems. For instance, there may be a remote procedure call (RPC) subsystem which defines its own protocol. These subsystems are outside the scope of VIP and this document with the exception of the *hello* and *ping* subsystems. +Peers may support any number of communications protocols or subsystems. For instance, there may be a remote procedure +call (RPC) subsystem which defines its own protocol. These subsystems are outside the scope of VIP and this document +with the exception of the *hello* and *ping* subsystems. * A router SHALL implement the hello subsystem. * All peers and routers SHALL implement the ping subsystem. @@ -231,7 +273,8 @@ Peers may support any number of communications protocols or subsystems. For inst The hello Subsystem ------------------- -The hello subsystem provides one simple RPC-style routine for peers to probe the router for version and identity information. +The hello subsystem provides one simple RPC-style routine for peers to probe the router for version and identity +information. A peer hello request message must contain the following: @@ -258,13 +301,16 @@ The hello subsystem can help a peer with the following tasks: * Discover the identity of the peer. * Discover authentication metadata. -For instance, if a peer will use a ROUTER socket for its connections, it must first know the identity of the router. The peer might first connect with a DEALER socket, issue a hello, and use the returned identity to then connect the ROUTER socket. +For instance, if a peer will use a ROUTER socket for its connections, it must first know the identity of the router. +The peer might first connect with a DEALER socket, issue a hello, and use the returned identity to then connect the +ROUTER socket. The ping Subsystem ------------------ -The *ping* subsystem is useful for testing the presence of a peer and the integrity and latency of the connection. All endpoints, including the router, must support the ping subsystem. +The *ping* subsystem is useful for testing the presence of a peer and the integrity and latency of the connection. +All endpoints, including the router, must support the ping subsystem. A peer ping request message must contain the following: @@ -282,19 +328,24 @@ A ping response message must contain the following: * The first data frame SHALL be the 4 octets 'pong'. * The remaining data frames SHALL be copied from the ping request unchanged, starting with the second data frame. -Any data can be included in the ping and should be returned unchanged in the pong, but limited trust should be placed in that data as it is possible a peer might modify it against the direction of this specification. +Any data can be included in the ping and should be returned unchanged in the pong, but limited trust should be placed in +that data as it is possible a peer might modify it against the direction of this specification. Discovery --------- -VIP does not define how to discover peers or routers. Typical options might be to hard code the router address in peers or to pass it in via the peer configuration. A well known (i.e. statically named) directory service might be used to register connected peers and allow for discovery by other peers. +VIP does not define how to discover peers or routers. Typical options might be to hard code the router address in peers +or to pass it in via the peer configuration. A well known (i.e. statically named) directory service might be used to +register connected peers and allow for discovery by other peers. Example Exchanges ================= -These examples show the messages *as sent on the wire* as sent or received by peers using DEALER sockets. The messages received or sent by peers or routers using ROUTER sockets will have an additional address at the start. We do not show the frame sizes or flags, only frame contents. +These examples show the messages *as sent on the wire* as sent or received by peers using DEALER sockets. The messages +received or sent by peers or routers using ROUTER sockets will have an additional address at the start. We do not show +the frame sizes or flags, only frame contents. Example of hello Request @@ -318,7 +369,8 @@ This shows a hello request sent by a peer, with identity "alice", to a connected | hello | Operation, "hello" in this case +-------+ -This example assumes a DEALER socket. If a peer uses a ROUTER socket, it SHALL prepend an additional frame containing the router identity, similar to the following example. +This example assumes a DEALER socket. If a peer uses a ROUTER socket, it SHALL prepend an additional frame containing +the router identity, similar to the following example. This shows the example request received by the router: @@ -478,7 +530,8 @@ This shows the example request received by "bob": | 1422573492 | Data, a single frame in this case (Unix timestamp) +------------+ -If "bob" were using a ROUTER socket, there would be an additional frame prepended to the message containing the router identity, "router" in this case. +If "bob" were using a ROUTER socket, there would be an additional frame prepended to the message containing the router +identity, "router" in this case. This shows an example reply from "bob" to "alice" diff --git a/docs/source/platform-features/security/key-stores.rst b/docs/source/platform-features/security/key-stores.rst new file mode 100644 index 0000000000..a556fdb5d2 --- /dev/null +++ b/docs/source/platform-features/security/key-stores.rst @@ -0,0 +1,45 @@ +.. _Key-Stores: + +========== +Key Stores +========== + +.. warning:: + + Most VOLTTRON users should not need to directly interact with agent key stores. These are notes for VOLTTRON + platform developers. This is not a stable interface and the implementation details are subject to change. + +Each agent has its own encryption key-pair that is used to :ref:`authenticate` itself with the +VOLTTRON platform. A key-pair comprises a public key and a private (secret) key. These keys are saved in a +"key store", which is implemented by the :py:class:`KeyStore class`. Each agent +has its own key store. + +Key Store Locations +------------------- + +There are two main locations key stores will be saved. Installed agents' key stores are in the the agent's data +directory: + +.. code-block:: bash + + $VOLTTRON_HOME/agents///keystore.json + +Agents that are not installed, such as platform services and stand-alone agents, store their key stores here: + +.. code-block:: bash + + $VOLTTRON_HOME/keystores//keystore.json + + +Generating a Key Store +---------------------- + +Agents automatically retrieve keys from their key store unless both the ``publickey`` and ``secretkey`` parameters are +specified when the agent is initialized. If an agent's key store does not exist it will automatically be generated upon +access. + +Users can generate a key pair by running the following command: + +.. code-block:: bash + + vctl auth keypair diff --git a/docs/source/platform-features/security/known-hosts-file.rst b/docs/source/platform-features/security/known-hosts-file.rst new file mode 100644 index 0000000000..76ed433696 --- /dev/null +++ b/docs/source/platform-features/security/known-hosts-file.rst @@ -0,0 +1,72 @@ +.. _Known-Hosts-File: + +================ +Known Hosts File +================ + +Before an agent can connect to a VOLTTRON platform that agent must know the platform's :term:`VIP address` and public +key (known as the `server key`). It can be tedious to manually keep track of server keys and match them with their +corresponding addresses. + +The purpose of the known-hosts file is to save a mapping of platform addresses to server keys. This way the user only +has to specify a server key one time. + + +Saving a Server Key +------------------- + +Suppose a user wants to connect to a platform at ``192.168.0.42:22916``, and the platform's public key is +``uhjbCUm3kT5QWj5Py9w0XZ7c1p6EP8pdo4Hq4dNEIiQ``. To save this address-to-server-key association, the user can run: + +.. code-block:: bash + + volttron-ctl auth add-known-host --host 192.168.0.42:22916 --serverkey uhjbCUm3kT5QWj5Py9w0XZ7c1p6EP8pdo4Hq4dNEIiQ + +Now agents on this system will automatically use the correct server key when connecting to the platform at +``192.168.0.42:22916``. + + +Server Key for Local Platforms +------------------------------ + +When a platform starts it automatically adds its public key to the known-hosts file. Thus agents connecting to the +local VOLTTRON platform (on the same system and using the same ``$VOLTTRON_HOME``) will automatically be able to +retrieve the platform's public key. + + +Know-Host-File Details +---------------------- + +.. note:: + + The following details regarding the known-hosts file are subject to change. These notes are primarily for + developers, but the may be helpful if troubleshooting an issue. **The known-hosts file should not be edited + directly.** + + +File Location +^^^^^^^^^^^^^ + +The known-hosts-file is stored at ``$VOLTTRON_HOME/known_hosts``. + + +File Contents +^^^^^^^^^^^^^ + +Here are the contents of an example known-hosts file: + +.. code:: JSON + + { + "@": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", + "127.0.0.1:22916": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", + "127.0.0.2:22916": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", + "127.0.0.1:12345": "FSG7LHhy3v8tdNz3gK35G6-oxUcyln54pYRKu5fBJzU", + "192.168.0.42:22916": "uhjbCUm3kT5QWj5Py9w0XZ7c1p6EP8pdo4Hq4dNEIiQ" + } + +The first four entries are for the local platform. (They were automatically added when the platform started.) The first +entry with the ``@`` key is for IPC connections, and the entries with the ``127.0.0.*`` keys are for local TCP +connections. Note that a single VOLTTRON platform can bind to multiple TCP addresses, and each address will be +automatically added to the known-hosts file. The last entry is for a remote VOLTTRON platform. (It was added in the +`Saving a Server Key`_ section.) diff --git a/docs/source/platform-features/security/running-agent-as-user.rst b/docs/source/platform-features/security/running-agent-as-user.rst new file mode 100644 index 0000000000..24be716ab0 --- /dev/null +++ b/docs/source/platform-features/security/running-agent-as-user.rst @@ -0,0 +1,118 @@ +.. _Running-Agents-as-Unix-User: + +============================ +Running Agents as Unix Users +============================ + +This VOLTTRON feature will cause the platform to create a new, unique Unix user(agent users) on the host machine for +each agent installed on the platform. This user will have restricted permissions for the file system, and will be used +to run the agent process. + +.. warning:: + + The Unix user starting the VOLTTRON platform will be given limited sudo access to create and delete agent users. + +Since this feature requires system level changes (e.g. sudo access, user creation, file permission changes), the initial +step needs to be run as root or user with `sudo` access. This can be a user other than Unix user used to run the +VOLTTRON platform. + +All files and folder created by the VOLTTRON process in this mode would not have any access to others by default. +Permission for Unix group others would be provided to specific files and folder based on VOLTTRON process requirement. + +It is recommended that you use a new :term:`VOLTTRON_HOME` to run VOLTTRON in secure mode. Converting a existing +VOLTTRON instance to secure mode is also possible but would involve some manual changes. Please see the section +`Porting existing volttron home to secure mode`_. + +.. note:: + + VOLTTRON has to be bootstrapped as prerequisite to running agents as unique users. + + +Setup agents to run using unique users +====================================== + +1. **This feature requires acl to be installed.** + + Make sure the `acl` library is installed. If you are running on a Docker image `acl` might not be installed by + default. + + .. code-block:: bash + + apt-get install acl + +2. Agents now run as a user different from VOLTTRON platform user. Agent users should have `read` and `execute` + permissions to all directories in the path to the Python executable used by VOLTTRON. For example, if VOLTTRON is + using a virtual environment, then agent users should have *read* permissions to `/bin/python` and *read + and execute* permission to all the directories in the path `/bin`. This can be achieved by running: + + .. code-block:: bash + + chmod -R o+rx /bin + +3. **Run scripts/secure_user_permissions.sh as root or using sudo** + + This script *MUST* be run as root or using `sudo`. This script gives the VOLTTRON platform user limited `sudo` + access to create a new Unix user for each agent. All users created will be of the format `volttron_`. + + This script prompts for: + + a. **volttron platform user** - Unix user who would be running the VOLTTRON platform. This should be an existing + Unix user. On a development machine this could be the Unix user you logged in as to check out VOLTTRON source + + b. **VOLTTRON_HOME directory** - The absolute path of the volttron home directory. + + c. **Volttron instance name if VOLTTRON_HOME/config does not exist** - + + If the `VOLTTRON_HOME/config` file exists then instance name is obtained from that config file. If not, the user + will be prompted for an instance name. `volttron_` *MUST* be a 23 characters or shorter containing + only characters valid as Unix user names. + + This script will create necessary entries in `/etc/sudoers.d/volttron` to allow the VOLTTRON platform user to create + and delete agent users, the VOLTTRON agent group, and run any non-sudo command as the agent users. + + This script will also create `VOLTTRON_HOME` and the config file if given a new VOLTTRON home directory when + prompted. + +4. **Continue with VOLTTRON bootstrap and setup as normal** - point to the `VOLTTRON_HOME` that you provided in step 2. + +5. **On agent install (or agent start for existing agents)** - a unique agent user(Unix user) is created and the agent + is started as this user. The agent user name is recorded in `USER_ID` file under the agent install directory + (`VOLTTRON_HOME/agents//USER_ID`). Subsequent agent restarts will read the content of the `USER_ID` file + and start the agent process as that user. + +6. **On agent uninstall** - The agent user is deleted and the agent install directory is deleted. + + +Creating new Agents +=================== + +In this secure mode, agents will only have read write access to the agent-data directory under the agent install +directory - `VOLTTRON_HOME/agents///.agent-data`. Attempting to write in any other +folder under `VOLTTRON_HOME` **will result in permission errors**. + + +Changes to existing agents in secure mode +========================================= + +Due to the above change, **SQL historian has been modified to create its database by default under its agent-data +directory** if no path is given in the config file. If providing a path to the database in the config file, please +provide a directory where agent will have write access. This can be an external directory for which agent user +(`recorded in VOLTTRON_HOME/agents//USER_ID`) has *read, write, and execute* access. + + +Porting existing VOLTTRON home to secure mode +============================================= + +When running `scripts/secure_users_permissions.sh` you will be prompted for a `VOLTTRON_HOME` directory. If this +directory exists and contains a volttron config file, the script will update the file locations and permissions of +existing VOLTTRON files including installed directories. However this step has the following limitations: + +#. **You will NOT be able to revert to insecure mode once the changes are done.** - Once setup is complete, changing the + config file manually to make parameter `secure-agent-users` to `False`, may result inconsistent VOLTTRON behavior +#. The VOLTTRON process and all agents have to be restarted to take effect +#. **Agents can only to write to its own agent-data dir.** - If your agents writes to any directory outside + `$VOLTTRON_HOME/agents///agent-name.agent-data` move existing files and update the agent + configuration such that the agent writes to the `agent-name.agent-data` dir. For example, if you have a + `SQLHistorian` which writes a `.sqlite` file to a subdirectory under `VOLTTRON_HOME` that is not + `$VOLTTRON_HOME/agents///agent-name.agent-data` this needs to be manually updated. + diff --git a/docs/source/platform-features/security/volttron-security.rst b/docs/source/platform-features/security/volttron-security.rst new file mode 100644 index 0000000000..a9175536f8 --- /dev/null +++ b/docs/source/platform-features/security/volttron-security.rst @@ -0,0 +1,32 @@ +.. _VOLTTRON-Security: + +================= +Platform Security +================= + +There are various security-related topics throughout VOLTTRON's documentation. This is a quick roadmap for finding +security documentation. + +A core component of VOLTTRON is its :ref:`message bus`. The security of this message bus is +crucial to the entire system. The :ref:`VOLTTRON Interconnect Protocol` provides communication over the +message bus. + +VIP was built with security in mind from the ground up. VIP uses encrypted channels and enforces agent +:ref:`authentication` by default for all network communication. VIP's +:ref:`authorization` mechanism allows system administrators to limit agent capabilities with fine +granularity. + +Even with these security mechanisms built into VOLTTRON, it is important for system administrators to +:ref:`harden VOLTTRON's underlying OS `. + +The VOLTTRON team has engaged with PNNL's Secure Software Central team to create a threat profile document. You can +read about the threat assessment findings and how the VOLTTRON team is addressing them here: `SSC Threat Profile +`_ + +Additional documentation related to VIP authentication and authorization is available here: + +.. toctree:: + + key-stores + known-hosts-file + running-agent-as-user diff --git a/docs/source/services.rst b/docs/source/services.rst deleted file mode 100644 index ce633fc475..0000000000 --- a/docs/source/services.rst +++ /dev/null @@ -1,9 +0,0 @@ -Services -======== - -.. toctree:: - :maxdepth: 4 - :glob: - - apidocs/services/*/modules - diff --git a/docs/source/setup/RabbitMQ/Backward_Compatibility.rst b/docs/source/setup/RabbitMQ/Backward_Compatibility.rst deleted file mode 100644 index 86e493d6b1..0000000000 --- a/docs/source/setup/RabbitMQ/Backward_Compatibility.rst +++ /dev/null @@ -1,257 +0,0 @@ -.. RMQ-Backward-Compatability: - -Backward Compatibility With ZeroMQ Message Based VOLTTRON -========================================================= - -RabbitMQ VOLTTRON supports backward compatibility with ZeroMQ based VOLTTRON. RabbitMQ VOLTTRON has a ZeroMQ router running internally to accept incoming ZeroMQ connections and to route ZeroMQ messages coming in/going out of it's instance. There are multiple ways for an instance with a RabbitMQ message bus, and an instance with ZeroMQ message bus to connect with each other. For example, an agent from one instance can directly connect to the remote instance to publish or pull data from it. Another way is through multi-platform communication, where the VOLTTRON platform is responsible for connecting to the remote instance. For more information on multi-platform communication, see https://volttron.readthedocs.io/en/develop/core_services/multiplatform/Multiplatform-Communication.html. - - -Agent Connecting Directly to Remote Instance --------------------------------------------- - -The following steps are to demonstrate how RabbitMQ VOLTTRON is backward compatible with ZeroMQ VOLTTRON, using the Forward Historian as an example. This example shows how to forward messages from local ZeroMQ based VOLTTRON to remote RabbitMQ based VOLTTRON instance. Similar steps can be followed if you needed to move messages from local RabbiMQ based VOLTTRON to ZeroMQ based VOLTTRON. - -1. In order for RabbitMQ and ZeroMQ VOLTTRONs to communicate with each other, one needs two instances of VOLTTRON_HOME on the same VM. To create a new instance of VOLTTRON_HOME use the command. - - ``export VOLTTRON_HOME=~/.new_volttron_home`` - - It is recommended that one uses multiple terminals to keep track of both instances. - -2. Start VOLTTRON on both instances. Note: since the start-volttron script uses the volttron.log by default, the second instance will need be started manually in the background, using a separate log. For example: - - ``volttron -vv -l volttron-two.log&`` - -3. Modify the configuration file for both instances. The config file is located at ``$VOLTTRON_HOME/config`` - - For RabbitMQ VOLTTRON, the config file should look similar to: - - .. code-block:: bash - - [volttron] - message-bus = rmq - vip-address = tcp://127.0.0.1:22916 - instance-name = volttron_rmq - - The ZeroMQ config file should look similar, with all references to RMQ being replaced with ZMQ, and a different vip-address - (e.g. tcp://127.0.0.2:22916). - -4. On the instance running ZeroMQ: - - a. Install the Forward Historian agent using an upgrade script similar to: - - .. code-block:: python - - #!/bin/bash - export CONFIG=$(mktemp /tmp/abc-script.XXXXXX) - cat > $CONFIG < $CONFIG <": {"vip-address":"tcp://:", - "instance-name":"", - "serverkey":"" - }, - "": {"vip-address":"tcp://:", - "instance-name":"", - "serverkey":"" - }, - "": {"vip-address":"tcp://:", - "instance-name":"", - "serverkey":"" - }, - ...... - } - - -Additionally for different combinations of multi-bus, multi-platform setup, please refer to :ref:`Multi-Platform Multi-Bus Walk-through <_Multi_Platform_Walkthrough>` diff --git a/docs/source/setup/RabbitMQ/Monitoring_RMQ.rst b/docs/source/setup/RabbitMQ/Monitoring_RMQ.rst deleted file mode 100644 index 81957ff694..0000000000 --- a/docs/source/setup/RabbitMQ/Monitoring_RMQ.rst +++ /dev/null @@ -1,50 +0,0 @@ -.. _Monitoring-RMQ: - -Monitoring and Controlling RabbitMQ -=================================== - -Some of the important native RabbitMQ control and management commands are now -integrated with "volttron-ctl" utility. Using volttron-ctl rabbitmq management -utility, we can control and monitor the status of RabbitMQ message bus. - -.. code-block:: bash - - vctl rabbitmq --help - usage: vctl command [OPTIONS] ... rabbitmq [-h] [-c FILE] [--debug] - [-t SECS] - [--msgdebug MSGDEBUG] - [--vip-address ZMQADDR] - ... - subcommands: - - add-vhost add a new virtual host - add-user Add a new user. User will have admin privileges - i.e,configure, read and write - add-exchange add a new exchange - add-queue add a new queue - list-vhosts List virtual hosts - list-users List users - list-user-properties - List users - list-exchanges add a new user - list-exchange-properties - list exchanges with properties - list-queues list all queues - list-queue-properties - list queues with properties - list-bindings list all bindings with exchange - list-federation-parameters - list all federation parameters - list-shovel-parameters - list all shovel parameters - list-policies list all policies - remove-vhosts Remove virtual host/s - remove-users Remove virtual user/s - remove-exchanges Remove exchange/s - remove-queues Remove queue/s - remove-federation-parameters - Remove federation parameter - remove-shovel-parameters - Remove shovel parameter - remove-policies Remove policy - diff --git a/docs/source/setup/RabbitMQ/RMQ_Multiplatform_Setup.rst b/docs/source/setup/RabbitMQ/RMQ_Multiplatform_Setup.rst deleted file mode 100644 index 9a8f0a469b..0000000000 --- a/docs/source/setup/RabbitMQ/RMQ_Multiplatform_Setup.rst +++ /dev/null @@ -1,25 +0,0 @@ -.. _RMQ-Multi-Platform-Setup: - -Multi-Platform Deployment With RabbitMQ Message bus -=================================================== - -In ZeroMQ based VOLTTRON, if multiple instances needed to be connected together -and be able to send or receive messages to/from remote instances we would do it -in few different ways. - -1. Write an agent that would connect to remote instance directly and publish/subscribe to messages or - perform RPC communication directly. This is described in - :ref:`Agent connection to remote volttron instance ` - - -2. Use special agents such as forwarder/data puller agents to forward/receive - messages to/from remote instances. This can be achieved using RabbitMQ's shovel plugin and is described at - :ref:`Using Shovel Plug-in` - - -3. Configure vip address of all remote instances that an instance has to connect to - in it's $VOLTTRON_HOME/external_discovery.json and let the router module in each instance - manage the connection and take care of the message routing for us. - This is the most seamless way to do multi-platform communication. This can be achieved using RabbitMQ's federation - plugin. Setup for this is described at :ref:`Using Federation Plug-in` - diff --git a/docs/source/setup/RabbitMQ/Troubleshooting_RMQ.rst b/docs/source/setup/RabbitMQ/Troubleshooting_RMQ.rst deleted file mode 100644 index dc21a8afea..0000000000 --- a/docs/source/setup/RabbitMQ/Troubleshooting_RMQ.rst +++ /dev/null @@ -1,93 +0,0 @@ -.. _Troubleshooting-RMQ: - - -RabbitMQ Trouble Shooting -========================= - -Check the status of the federation connection ----------------------------------------------- - - .. code-block:: bash - - $RABBITMQ_HOME/sbin/rabbitmqctl eval 'rabbit_federation_status:status().' - -If everything is properly configured, then the status is set to "running". If not look for the error status. Some of the typical errors are, - -a. "failed_to_connect_using_provided_uris" - Check if RabbitMQ user is created in downstream server node. Refer to step 3b of federation setup - - -b. "unknown ca" - Check if the root CAs are copied to all the nodes correctly. Refer to step 2 of federation setup - - -c. "no_suitable_auth_mechanism" - Check if the AMPQ/S ports are correctly configured. - - -Check the status of the shovel connection ------------------------------------------- - - .. code-block:: bash - - RABBITMQ_HOME/sbin/rabbitmqctl eval 'rabbit_shovel_status:status().' - -If everything is properly configured, then the status is set to "running". -If not look for the error status. Some of the typical errors are, - -a. "failed_to_connect_using_provided_uris" - Check if RabbitMQ user is created in subscriber node. Refer to step 3 b of shovel setup - -b. "unknown ca" - Check if the root CAs are copied to remote servers correctly. Refer to step 2 of shovel setup - -c. "no_suitable_auth_mechanism" - Check if the AMPQ/S ports are correctly configured. - - -Check the RabbitMQ logs for any errors ---------------------------------------- - - .. code-block:: bash - - tail -f /rabbitmq.log - - - -Rabbitmq startup hangs ----------------------- - -a. Check for errors in rabbitmq log. There is a rabbitmq.log file in your - volttron source directory that is a symbolic link to the rabbitmq server - logs. - -b. Check for errors in syslog (/var/log/syslog or /var/log/messages) - -c. If there are no errors in either of the logs, stop rabbitmq and - starting rabbitmq server in foreground and see if there are any errors - written on the console. Once you find the error you can kill the - process by entering Ctl+C, fix the error and start rabbitmq again using - ./start-rabbitmq from volttron source directory. - - .. code-block:: bash - - ./stop-volttron - ./stop-rabbitmq - @RABBITMQ_HOME/sbin/rabbitmq-server - - -SSL trouble shooting --------------------- -There are few things that are essential for SSL certificates to work right. - -a. Please use a unique common-name for CA certificate for each volttroninstance. This is configured under certificate-data - in the rabbitmq_config.yml or if no yml file is used while configuring a volttron single instance - (using vcfg --rabbitmq single). Certificate generated for agent will automatically get agent's vip identity as the - certificate's common-name - -b. host name in ssl certificate should match hostname used to access the server. For example, if the fully qualified domain - name was configured in the certificate-data, you should use the fully qualified domain name to access - rabbitmq's management url. - -c. Check if your system time is correct especially if you are running virtual machines. If the system clock is not right, it could lead to ssl certificate errors - -DataMover troubleshooting -------------------------- - -If output from volttron.log is not as expected check for ``{'alert_key': 'historian_not_publishing'}`` in the callee node's volttron.log. Most likely cause is the historian is not running properly or credentials between caller and callee nodes was not set properly. - - diff --git a/docs/source/setup/RabbitMQ/federation_plugin.rst b/docs/source/setup/RabbitMQ/federation_plugin.rst deleted file mode 100644 index 3d8480cede..0000000000 --- a/docs/source/setup/RabbitMQ/federation_plugin.rst +++ /dev/null @@ -1,144 +0,0 @@ -.. _federation-plugin: - -RabbitMQ Multi-Platform Deployment Using Federation Plugin -========================================================== - -Federation pluggin allows us to send and receive messages to/from remote instances with -few simple connection settings. Once a federation link is established to remote instance, -the messages published on the remote instance become available to local instance as if it -were published on the local instance. Before, we illustrate the steps to setup a federation -link, let us start by defining the concept of upstream and downstream server. - -**Upstream Server** - The node that is publishing some message of interest - -**DownStream Server** - The node that wants to receive messages from the upstream server - -A federation link needs to be established from downstream server to the upstream server. The -data flows in single direction from upstream server to downstream server. For bi-directional -data flow we would need to create federation links on both the nodes. - -1. Setup two VOLTTRON instances using the instructions at :ref:`RMQ Setup`. **Please note that each instance should have a unique instance name and should be running on machine/VM that has a unique host name.** - -2. In a multi platform setup that need to communicate with each other with RabbitMQ over SSL, each VOLTTRON instance should should trust the ROOT CA of the other instance(RabbitMQ root ca) - - a. Transfer (scp/sftp/similar) voltttron_home/certificates/certs/-root-ca.crt to a temporary - location on the other volttron instance machine. For example, if you have two instance v1 and v2, - scp v1's v1-root-ca.crt to v2 and v2-root-ca.crt to v1. - - Note: If using VMs, in order to scp files between VM openssh should be installed and running. - - b. Append the contents of the transferred root ca to the instance's trusted-cas.crt file. Do this on both the instances. Now both - the instances -trusted-cas.crt will have two certificates. - - For example: - - On v1: - cat /tmp/v2-root-ca.crt >> VOLTTRON_HOME/certificates/certs/v1-trusted-cas.crt - - On v2: - cat /tmp/v1-root-ca.crt >> VOLTTRON_HOME/certificates/certs/v2-trusted-cas.crt - -3. Stop volttron, stop rabbitmq server and start volttron on both the -instances. This is required only when you update the root certificate and not -required when you add a new shovel/federation between the same hosts - -.. code-block:: bash - - ./stop-volttron - ./stop-rabbitmq - ./start-volttron - -4. Identify upstream servers (publisher nodes) and downstream servers -(collector nodes). To create a RabbitMQ federation, we have to configure -upstream servers on the downstream server and make the VOLTTRON exchange -"federated". - - a. On the downstream server (collector node) - - .. code-block:: bash - - vcfg --rabbitmq federation [optional path to rabbitmq_federation_config.yml - containing the details of the upstream hostname, port and vhost. - - - Example configuration for federation is available - in examples/configurations/rabbitmq/rabbitmq_federation_config.yml] - - - If no config file is provided, the script will prompt for - hostname (or IP address), port, and vhost of each upstream node you - would like to add. Hostname provided should match the hostname in the - SSL certificate of the upstream server. For bi-directional data flow, - we will have to run the same script on both the nodes. - - b. Create a user in the upstream server(publisher) with - username= (i.e. (instance-name)-admin) and - provide it access to the virtual host of the upstream RabbitMQ server. Run - the below command in the upstream server - - .. code-block:: bash - - vctl rabbitmq add-user - Do you want to set READ permission [Y/n] - Do you want to set WRITE permission [Y/n] - Do you want to set CONFIGURE permission [Y/n] - -5. Test the federation setup. - - a. On the downstream server run a listener agent which subscribes to messages from all platforms - - - Open the file examples/ListenerAgent/listener/agent.py. Search for @PubSub.subscribe('pubsub', '') and replace that line with @PubSub.subscribe('pubsub', 'devices', all_platforms=True) - - updgrade the listener - - .. code-block:: bash - - scripts/core/upgrade-listener - - - b. Install master driver, configure fake device on upstream server and start volttron and master driver. vcfg --agent master_driver command can install master driver and setup a fake device. - - .. code-block:: bash - - ./stop-volttron - vcfg --agent master_driver - ./start-volttron - vctl start --tag master_driver - - - c. Verify listener agent in downstream VOLTTRON instance is able to receive the messages. downstream volttron instance's volttron.log should display device data scrapped by master driver agent in upstream volttron instance - -6. Open ports and https service if needed - On Redhat based systems ports used by RabbitMQ (defaults to 5671, 15671 for - SSL, 5672 and 15672 otherwise) might not be open by default. Please - contact system administrator to get ports opened on the downstream server. - - Following are commands used on centos 7. - - .. code-block:: bash - - sudo firewall-cmd --zone=public --add-port=15671/tcp --permanent - sudo firewall-cmd --zone=public --add-port=5671/tcp --permanent - sudo firewall-cmd --reload - -7. How to remove federation link - - a. Using the management web interface - - Log into management web interface using downstream server's admin username. - Navigate to admin tab and then to federation management page. The status of the - upstream link will be displayed on the page. Click on the upstream link name and - delete it. - - b. Using "vctl" command on the upstream server. - - .. code-block:: bash - - vctl rabbitmq list-federation-parameters - NAME URI - upstream-volttron2-rabbit-2 amqps://rabbit-2:5671/volttron2?cacertfile=/home/nidd494/.volttron1/certificates/certs/volttron1-root-ca.crt&certfile=/home/nidd494/.volttron1/certificates/certs/volttron1-admin.crt&keyfile=/home/nidd494/.volttron1/certificates/private/volttron1-admin.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external&server_name_indication=rabbit-2 - - Grab the upstream link name and run the below command to remove it. - - .. code-block:: bash - - vctl rabbitmq remove-federation-parameters upstream-volttron2-rabbit-2 diff --git a/docs/source/setup/RabbitMQ/index.rst b/docs/source/setup/RabbitMQ/index.rst deleted file mode 100644 index fcee383e09..0000000000 --- a/docs/source/setup/RabbitMQ/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _RabbitMQ-Resources: - -========================== -Using RabbitMQ message bus -========================== -.. toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/docs/source/setup/RabbitMQ/shovel_plugin.rst b/docs/source/setup/RabbitMQ/shovel_plugin.rst deleted file mode 100644 index 4a87022212..0000000000 --- a/docs/source/setup/RabbitMQ/shovel_plugin.rst +++ /dev/null @@ -1,386 +0,0 @@ -.. _shovel-plugin: - -RabbitMQ Multi-Platform Deployment Using Shovel Plugin -====================================================== - -In RabbitMQ based VOLTTRON, forwarder and data mover agents will be replaced by shovels -to send or receive remote pubsub messages. -Shovel behaves like a well written client application that connects to its source -( can be local or remote ) and destination ( can be local or remote instance ), -reads and writes messages, and copes with connection failures. In case of shovel, apart -from configuring the hostname, port and virtual host of the remote instance, we will -also have to provide list of topics that we want to forward to remote instance. Shovels -can also be used for remote RPC communication in which case we would have to create shovel -in both the instances, one to send the RPC request and other to send the response back. - -Pubsub Communication -~~~~~~~~~~~~~~~~~~~~ - -1. Setup two VOLTTRON instances using the steps described in installation section. -Please note that each instance should have a unique instance name. - -2. In a multi platform setup that need to communicate with each other with - RabbitMQ over SSL, each VOLTTRON instance should should trust the ROOT CA of - the other instance(RabbitMQ root ca) - - a. Transfer (scp/sftp/similar) - voltttron_home/certificates/certs/-root-ca.crt to a temporary - location on the other volttron instance machine. For example, if you have two - instance v1 and v2, scp v1's v1-root-ca.crt to v2 and - v2-root-ca.crt to v1. - - b. Append the contents of the transferred root ca to the instance's root ca. - - For example: - - On v1 - - cat /tmp/v2-root-ca.crt >> VOLTTRON_HOME/certificates/v1-root-ca.crt - - On v2 - - cat /tmp/v1-root-ca.crt >> VOLTTRON_HOME/certificates/v2-root-ca.crt - -3. Identify the instance that is going to act as the "publisher" instance. Suppose - "v1" instance is the "publisher" instance and "v2" instance is the "subscriber" - instance. Then we need to create a shovel on "v1" to forward messages matching - certain topics to remote instance "v2". - - a. On the publisher node, - - .. code-block:: bash - - vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml] - - rabbitmq_shovel_config.yml should contain the details of the remote hostname, port, vhost - and list of topics to forward. Example configuration for shovel is available - in examples/configurations/rabbitmq/rabbitmq_shovel_config.yml - - - For this example, let's set the topic to "devices" - - If no config file is provided, the script will prompt for - hostname (or IP address), port, vhost and list of topics for each - remote instance you would like to add. For - bi-directional data flow, we will have to run the same script on both the nodes. - - b. Create a user in the subscriber node with username set to publisher instance's - agent name ( (instance-name)-PublisherAgent ) and allow the shovel access to - the virtual host of the subscriber node. - - .. code-block:: bash - - cd $RABBITMQ_HOME - vctl add-user - -4. Test the shovel setup. - - a. Start VOLTTRON on publisher and subscriber nodes. - - b. On the publisher node, start a master driver agent that publishes messages related to - a fake device. ( Easiest way is to run volttron-cfg command and follow the steps ) - - c. On the subscriber node, run a listener agent which subscribes to messages - from all platforms (set @PubSub.subscribe('pubsub', 'devices', all_platforms=True) - instead of @PubSub.subscribe('pubsub', '') ) - - d. Verify listener agent in subscriber node is able to receive the messages - matching "devices" topic. - -5. How to remove the shovel setup. - - a. Using the management web interface - - Log into management web interface using publisher instance's admin username. - Navigate to admin tab and then to shovel management page. The status of the - shovel will be displayed on the page. Click on the shovel name and delete the shovel. - - b. Using "volttron-ctl" command on the publisher node. - - .. code-block:: bash - - vctl rabbitmq list-shovel-parameters - NAME SOURCE ADDRESS DESTINATION ADDRESS BINDING KEY - shovel-rabbit-3-devices amqps://rabbit-1:5671/volttron1?cacertfile=/home/nidd494/.volttron1/certificates/certs/volttron1-root-ca.crt&certfile=/home/nidd494/.volttron1/certificates/certs/volttron1-admin.crt&keyfile=/home/nidd494/.volttron1/certificates/private/volttron1-admin.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external&server_name_indication=rabbit-1 amqps://rabbit-3:5671/volttron3?cacertfile=/home/nidd494/.volttron1/certificates/certs/volttron1-root-ca.crt&certfile=/home/nidd494/.volttron1/certificates/certs/volttron1-admin.crt&keyfile=/home/nidd494/.volttron1/certificates/private/volttron1-admin.pem&verify=verify_peer&fail_if_no_peer_cert=true&auth_mechanism=external&server_name_indication=rabbit-3 __pubsub__.volttron1.devices.# - - - Grab the shovel name and run the below command to remove it. - - .. code-block:: bash - - vctl rabbitmq remove-shovel-parameters shovel-rabbit-3-devices - -RPC Communication -~~~~~~~~~~~~~~~~~ -Following are the steps to create Shovel for multi-platform RPC communication. - -1. Setup two VOLTTRON instances using the steps described in installation section. - Please note that each instance should have a unique instance name. - -2. In a multi platform setup that need to communicate with each other with - RabbitMQ over SSL, each VOLTTRON instance should should trust the ROOT CA of - the other instance(RabbitMQ root ca) - - a. Transfer (scp/sftp/similar) - voltttron_home/certificates/certs/-root-ca.crt to a temporary - location on the other volttron instance machine. For example, if you have two - instance v1 and v2, scp v1's v1-root-ca.crt to v2 and - v2-root-ca.crt to v1. - - b. Append the contents of the transferred root ca to the instance's root ca. - For example: - - On v1 - - cat /tmp/v2-root-ca.crt >> VOLTTRON_HOME/certificates/v1-root-ca.crt - - On v2 - - cat /tmp/v1-root-ca.crt >> VOLTTRON_HOME/certificates/v2-root-ca.crt - -3. Typically RPC communication is 2 way communication so we will to setup shovel in both the VOLTTRON instances. In RPC calls - there are two instances of shovel. One serving as the caller (makes RPC request) and the other acting as a callee (replies - to RPC request). Identify the instance is the "caller" and which is the "callee." Suppose "v1" instance is the "caller" - instance and "v2" instance is the "callee" instance. - - a. On both the caller and callee nodes, shovel instances need to be created. In this example, v1’s shovel would forward the - RPC call request from an agent on v1 to v2 and similarly v2’s shovel will forward the RPC reply from agent on v2 - back to v1. - - .. code-block:: bash - - vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml] - - rabbitmq_shovel_config.yml should contain the details of the - **remote** hostname, port, vhost, volttron instance name (so in v1's yml file parameters would point to v2 - and vice versa), and list of agent pair identities (local caller, remote callee). Example configuration for shovel - is available in examples/configurations/rabbitmq/rabbitmq_shovel_config.yml. - - For this example, let's say that we are using the schedule-example and acutator agents. - - For v1, the agent pair identities would be: - - - [Scheduler, platform.actuator] - - For v2, they would be: - - - [platform.actuator, Scheduler] - - Indicating the flow from local agent to remote agent. - - b. On the caller node create a user with username set to callee instance's agent name ( (instance-name)-RPCCallee ) and - allow the shovel access to the virtual host of the callee node. Similarly, on the callee node, create a user with - username set to caller instance's agent name ( (instance-name)-RPCCaller ) and allow the shovel access to the virtual - host of the caller node. - - .. code-block:: bash - - cd $RABBITMQ_HOME - vctl add-user - - -4. Test the shovel setup - - a. **On caller node**: - - Make necessary changes to RPC methods of caller agent. - - For this example, in volttron/examples/SchedulerExample/schedule_example/agent.py: - - * Search for 'campus/building/unit' in publish_schedule method. Replace with - 'devices/fake-campus/fake-building/fake-device' - * Search for ['campus/building/unit3',start,end] in the use_rpc method, replace with: - - msg = ['fake-campus/fake-building/fake-device',start,end]. - * Add: kwargs = {"external_platform": 'v2'} on the line below - * On the result = self.vip.rpc.call method below, replace "msg).get(timeout=10)" with: - - .. code-block:: bash - - msg, **kwargs).get(timeout=10), - - * In the second try clause of the use_rpc method: - * Replace result['result'] with result[0]['result'] - * Add kwargs = {"external_platform": 'v2'} as the first line of the if statement - * Replace 'campus/building/unit3/some_point' with 'fake-campus/fake-building/fake-device/PowerState' - * Below 'fake-campus/fake-building/fake-device/PowerState' add: 0, - * Replace - - .. code-block:: bash - - '0.0').get(timeout=10) with **kwargs).get(timeout=10) - - - Next, install an example scheduler agent and start it: - - .. code-block:: bash - - #!/bin/bash - python /home/username/volttron/scripts/install-agent.py -c /home/username/volttron/examples/SchedulerExample/schedule-example.agent -s examples/SchedulerExample --start --force -i Scheduler - - - b. **On the callee node:** - - - Run upgrade script to install actuator agent. - - .. code-block:: bash - - #!/bin/bash - python /home/username/volttron/scripts/install-agent.py -s services/core/ActuatorAgent --start --force -i platform.actuator - - - - Run the upgrade script to install the listener agent. - - .. code-block:: bash - - scripts/core/upgrade-listener - - - - - Install master driver, configure fake device on upstream callee and start volttron and master driver. - vcfg --agent master_driver command can install master driver and setup a fake device. - - .. code-block:: bash - - ./stop-volttron - vcfg --agent master_driver - ./start-volttron - vctl start --tag master_driver - - - - Start actuator agent and listener agents. - - The output for the callee node with a successful shovel run should look similar to: - - .. code-block:: bash - - 2018-12-19 15:38:00,009 (listeneragent-3.2 13039) listener.agent INFO: Peer: pubsub, Sender: platform.driver:, Bus: , Topic: devices/fake-campus/fake-building/fake-device/all, Headers: {'Date': '2018-12-19T20:38:00.001684+00:00', 'TimeStamp': '2018-12-19T20:38:00.001684+00:00', 'min_compatible_version': '5.0', 'max_compatible_version': u'', 'SynchronizedTimeStamp': '2018-12-19T20:38:00.000000+00:00'}, Message: - [{'Heartbeat': True, 'PowerState': 0, 'ValveState': 0, 'temperature': 50.0}, - {'Heartbeat': {'type': 'integer', 'tz': 'US/Pacific', 'units': 'On/Off'}, - 'PowerState': {'type': 'integer', 'tz': 'US/Pacific', 'units': '1/0'}, - 'ValveState': {'type': 'integer', 'tz': 'US/Pacific', 'units': '1/0'}, - 'temperature': {'type': 'integer', - 'tz': 'US/Pacific', - 'units': 'Fahrenheit'}}] - - - -DataMover Communication -~~~~~~~~~~~~~~~~~~~~~~~ - -The DataMover historian running on one instance makes RPC call to platform historian running on remote -instance to store data on remote instance. Platform historian agent returns response back to DataMover -agent. For such a request-response behavior, shovels need to be created on both instances. - -1. Please ensure that preliminary steps for multi-platform communication are completed (namely, - steps 1-3 described above) . - -2. To setup a data mover to send messages from local instance (say v1) to remote instance (say v2) - and back, we would need to setup shovels on both instances. - - Example of RabbitMQ shovel configuration on v1 - - .. code-block:: json - - shovel: - # hostname of remote machine - rabbit-2: - port: 5671 - rpc: - # Remote instance name - v2: - # List of pair of agent identities (local caller, remote callee) - - [data.mover, platform.historian] - virtual-host: v1 - - This says that DataMover agent on v1 wants to make RPC call to platform historian on v2. - - .. code-block:: bash - - vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml - - - Example of RabbitMQ shovel configuration on v2 - - .. code-block:: json - - shovel: - # hostname of remote machine - rabbit-1: - port: 5671 - rpc: - # Remote instance name - v1: - # List of pair of agent identities (local caller, remote callee) - - [platform.historian, data.mover] - virtual-host: v2 - - This says that Hplatform historian on v2 wants to make RPC call to DataMover agent on v1. - - a. On v1, run below command to setup a shovel from v1 to v2. - - .. code-block:: bash - - vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml - - b. Create a user on v2 with username set to remote agent's username - ( for example, v1.data.mover i.e., .) and allow - the shovel access to the virtual host of v2. - - .. code-block:: bash - - cd $RABBITMQ_HOME - vctl add-user - - c. On v2, run below command to setup a shovel from v2 to v1 - - .. code-block:: bash - - vcfg --rabbitmq shovel [optional path to rabbitmq_shovel_config.yml - - d. Create a user on v1 with username set to remote agent's username - ( for example, v2.patform.historian i.e., .) and allow - the shovel access to the virtual host of the v1. - - .. code-block:: bash - - cd $RABBITMQ_HOME - vctl add-user - -3. Start Master driver agent on v1 - - .. code-block:: bash - - ./stop-volttron - vcfg --agent master_driver - ./start-volttron - vctl start --tag master_driver - -4. Install DataMover agent on v1. Contents of the install script can look like below. - - .. code-block:: bash - - #!/bin/bash - export CONFIG=$(mktemp /tmp/abc-script.XXXXXX) - cat > $CONFIG <=0.10,<0.11 - ... - + pip install --global-option --quiet --install-option --zmq=bundled --no-deps pyzmq>=14.3,<15 - ... - + pip install --global-option --quiet --editable ./lib/jsonrpc --editable . --requirement ./requirements.txt - ... - Successfully installed Smap-2.0.24c780d avro-1.7.7 configobj-5.0.6 ecdsa-0.13 flexible-jsonrpc - gevent-1.0.1 greenlet-0.4.5 monotonic-0.1 numpy-1.9.1 pandas-0.15.2 paramiko-1.15.2 - pycrypto-2.6.1 pymodbus-1.2.0 pyserial-2.7 python-dateutil-2.4.0 pytz-2014.10 requests-2.5.3 - simplejson-3.6.5 six-1.9.0 twisted-15.0.0 volttron-2.0 wheel-0.24.0 zope.interface-4.1.2 - - real 9m2.299s - user 7m51.790s - sys 0m14.450s - -Whew! The build took just over nine minutes on my nearly-4-year-old MacBook Pro running Arch Linux. In case you are wondering about my system's name, as seen in the bash prompt, *inamatus* is Latin for *unloved*. I'll leave it as an exercise for the user to determine why my system is unloved (hint: it has to do with a wonderful fruit with a bite missing from the side). - -Anyway, let's have another look at the pip download cache. - -.. code:: - - [volttron@inamatus volttron]$ find ~/.cache/pip -type f - /home/volttron/.cache/pip/http/9/a/b/2/1/9ab21efc4225c8eb9aa41d1c76abef2a53babcefa438a79fa4e981ce - /home/volttron/.cache/pip/http/9/2/6/7/2/92672ab99ac77960252018fbcb4f40984eef60ba5588229a729f18f5 - /home/volttron/.cache/pip/http/9/e/6/1/9/9e61964f51d8a05a20ecf21eef694877f28cb654a123ce1316ff77e5 - /home/volttron/.cache/pip/http/9/7/7/1/a/9771a6b64f3294ac335fdb8574cd3564e21c130924697381d72fd04d - /home/volttron/.cache/pip/http/a/a/7/e/8/aa7e8bc2af1068a43747b0f771b426b7dcf7708283ca3ce3d92a2afc - ... - /home/volttron/.cache/pip/http/8/f/9/0/d/8f90d7cf09a2b5380a319b0df8eed268be28d590b6b5f71598a3b56f - /home/volttron/.cache/pip/http/8/d/e/d/a/8deda849bcfd627b8587addf049f79bb333dd8fe1eae1d5053881039 - /home/volttron/.cache/pip/http/8/8/7/a/6/887a67fb460d57a10a50deef3658834b9ac01722244315227d334628 - /home/volttron/.cache/pip/http/5/5/4/e/2/554e2be8d96625aa74a4e0c4ee4a4b1ca10a442c2877bd3fff96e2a6 - /home/volttron/.cache/pip/http/1/d/c/8/3/1dc83c11a861a2bc20d9c0407b41089eba236796ba80c213511f1f74 - /home/volttron/.cache/pip/log/debug.log - -The output is truncated because it was long and boring. The important thing is that it now exists. Next let's remove the virtual environment and rebuild to see what effect the download cache has on our build time. - -.. code:: - - [volttron@inamatus volttron]$ rm -rf env - [volttron@inamatus volttron]$ time python3 bootstrap.py - ... - - real 8m35.387s - user 7m50.770s - sys 0m14.170s - -Notice that our CPU time was nearly the same, about 8 minutes (user + sys). So the remaining time was likely spent on I/O, which was reduced by about 30 seconds. We need something else to reduce CPU time. Enter ccache. - - -Better ------- - -What is ccache? According to the official ccache_ site, - - ccache is a compiler cache. It speeds up recompilation by caching the result of previous compilations and detecting when the same compilation is being done again. - -.. _ccache: https://ccache.samba.org/ - -Sounds like just the thing we need. ccache is already properly configured on my system, it just needs to be placed early in the ``PATH`` to be found before the official gcc compilers. - -.. code:: - - [volttron@inamatus volttron]$ which gcc - /usr/bin/gcc - [volttron@inamatus volttron]$ export PATH=/usr/lib/ccache/bin:$PATH - [volttron@inamatus volttron]$ which gcc - /usr/lib/ccache/bin/gcc - -Now to prove to ourselves that the cache will be filled during the next run, let's have a look at the cache status. - -.. code:: - - [volttron@inamatus volttron]$ ccache -s - cache directory /home/volttron/.ccache - primary config /home/volttron/.ccache/ccache.conf - secondary config (readonly) /etc/ccache.conf - cache hit (direct) 0 - cache hit (preprocessed) 0 - cache miss 0 - files in cache 0 - cache size 0.0 kB - max cache size 5.0 GB - -The cache is indeed empty. - -Nothing up my sleeve... Presto! - -.. code:: - - [volttron@inamatus volttron]$ rm -rf env - [volttron@inamatus volttron]$ time python3 bootstrap.py - ... - - real 6m3.496s - user 4m57.960s - sys 0m10.880s - -One might expect a ccache build to take slightly longer than the baseline on the first build within a single project. This build completed about two minutes faster. Let's look at the ccache status to discover why. - -.. code:: - - [volttron@inamatus volttron]$ ccache -s - cache directory /home/volttron/.ccache - primary config /home/volttron/.ccache/ccache.conf - secondary config (readonly) /etc/ccache.conf - cache hit (direct) 204 - cache hit (preprocessed) 23 - cache miss 633 - called for link 140 - called for preprocessing 95 - compile failed 1139 - preprocessor error 4 - bad compiler arguments 5 - autoconf compile/link 103 - no input file 19 - files in cache 1316 - cache size 26.1 MB - max cache size 5.0 GB - -Ah ha. There were a total of 227 cache hits, meaning that some of the files were identical across all the built packages and the cached version could be used rather than recompiling. Let's see how subsequent builds improve with few cache misses. - -.. code:: - - [volttron@inamatus volttron]$ rm -rf env - [volttron@inamatus volttron]$ time python3 bootstrap.py - ... - - real 3m15.811s - user 2m24.890s - sys 0m7.090s - -Wow! Now we're cooking with gas. Build times have been cut to nearly 1/3 of our baseline. This ccache status shows only 14 cache misses over our previous run: - -.. code:: - - [volttron@inamatus volttron]$ ccache -s - cache directory /home/volttron/.ccache - primary config /home/volttron/.ccache/ccache.conf - secondary config (readonly) /etc/ccache.conf - cache hit (direct) 1038 - cache hit (preprocessed) 35 - cache miss 647 - called for link 280 - called for preprocessing 190 - compile failed 2278 - preprocessor error 8 - bad compiler arguments 10 - autoconf compile/link 206 - no input file 38 - files in cache 1365 - cache size 35.0 MB - max cache size 5.0 GB - -So using ccache is a big win. Anyone compiling C or C++ on a Linux system should have ccache enabled. Wait, make that *must*. Go, now, and enable it on your Linux boxen. Or maybe finish reading this and then go do it. But do it! - -Best ----- - -Now you're thinking "how could it get any better," right? Well, it can. What if those compiled packages only needed to be rebuilt when a new version was required instead of every time they are installed. - -When pip installs a package, it downloads the source and executes the packages ``setup.py`` like so: ``python setup.py install``. The install command builds the package and installs it directly into the file system. What if we could package up the build results into an archive and just extract them to the file system when the package is installed. Enter **wheel**. - -pip supports the latest Python packaging format known as wheel. Typically this just means that it can install packages in the `wheel format`_. However, if the wheel_ package is installed, pip can also `build wheels`_ from source, executing ``python setup.py bdist_wheel``. By default, wheels are placed in the *wheelhouse* directory in the current working directory. But we can alter that location by setting an environment variable (read more on configuring pip here_). - -.. _wheel format: http://wheel.readthedocs.org/en/latest -.. _wheel: https://pypi.python.org/pypi/wheel -.. _build wheels: https://pip.pypa.io/en/latest/reference/pip_wheel.html - -.. code:: - - [volttron@inamatus volttron]$ export PIP_WHEEL_DIR=$HOME/.cache/pip/wheelhouse - -We also need to tell pip to look for the wheels, again using an environment variable. The directory needs to exist because while the wheel command will create the directory when creating the packages, pip may try to search the directory first. - -.. code:: - - [volttron@inamatus volttron]$ export PIP_FIND_LINKS=file://$PIP_WHEEL_DIR - [volttron@inamatus volttron]$ mkdir $PIP_WHEEL_DIR - -So to get this all working, bootstrapping now has to occur in three steps: install the virtual environment, build the wheels, and install the requirements. ``bootstrap.py`` takes options that control its behavior. The first pass requires the ``-o`` or ``--only-virtenv`` option to stop bootstrap after installing the virtual environment and prevent the update stage. - -.. code:: - - [volttron@inamatus volttron]$ rm -rf env - [volttron@inamatus volttron]$ time python3 bootstrap.py --only-virtenv - Creating virtual Python environment - Downloading virtualenv DOAP record - Downloading virtualenv 12.0.7 - New python executable in /home/volttron/volttron/env/bin/python2.7 - Also creating executable in /home/volttron/volttron/env/bin/python - Installing setuptools, pip...done. - - real 0m3.866s - user 0m1.480s - sys 0m0.230s - -The second step requires the ``-w`` or ``--wheel`` option to build the wheels. Because the virtual environment already exists, ``bootstrap.py`` must be called with the virtual environment Python, not the system Python. - -.. code:: - - [volttron@inamatus volttron]$ time env/bin/python bootstrap.py --wheel - Building required packages - + pip install --global-option --quiet wheel - ... - + pip wheel --global-option --quiet --build-option --zmq=bundled --no-deps pyzmq>=14.3,<15 - ... - + pip wheel --global-option --quiet --editable ./lib/jsonrpc --editable . --requirement ./requirements.txt - ... - Destination directory: /home/volttron/.cache/pip/wheelhouse - Successfully built numpy pandas gevent monotonic pymodbus simplejson Smap greenlet pycrypto - twisted pyserial configobj avro zope.interface - - real 3m15.431s - user 2m17.980s - sys 0m5.630s - -It took 3.25 minutes to build the wheels (with ccache still enabled). Repeating this command results in nothing new being compiled and takes only 4 seconds. Only new versions of packages meeting the requirements will be built. - -.. code:: - - [volttron@inamatus volttron]$ time env/bin/python bootstrap.py --wheel - Building required packages - ... - Skipping numpy, due to already being wheel. - Skipping pandas, due to already being wheel. - Skipping python-dateutil, due to already being wheel. - Skipping requests, due to already being wheel. - Skipping flexible-jsonrpc, due to being editable - Skipping pyzmq, due to already being wheel. - Skipping gevent, due to already being wheel. - Skipping monotonic, due to already being wheel. - Skipping paramiko, due to already being wheel. - Skipping pymodbus, due to already being wheel. - Skipping setuptools, due to already being wheel. - Skipping simplejson, due to already being wheel. - Skipping Smap, due to already being wheel. - Skipping wheel, due to already being wheel. - Skipping volttron, due to being editable - Skipping pytz, due to already being wheel. - Skipping six, due to already being wheel. - Skipping greenlet, due to already being wheel. - Skipping ecdsa, due to already being wheel. - Skipping pycrypto, due to already being wheel. - Skipping pyserial, due to already being wheel. - Skipping twisted, due to already being wheel. - Skipping configobj, due to already being wheel. - Skipping avro, due to already being wheel. - Skipping zope.interface, due to already being wheel. - - real 0m3.998s - user 0m3.580s - sys 0m0.360s - -And let's see what is in the wheelhouse. - -.. code:: - - [volttron@inamatus volttron]$ ls ~/.cache/pip/wheelhouse - Smap-2.0.24c780d-py2-none-any.whl - Twisted-15.0.0-cp27-none-linux_x86_64.whl - avro-1.7.7-py2-none-any.whl - configobj-5.0.6-py2-none-any.whl - ecdsa-0.13-py2.py3-none-any.whl - gevent-1.0.1-cp27-none-linux_x86_64.whl - greenlet-0.4.5-cp27-none-linux_x86_64.whl - monotonic-0.1-py2-none-any.whl - numpy-1.9.1-cp27-none-linux_x86_64.whl - pandas-0.15.2-cp27-none-linux_x86_64.whl - paramiko-1.15.2-py2.py3-none-any.whl - pycrypto-2.6.1-cp27-none-linux_x86_64.whl - pymodbus-1.2.0-py2-none-any.whl - pyserial-2.7-py2-none-any.whl - python_dateutil-2.4.0-py2.py3-none-any.whl - pytz-2014.10-py2.py3-none-any.whl - pyzmq-14.5.0-cp27-none-linux_x86_64.whl - requests-2.5.3-py2.py3-none-any.whl - setuptools-12.2-py2.py3-none-any.whl - simplejson-3.6.5-cp27-none-linux_x86_64.whl - six-1.9.0-py2.py3-none-any.whl - wheel-0.24.0-py2.py3-none-any.whl - zope.interface-4.1.2-cp27-none-linux_x86_64.whl - -Now ``bootstrap.py`` can be run without options to complete the bootstrap process, again using the virtual environment Python. - -.. code:: - - [volttron@inamatus volttron]$ time env/bin/python bootstrap.py - Installing required packages - + easy_install BACpypes>=0.10,<0.11 - ... - + pip install --global-option --quiet --install-option --zmq=bundled --no-deps pyzmq>=14.3,<15 - ... - + pip install --global-option --quiet --editable ./lib/jsonrpc --editable . --requirement ./requirements.txt - ... - Successfully installed Smap-2.0.24c780d avro-1.7.7 configobj-5.0.6 ecdsa-0.13 flexible-jsonrpc - gevent-1.0.1 greenlet-0.4.5 monotonic-0.1 numpy-1.9.1 pandas-0.15.2 paramiko-1.15.2 - pycrypto-2.6.1 pymodbus-1.2.0 pyserial-2.7 python-dateutil-2.4.0 pytz-2014.10 requests-2.5.3 - simplejson-3.6.5 six-1.9.0 twisted-15.0.0 volttron-2.0 zope.interface-4.1.2 - - real 0m11.137s - user 0m8.930s - sys 0m0.950s - -Installing from wheels completes in only 11 seconds. And if we blow away the environment and bootstrap again, it takes under 15 seconds. - -.. code:: - - [volttron@inamatus volttron]$ rm -rf env - [volttron@inamatus volttron]$ time python3 bootstrap.py - ... - - real 0m14.644s - user 0m10.380s - sys 0m1.240s - -Building a clean environment now occurs in less than 15 seconds instead of the 9 minute baseline. That, my friends, is fast. - - -Why care? ---------- - -The average VOLTTRON developer probably won't care or see much benefit from the wheel optimization. The typical developer workflow does not include regularly removing the virtual environment and rebuilding. This is, however, very important for continuous integration (CI). With CI, a build server should check out a fresh copy of the source code, build it in a clean environment, and perform unit tests, notifying offending users when their changes break things. Ideally, notification of breakage should happen as soon as possible. We just shaved nearly nine minutes off the turnaround time. It also reduces the load on a shared CI build server, which is nice for everyone. - - -Taking it further ------------------ - -Two additional use cases present themselves: offline installs and shared builds. - - -Offline Installs -++++++++++++++++ - -Let's say we have a system that is not connected to the Internet and, therefore, cannot download packages from PyPi_ or any other package index. Or perhaps it doesn't have a suitable compiler. Wheels can be built on another *similar*, connected system and transferred by USB drive to the offline system, where they can then be installed. Note that the architecture must be identical and the OS must be very similar between the two systems for this to work. - -If the two systems differ too much for a compatible binary build and the offline system has a suitable compiler, then source files can be copied from the pip download cache and transferred from the online system to the offline system for building. - - -Shared Builds -+++++++++++++ - -If many developers are working on the same project, why not share the results of a build with the rest of the team? Here are some ideas to make it work: - -* Put wheels on a shared network drive -* Run a private package index server (maybe with pypiserver_) -* Expose CI built wheels using Apache, Nginx, or SimpleHTTPServer_ - -.. _pypiserver: https://pypi.python.org/pypi/pypiserver -.. _SimpleHTTPServer: https://docs.python.org/2.7/library/simplehttpserver.html#module-SimpleHTTPServer - - -Issues ------- - -Here are some of the issues/drawbacks to the methods described above and some possible solutions. - -* Configuring pip using environment variables - - No worries. Pip uses configuration files too. And a benefit to using them is that it makes all these wheels available to other Python projects you may be working on, and vise versa. - - .. code:: - - # /home/volttron/.config/pip/pip.conf - [global] - wheel-dir = /home/volttron/.cache/pip/wheelhouse - find-links = file:///home/volttron/.cache/pip/wheelhouse - - Find more on configuring pip here_. - - .. _here: https://pip.pypa.io/en/latest/user_guide.html#configuration - -* pip does not clean the wheelhouse - - This is not a deal-breaker. The wheel directory can just be removed and it will be recreated. Or a script can be used to remove all but the latest versions of packages. - -* Requires an additional step or two - - That's the price for speed. But it can be mitigated by writing a script or bash alias to perform the steps. - - -Conclusion ----------- - -Here is a quick summary of the build times executed above: - -======================= ====== ====== - Method Time (minutes) ------------------------ -------------- -Each builds on previous CPU Total -======================= ====== ====== -baseline 8:07 9:02 -with download cache 8:05 8:35 -ccache, first run 5:09 6:03 -ccache, subsequent runs 2:32 3:16 -wheel, first run 2:35 3:30 -wheel, subsequent runs 0:12 0:15 -======================= ====== ====== - -Not everyone cares about build times, but for those who do, pre-building Python wheels is a great way to improve install times. At a very minimum, every Python developer installing compiled packages will benefit from using ccache. - -The techniques used in this document aren't just for VOLTTRON, either. They are generally useful for all moderately sized Python projects. - -If you haven't installed ccache yet, go do it. There is no excuse. - -.. vim: ft=rst spell wrap: diff --git a/docs/source/setup/VOLTTRON-Prerequisites.rst b/docs/source/setup/VOLTTRON-Prerequisites.rst deleted file mode 100644 index 5818bf0c7c..0000000000 --- a/docs/source/setup/VOLTTRON-Prerequisites.rst +++ /dev/null @@ -1,92 +0,0 @@ -.. _VOLTTRON-Prerequisites: - -Required Software: Linux -======================== - -The following packages will need to be installed on the system: - -- git -- build-essential -- python3.6-dev -- python3.6-venv -- openssl -- libssl-dev -- libevent-dev - -On **Debian-based systems**, these can all be installed with the following -command: - -.. code-block:: bash - - sudo apt-get update - sudo apt-get install build-essential python3-dev python3-venv openssl libssl-dev libevent-dev git - -On Ubuntu-based systems, available packages allow you to specify the python3 version, 3.6 or greater is required (Debian itself does not provide those packages). - -On arm-based systems (including, but not limited to, Raspbian), you must also install libffi-dev, you can do this with: - -.. code-block:: bash - - sudo apt-get install libffi-dev - -On **Redhat or CENTOS systems**, these can all be installed with the following -command: - -.. code-block:: bash - - sudo yum update - sudo yum install make automake gcc gcc-c++ kernel-devel python3-devel openssl openssl-devel libevent-devel git - -.. note:: - Python 3.6 or greater is required. - -If you have an agent which requires the pyodbc package, install the -following: - -- freetds-bin -- unixodbc-dev - -On **Debian-based systems** these can be installed with the following command: - -.. code-block:: bash - - sudo apt-get install freetds-bin unixodbc-dev - -On **Redhat or CentOS systems**, these can be installed from the Extra Packages for Enterprise Linux (EPEL) repository: - -.. code-block:: bash - - sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm - sudo yum install freetds unixODBC-devel - -.. note:: - The above command to install the EPEL repository is for Centos/Redhat 8. Change the number to match your OS version. - - EPEL packages are included in Fedora repositories, so installing EPEL is not required on Fedora. - - -Possible issues -~~~~~~~~~~~~~~~ - -The /tmp directory must allow exec. This error could manifest itself -during the building of gevent. - -:: - - # Executing mount should have an entry like the following - mount - - tmpfs on /tmp type tmpfs (rw,nosuid,nodev) - -To change the mount you can use the following code - -:: - - # remount /tmp to allow exec - sudo mount -o remount,exec /tmp - -:: - - # remount /tmp to disallow exec - sudo mount -o remount,noexec /tmp - diff --git a/docs/source/setup/VOLTTRON-Source-Options.rst b/docs/source/setup/VOLTTRON-Source-Options.rst deleted file mode 100644 index 74879fd3c0..0000000000 --- a/docs/source/setup/VOLTTRON-Source-Options.rst +++ /dev/null @@ -1,31 +0,0 @@ -.. _Repository-Structure: - -Repository Structure -==================== - -There are several options for using the VOLTTRON code depending on -whether you require the most stable version of the code or want the -latest updates as they happen. In order of decreasing stability and -increasing currency: - -For most stable, download the source code for the latest release at: -https://github.com/VOLTTRON/volttron/releases These are purely source -code and are not tied to the git repository. To update them will require -downloading the newest source code and re-installing. - -The master branch is now the default branch for VOLTTRON (meaning this -is what you clone if you do not use the “-b” option). This branch will -get the latest stable features as they are pushed. The current major -version is 7.x. - -The “develop” branch contains the latest features as they are developed. -Once a feature is considered “finished” it is merged back into develop. -Develop will be merged into master once it is considered stable and -ready for release. This branch can be cloned by those wanting to work -from the latest version of the platform but should not be used in -deployments. - -Features are developed on “feature” branches or developers' forks of -the main repository. It is not -recommended to clone these branches except for exploring a new -feature. diff --git a/docs/source/setup/bootstrap-options.rst b/docs/source/setup/bootstrap-options.rst deleted file mode 100644 index 69ca0e4cb1..0000000000 --- a/docs/source/setup/bootstrap-options.rst +++ /dev/null @@ -1,126 +0,0 @@ -.. _Bootstrap-Options: - -VOLTTRON Bootstrap Script -========================= - -The bootstrap.py Python script in the root directory of the VOLTTRON repository may be used to create -VOLTTRON's Python virtual environment and install or update VOLTTRON dependencies into the virtual -environment. - -Bootstrapping is broken into two stages. The first stage should only be invoked once per virtual -environment. It downloads Virtualenv and creates a virtual Python environment in the virtual -environment directory (defaults to a subdirectory named env in the same directory as this script). -It then executes stage two using the newly installed virtual environment. Stage two uses the -new virtual Python environment to install VOLTTRON and its dependencies. - -If a new dependency is added, this script may be run again using the Python executable in the -virtual environment to re-run stage two: - - env/bin/python bootstrap.py - -To speed up bootstrapping in a test environment, use the --wheel feature, which might look something -like this: - - $ export PIP_WHEEL_DIR=/path/to/cache/wheelhouse - $ export PIP_FIND_LINKS=file://$PIP_WHEEL_DIR - $ mkdir -p $PIP_WHEEL_DIR - $ python2.7 bootstrap.py -o - $ env/bin/python bootstrap.py --wheel - $ env/bin/python bootstrap.py - -Instead of setting the environment variables, a pip configuration file may be used. Look here for more -information on configuring pip: - - https://pip.pypa.io/en/latest/user_guide.html#configuration - -Bootstrap Options ------------------ - -To facilitate bootstrapping the various configurations of the VOLTTRON platform, the bootstrap script -provides several options. Options exist for each message bus, specifying a new environment, updating -an existing environment, and installing some optional dependencies for features like historians. - -These options may be invoked to alter the operation of the bootstrap script. - -.. code-block:: - - --envdir VIRTUAL_ENV: This option allows the user to specify the directory for the creation of a - new environment. If an environment exists, this can be used to create a second environment with an - alternative set of dependencies. - - --force: This option will force bootstrapping in a non-empty directory. This may be used to reset - an environment or if a previous bootstrapping attempt has failed. - - -o, --only-virtenv: This option will cause bootstrap to create a new Python virtual environment - without installing any VOLTTRON dependencies. - - --prompt PROMPT: Specify prompt to use in an activated environment, defaults to (volttron) - (Prompt specifies the string proceeding @ in an activated environment, e.i. Running - bootstrap with --prompt test would result in "(test) @:~/volttron$ " in bash) - - --offline: Install from Pip cache, prevents downloading dependencies - - -u, --upgrade: Upgrade installed packages to newest version - - -w, --wheel: Build wheels in the Pip wheelhouse (Pip package cache) - - -Optional Arguments -~~~~~~~~~~~~~~~~~~ - -These options can be added to the command to run the bootstrap script to cause the process to produce -varying levels of output during operation. - -.. code-block:: - - -help, --help: This option will display a message describing the options described below, and then - exist the bootstrap script. - - -q, --quiet: This option will limit the output of the bootstrap script. - - -v, --verbose: This option will cause the bootstrap script to produce additional output. - -Packaging Arguments -~~~~~~~~~~~~~~~~~~~ - -Packaging arguments can be added to the bootstrap argument list to specify an additional set of packages -to install beyond those required for "vanilla" VOLTTRON. Multiple packaging arguments can be specified -(e.i. python3 bootstrap.py --testing --databases ...) - -.. code-block:: - - --crate: Install crate.io Python database driver (crate) for use with Crate historian - - --databases: Install Python database drivers for historians - Crate (crate), InfluxDB (influxdb), - MongoDB (pymongo), MySQL (mysql-connector-python-rf) - - --dnp3: Install Python Distributed Network Protocol 3 wrapper (pydnp3) - - --documentation: Install requirements for building VOLTTRON documentation - Mock (mock), MySQL - (mysql-connector-python-rf), PSUtil (psutil), MongoDB (pymongo), Sphinx (sphinx), - Recommonmark (recommonmark), Read the Docs Sphinx theme (sphinx-rtd-theme) - - --drivers: Install device communication wrappers for VOLTTRON driver framework - Modbus (pymodbus), - Modbus Test Kit (modbus-tk), BACnet (bacpypes), Serial (pyserial) - - --influxdb: Install InfluxDB Python database driver (influxdb) for use with influxdb historian - - --market: Install requirements for VOLTTRON Market Service - NumPy (numpy), Transitions (transitions) - - --mongo: Install MongoDB Python database driver (pymongo) for use with MongoDB historian - - --mysql: Install MySQL database connector for Python (mysql-connector-python-rf) - - --pandas: Install Pandas (pandas) and NumPy (numpy) - - --postgres: Install Psycopg (postgres) - - --testing: Install testing infrastructure dependencies - Mock (mock), PyTest (pytest), PyTest-timeout - (pytest-timeout), Websocket-Client (websocket-client) - - --rabbitmq : Install Python Pika client library for use with RabbitMQ VOLTTRON deployments - (gevent-pika) If RabbitMQ is not installed at /rabbitmq_server, the user should specify the optional - argument. RabbitMQ deployments require additional setup, for more information please read the RabbitMQ portion - of section 3 in the README in the root VOLTTRON directory. - - --weather: Install Python unit conversion library Pint (point) diff --git a/docs/source/setup/images/VOLTTRON_User_Guide.pdf b/docs/source/setup/images/VOLTTRON_User_Guide.pdf deleted file mode 100755 index 4c8b77a890..0000000000 Binary files a/docs/source/setup/images/VOLTTRON_User_Guide.pdf and /dev/null differ diff --git a/docs/source/setup/images/add-chart.png b/docs/source/setup/images/add-chart.png deleted file mode 100755 index 6d30604c8f..0000000000 Binary files a/docs/source/setup/images/add-chart.png and /dev/null differ diff --git a/docs/source/setup/images/clone-existing.png b/docs/source/setup/images/clone-existing.png deleted file mode 100755 index 465f351bf0..0000000000 Binary files a/docs/source/setup/images/clone-existing.png and /dev/null differ diff --git a/docs/source/setup/images/dashboard-blank.png b/docs/source/setup/images/dashboard-blank.png deleted file mode 100755 index 2e62a2331d..0000000000 Binary files a/docs/source/setup/images/dashboard-blank.png and /dev/null differ diff --git a/docs/source/setup/images/eclipse-marketplace.png b/docs/source/setup/images/eclipse-marketplace.png deleted file mode 100755 index cf6a0136b2..0000000000 Binary files a/docs/source/setup/images/eclipse-marketplace.png and /dev/null differ diff --git a/docs/source/setup/images/eclipse-marketplace2.png b/docs/source/setup/images/eclipse-marketplace2.png deleted file mode 100755 index 91b4b5f706..0000000000 Binary files a/docs/source/setup/images/eclipse-marketplace2.png and /dev/null differ diff --git a/docs/source/setup/images/edit-chart.png b/docs/source/setup/images/edit-chart.png deleted file mode 100755 index 771bcff359..0000000000 Binary files a/docs/source/setup/images/edit-chart.png and /dev/null differ diff --git a/docs/source/setup/images/example_market.png b/docs/source/setup/images/example_market.png deleted file mode 100644 index ba953dcb7e..0000000000 Binary files a/docs/source/setup/images/example_market.png and /dev/null differ diff --git a/docs/source/setup/images/finish-import.png b/docs/source/setup/images/finish-import.png deleted file mode 100755 index 5a32e44151..0000000000 Binary files a/docs/source/setup/images/finish-import.png and /dev/null differ diff --git a/docs/source/setup/images/general-project.png b/docs/source/setup/images/general-project.png deleted file mode 100755 index 106228a41d..0000000000 Binary files a/docs/source/setup/images/general-project.png and /dev/null differ diff --git a/docs/source/setup/images/git-view.png b/docs/source/setup/images/git-view.png deleted file mode 100755 index 0b59e8fcc3..0000000000 Binary files a/docs/source/setup/images/git-view.png and /dev/null differ diff --git a/docs/source/setup/images/import-project.png b/docs/source/setup/images/import-project.png deleted file mode 100755 index 5ac524f41d..0000000000 Binary files a/docs/source/setup/images/import-project.png and /dev/null differ diff --git a/docs/source/setup/images/install-volttron-restricted.png b/docs/source/setup/images/install-volttron-restricted.png deleted file mode 100755 index 73465e547e..0000000000 Binary files a/docs/source/setup/images/install-volttron-restricted.png and /dev/null differ diff --git a/docs/source/setup/images/linux-mint.png b/docs/source/setup/images/linux-mint.png deleted file mode 100644 index 1697fb8522..0000000000 Binary files a/docs/source/setup/images/linux-mint.png and /dev/null differ diff --git a/docs/source/setup/images/listener-all-vars.png b/docs/source/setup/images/listener-all-vars.png deleted file mode 100755 index f4fdde5296..0000000000 Binary files a/docs/source/setup/images/listener-all-vars.png and /dev/null differ diff --git a/docs/source/setup/images/login-screen.png b/docs/source/setup/images/login-screen.png deleted file mode 100755 index a9a9d18efb..0000000000 Binary files a/docs/source/setup/images/login-screen.png and /dev/null differ diff --git a/docs/source/setup/images/logout-button.png b/docs/source/setup/images/logout-button.png deleted file mode 100755 index 81fab0beff..0000000000 Binary files a/docs/source/setup/images/logout-button.png and /dev/null differ diff --git a/docs/source/setup/images/new-python-run.png b/docs/source/setup/images/new-python-run.png deleted file mode 100755 index 580b462c8f..0000000000 Binary files a/docs/source/setup/images/new-python-run.png and /dev/null differ diff --git a/docs/source/setup/images/overview.png b/docs/source/setup/images/overview.png deleted file mode 100755 index aa6a136158..0000000000 Binary files a/docs/source/setup/images/overview.png and /dev/null differ diff --git a/docs/source/setup/images/pick-python.png b/docs/source/setup/images/pick-python.png deleted file mode 100755 index 2d3e8eafaf..0000000000 Binary files a/docs/source/setup/images/pick-python.png and /dev/null differ diff --git a/docs/source/setup/images/pin-to-dashboard.png b/docs/source/setup/images/pin-to-dashboard.png deleted file mode 100755 index b4041b15df..0000000000 Binary files a/docs/source/setup/images/pin-to-dashboard.png and /dev/null differ diff --git a/docs/source/setup/images/platform-default.png b/docs/source/setup/images/platform-default.png deleted file mode 100755 index b224d4d817..0000000000 Binary files a/docs/source/setup/images/platform-default.png and /dev/null differ diff --git a/docs/source/setup/images/platform-run-config.png b/docs/source/setup/images/platform-run-config.png deleted file mode 100755 index 083e157d83..0000000000 Binary files a/docs/source/setup/images/platform-run-config.png and /dev/null differ diff --git a/docs/source/setup/images/platforms.png b/docs/source/setup/images/platforms.png deleted file mode 100755 index 6845265011..0000000000 Binary files a/docs/source/setup/images/platforms.png and /dev/null differ diff --git a/docs/source/setup/images/pydev-python.png b/docs/source/setup/images/pydev-python.png deleted file mode 100755 index e20f45bdb2..0000000000 Binary files a/docs/source/setup/images/pydev-python.png and /dev/null differ diff --git a/docs/source/setup/images/register-new-platform-authorization.png b/docs/source/setup/images/register-new-platform-authorization.png deleted file mode 100755 index 5a03488d0b..0000000000 Binary files a/docs/source/setup/images/register-new-platform-authorization.png and /dev/null differ diff --git a/docs/source/setup/images/register-new-platform.png b/docs/source/setup/images/register-new-platform.png deleted file mode 100755 index 796c1029f6..0000000000 Binary files a/docs/source/setup/images/register-new-platform.png and /dev/null differ diff --git a/docs/source/setup/images/run-results.png b/docs/source/setup/images/run-results.png deleted file mode 100755 index 5568a59585..0000000000 Binary files a/docs/source/setup/images/run-results.png and /dev/null differ diff --git a/docs/source/setup/images/select-branch.png b/docs/source/setup/images/select-branch.png deleted file mode 100755 index b26ba74292..0000000000 Binary files a/docs/source/setup/images/select-branch.png and /dev/null differ diff --git a/docs/source/setup/images/select-path.png b/docs/source/setup/images/select-path.png deleted file mode 100755 index 7f690f5696..0000000000 Binary files a/docs/source/setup/images/select-path.png and /dev/null differ diff --git a/docs/source/setup/images/select-repo.png b/docs/source/setup/images/select-repo.png deleted file mode 100755 index d74fd20b45..0000000000 Binary files a/docs/source/setup/images/select-repo.png and /dev/null differ diff --git a/docs/source/setup/images/set-as-pydev.png b/docs/source/setup/images/set-as-pydev.png deleted file mode 100755 index 681eeefd60..0000000000 Binary files a/docs/source/setup/images/set-as-pydev.png and /dev/null differ diff --git a/docs/source/setup/images/setup-python.png b/docs/source/setup/images/setup-python.png deleted file mode 100755 index 6e90e4537c..0000000000 Binary files a/docs/source/setup/images/setup-python.png and /dev/null differ diff --git a/docs/source/setup/images/start-agent.png b/docs/source/setup/images/start-agent.png deleted file mode 100755 index 57b3b0006a..0000000000 Binary files a/docs/source/setup/images/start-agent.png and /dev/null differ diff --git a/docs/source/setup/images/transport-payload.png b/docs/source/setup/images/transport-payload.png deleted file mode 100755 index 9826244157..0000000000 Binary files a/docs/source/setup/images/transport-payload.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-bidirectional.png b/docs/source/setup/images/vbox-bidirectional.png deleted file mode 100644 index 7a52548b5b..0000000000 Binary files a/docs/source/setup/images/vbox-bidirectional.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-controller.png b/docs/source/setup/images/vbox-controller.png deleted file mode 100644 index 3339221522..0000000000 Binary files a/docs/source/setup/images/vbox-controller.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-credentials.png b/docs/source/setup/images/vbox-credentials.png deleted file mode 100644 index 6e14d378f2..0000000000 Binary files a/docs/source/setup/images/vbox-credentials.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-download.png b/docs/source/setup/images/vbox-download.png deleted file mode 100644 index 90ca3d7059..0000000000 Binary files a/docs/source/setup/images/vbox-download.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-hard-disk-xfce.png b/docs/source/setup/images/vbox-hard-disk-xfce.png deleted file mode 100644 index 74ec8167ff..0000000000 Binary files a/docs/source/setup/images/vbox-hard-disk-xfce.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-memory-size.png b/docs/source/setup/images/vbox-memory-size.png deleted file mode 100644 index 1b8c0ca542..0000000000 Binary files a/docs/source/setup/images/vbox-memory-size.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-naming.png b/docs/source/setup/images/vbox-naming.png deleted file mode 100644 index b539b82a7e..0000000000 Binary files a/docs/source/setup/images/vbox-naming.png and /dev/null differ diff --git a/docs/source/setup/images/vbox-proc-settings.png b/docs/source/setup/images/vbox-proc-settings.png deleted file mode 100644 index aec8b53e0b..0000000000 Binary files a/docs/source/setup/images/vbox-proc-settings.png and /dev/null differ diff --git a/docs/source/setup/images/vc-run-demo.png b/docs/source/setup/images/vc-run-demo.png deleted file mode 100755 index af2159e55b..0000000000 Binary files a/docs/source/setup/images/vc-run-demo.png and /dev/null differ diff --git a/docs/source/setup/images/volttron-console.png b/docs/source/setup/images/volttron-console.png deleted file mode 100755 index 1a63411b9a..0000000000 Binary files a/docs/source/setup/images/volttron-console.png and /dev/null differ diff --git a/docs/source/setup/images/volttron-main-args.png b/docs/source/setup/images/volttron-main-args.png deleted file mode 100755 index fa42572509..0000000000 Binary files a/docs/source/setup/images/volttron-main-args.png and /dev/null differ diff --git a/docs/source/setup/images/volttron-main.png b/docs/source/setup/images/volttron-main.png deleted file mode 100755 index 2443671f96..0000000000 Binary files a/docs/source/setup/images/volttron-main.png and /dev/null differ diff --git a/docs/source/setup/images/volttron-pick-main.png b/docs/source/setup/images/volttron-pick-main.png deleted file mode 100755 index 13168ba066..0000000000 Binary files a/docs/source/setup/images/volttron-pick-main.png and /dev/null differ diff --git a/docs/source/setup/images/volttron-webimage.jpg b/docs/source/setup/images/volttron-webimage.jpg deleted file mode 100644 index 12dd1a4739..0000000000 Binary files a/docs/source/setup/images/volttron-webimage.jpg and /dev/null differ diff --git a/docs/source/setup/index.rst b/docs/source/setup/index.rst deleted file mode 100644 index a29b03b90b..0000000000 --- a/docs/source/setup/index.rst +++ /dev/null @@ -1,374 +0,0 @@ -.. _setup: - -.. _Building-VOLTTRON: - -Installing VOLTTRON -=================== - -.. note:: Volttron version 7.0rc1 is currently tested for Ubuntu versions 18.04 and - 18.10 as well as Linux Mint version 19.3. Version 6.x is tested for Ubuntu - versions 16.04 and 18.04 as well as Linux Mint version 19.1. - - -Install Required Software -------------------------- -Ensure that all the -:ref:`required packages ` are installed. - - -Clone VOLTTRON source code --------------------------- -From version 6.0 VOLTTRON supports two message bus - ZMQ and RabbitMQ. For the latest -build use the develop branch. For a more conservative branch -please use the master branch. - -:: - - git clone https://github.com/VOLTTRON/volttron --branch - -For other options see: :ref:`Getting VOLTTRON ` - - -Setup virtual environment -------------------------- - -The VOLTTRON project includes a bootstrap script which automatically -downloads dependencies and builds VOLTTRON. The script also creates a -Python virtual environment for use by the project which can be activated -after bootstrapping with `. env/bin/activate`. This activated Python -virtual environment should be used for subsequent bootstraps whenever -there are significant changes. The system's Python need only be used on -the initial bootstrap. - -Steps for ZMQ -~~~~~~~~~~~~~ - -:: - - cd - python bootstrap.py - source env/bin/activate - -Proceed to `Testing the Installation`_. - - -Steps for RabbitMQ -~~~~~~~~~~~~~~~~~~ - -1. Install Erlang version >= 21 -############################### - - For RabbitMQ based VOLTTRON, some of the RabbitMQ specific software packages have to be installed. - If you are running an **Debian or CentOS system**, you can install the RabbitMQ dependencies by running the - rabbit dependencies script, passing in the os name and approriate distribution as a parameter. - The following are supported - - * debian bionic (for Ubuntu 18.04) - * debian xenial (for Ubuntu 16.04) - * debian xenial (for Linux Mint 18.04) - * debian stretch (for Debian Stretch) - * centos 7 (for CentOS 7) - * centos 6 (for CentOS 6) - - Example command - - :: - - ./scripts/rabbit_dependencies.sh debian xenial - - **Alternatively** - - You can download and install Erlang from `Erlang Solution `_ - Please include OTP/components - ssl, public_key, asn1, and crypto. - Also lock version of Erlang using the `yum-plugin-versionlock `_ - -2. Configure hostname -###################### - - Rabbitmq requires a valid hostname to start. Use the command hostname on your linux machine to verify if a valid - hostname is set. If not add a valid hostname to the file /etc/hostname. You would need sudo access to edit this file - If you want your rabbitmq instance to be reachable externally, then a hostname should be resolvable to a valid ip. - In order to do this you need to have a entry in /etc/hosts file. For example, the below shows a valid /etc/hosts file - - .. code:: - - 127.0.0.1 localhost - 127.0.0.1 myhost - - 192.34.44.101 externally_visible_hostname - - After the edit, logout and log back in for the changes to take effect. - - If you are testing with VMs make please make sure to provide unique host names for each of the VM you are using. - - .. note:: - - If you change /etc/hostname after setting up rabbitmq (/rabbitmq_server-3.7.7) - -3. Bootstrap -############ - - Install the required software by running the bootstrap script with --rabbitmq option - - :: - - cd volttron - - # bootstrap.py --help will show you all of the "package options" such as - # installing required packages for volttron central or the platform agent. - - python bootstrap.py --rabbitmq [optional install directory defaults to - /rabbitmq_server] - - .. note:: If your PYTHON_PATH is configured for Python 2.7, you'll need to use - ``python3 bootstrap.py ..`` - - This will build the platform and create a virtual Python environment and - dependencies for RabbitMQ. It also installs RabbitMQ server as the current user. - If an install path is provided, path should exists and be writeable. RabbitMQ - will be installed under /rabbitmq_server-3.7.7 Rest of the - documentation refers to the directory /rabbitmq_server-3.7.7 as - $RABBITMQ_HOME - - You can check if RabbitMQ server is installed by checking it's status. - - :: - - $RABBITMQ_HOME/sbin/rabbitmqctl status - - - Please note, RABBITMQ_HOME environment variable can be set in ~/.bashrc. If doing so, - it needs to be set to RabbitMQ installation directory (default path is - /rabbitmq_server/rabbitmq_server-3.7.7) - - :: - - echo 'export RABBITMQ_HOME=$HOME/rabbitmq_server/rabbitmq_server-3.7.7'|tee --append ~/.bashrc | source ~/.bashrc - # Reload the environment variables in the current shell - source ~/.bashrc - - -4. Activate the environment -########################### - - :: - - source env/bin/activate - -5. Create RabbitMQ setup for VOLTTRON -###################################### - - :: - - vcfg --rabbitmq single [optional path to rabbitmq_config.yml] - - Refer to examples/configurations/rabbitmq/rabbitmq_config.yml for a sample configuration file. At a minimum you would - need to provide the host name and a unique common-name (under certificate-data) in the - configuration file. Note. common-name must be unique and the general conventions is to use -root-ca. - - Running the above command without the optional configuration file parameter will prompt user for all the - needed data at the command prompt and use that to generate a rabbitmq_config.yml file in VOLTTRON_HOME - directory. - - This scripts creates a new virtual host and creates SSL certificates needed for this VOLTTRON instance. - These certificates get created under the sub directory "certificates" in your VOLTTRON home - (typically in ~/.volttron). It then creates the main VIP exchange named "volttron" to route message - between platform and agents and alternate exchange to capture unrouteable messages. - - NOTE: We configure RabbitMQ instance for a single volttron_home and volttron_instance. This script will - confirm with the user the volttron_home to be configured. volttron instance name will be read from - volttron_home/config if available, if not user will be prompted for volttron instance name. To run the - scripts without any prompts, save the volttron instance name in volttron_home/config file and pass the - volttron home directory as command line argument For example: "vcfg --vhome /home/vdev/.new_vhome --rabbitmq single" - - Following is the example inputs for "vcfg --rabbitmq single" command. Since no config file is passed the - script prompts for necessary details. - - :: - - Your VOLTTRON_HOME currently set to: /home/vdev/new_vhome2 - - Is this the volttron you are attempting to setup? [Y]: - Creating rmq config yml - RabbitMQ server home: [/home/vdev/rabbitmq_server/rabbitmq_server-3.7.7]: - Fully qualified domain name of the system: [cs_cbox.pnl.gov]: - - Enable SSL Authentication: [Y]: - - Please enter the following details for root CA certificates - Country: [US]: - State: Washington - Location: Richland - Organization: PNNL - Organization Unit: Volttron-Team - Common Name: [volttron1-root-ca]: - Do you want to use default values for RabbitMQ home, ports, and virtual host: [Y]: N - Name of the virtual host under which RabbitMQ VOLTTRON will be running: [volttron]: - AMQP port for RabbitMQ: [5672]: - http port for the RabbitMQ management plugin: [15672]: - AMQPS (SSL) port RabbitMQ address: [5671]: - https port for the RabbitMQ management plugin: [15671]: - INFO:rmq_setup.pyc:Starting rabbitmq server - Warning: PID file not written; -detached was passed. - INFO:rmq_setup.pyc:**Started rmq server at /home/vdev/rabbitmq_server/rabbitmq_server-3.7.7 - INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost - INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost - INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost - INFO:rmq_setup.pyc: - Checking for CA certificate - - INFO:rmq_setup.pyc: - Root CA (/home/vdev/new_vhome2/certificates/certs/volttron1-root-ca.crt) NOT Found. Creating root ca for volttron instance - Created CA cert - INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost - INFO:requests.packages.urllib3.connectionpool:Starting new HTTP connection (1): localhost - INFO:rmq_setup.pyc:**Stopped rmq server - Warning: PID file not written; -detached was passed. - INFO:rmq_setup.pyc:**Started rmq server at /home/vdev/rabbitmq_server/rabbitmq_server-3.7.7 - INFO:rmq_setup.pyc: - - ####################### - - Setup complete for volttron home /home/vdev/new_vhome2 with instance name=volttron1 - Notes: - - Please set environment variable VOLTTRON_HOME to /home/vdev/new_vhome2 before starting volttron - - On production environments, restrict write access to - /home/vdev/new_vhome2/certificates/certs/volttron1-root-ca.crt to only admin user. For example: sudo chown root /home/vdev/new_vhome2/certificates/certs/volttron1-root-ca.crt - - A new admin user was created with user name: volttron1-admin and password=default_passwd. - You could change this user's password by logging into https://cs_cbox.pnl.gov:15671/ Please update /home/vdev/new_vhome2/rabbitmq_config.yml if you change password - - ####################### - - -Testing the Installation ------------------------- - -We are now ready to start VOLTTRON instance. If configured with RabbitMQ message bus a config file would have been -generated in $VOLTTRON_HOME/config with the entry message-bus=rmq. If you need to revert back to ZeroMQ based VOLTTRON, -you will have to either remove "message-bus" parameter or set it to default "zmq" in $VOLTTRON_HOME/config. -The following command starts volttron process in the background - -:: - - volttron -vv -l volttron.log& - -This enters the virtual Python environment and then starts the platform in debug (vv) mode with a log file -named volttron.log. Alternatively you can use the utility script start-volttron script that does the same. To stop -stop volttron you can use the stop-volttron script. - -:: - - ./start-volttron - - -.. warning:: - If you plan on running VOLTTRON in the background and detaching it from the - terminal with the ``disown`` command be sure to redirect stderr and stdout to ``/dev/null``. - Some libraries which VOLTTRON relies on output directly to stdout and stderr. - This will cause problems if those file descriptors are not redirected to ``/dev/null`` - - :: - - #To start the platform in the background and redirect stderr and stdout - #to /dev/null - volttron -vv -l volttron.log > /dev/null 2>&1& - - - -Installing and Running Agents ------------------------------ - -VOLTTRON platform comes with several built in services and example agents out of the box. To install a agent -use the script install-agent.py - -:: - - python scripts/install-agent.py -s [-c ] - - -For example, we can use the command to install and start the Listener Agent - a simple agent that periodically publishes -heartbeat message and listens to everything on the message bus. Install and start the Listener agent using the -following command. - -:: - - python scripts/install-agent.py -s examples/ListenerAgent --start - - -Check volttron.log to ensure that the listener agent is publishing heartbeat messages. - -:: - - tail volttron.log - -:: - - 2016-10-17 18:17:52,245 (listeneragent-3.2 11367) listener.agent INFO: Peer: 'pubsub', Sender: 'listeneragent-3.2_1':, Bus: u'', Topic: 'heartbeat/listeneragent-3.2_1', Headers: {'Date': '2016-10-18T01:17:52.239724+00:00', 'max_compatible_version': u'', 'min_compatible_version': '3.0'}, Message: {'status': 'GOOD', 'last_updated': '2016-10-18T01:17:47.232972+00:00', 'context': 'hello'} - - -You can also use the vctl or volttron-ctl command to start, stop or check the status of an agent - -:: - - (volttron)volttron@volttron1:~/git/rmq_volttron$ vctl status - AGENT IDENTITY TAG STATUS HEALTH - 6 listeneragent-3.2 listeneragent-3.2_1 running [13125] GOOD - f master_driveragent-3.2 platform.driver master_driver - -:: - - vctl stop - - -To stop the platform: - -:: - - volttron-ctl shutdown --platform - -or - -:: - - ./stop-volttron - -**Note:** The default working directory is ~/.volttron. The default -directory for creation of agent packages is ~/.volttron/packaged - - - -Next Steps ----------- - -Now that the project is configured correctly: - -See the following links for core services and volttron features: - - * :ref:`Core Services` - * :ref:`Platform Specifications` - -See the following links for agent development: - - * :ref:`Agent Development ` - * :ref:`VOLTTRON Development in Eclipse ` - * :ref:`VOLTTRON Development in PyCharm ` - - -Please refer to related topics to for advanced setup instructions - -Related Topics --------------- - -.. toctree:: - :glob: - :maxdepth: 2 - - RabbitMQ/index - * - diff --git a/docs/source/setup/planning-install.rst b/docs/source/setup/planning-install.rst deleted file mode 100644 index 6acb8882b4..0000000000 --- a/docs/source/setup/planning-install.rst +++ /dev/null @@ -1,97 +0,0 @@ -.. _planning-install: - -=========================== -Planning a VOLTTRON Install -=========================== - -The 3 major installation types for VOLTTRON are doing development, doing research using VOLTTRON, and -collecting and managing physical devices. - -Development and Research installation tend to be smaller footprint installations. For development, the -data is usually synthetic or copied from another source. The existing documentation covers development -installs in significant detail. - -Other deployments will have a better installation experience if they consider certain kinds of questions -while they plan their installation. - -Questions -========= - - * Do you want to send commands to the machines ? - * Do you want to store the data centrally ? - * How many machines do you expect to collect data from on each "collector" ? - * How often will the machines collect data ? - * Are all the devices visible to the same network ? - * What types of VOLTTRON applications do you want to run ? - - -Commands --------- - -If you wish to send commands to the devices, you will want to install and configure the Volttron Central -agent. If you are only using VOLTTRON to securely collect the data, you can turn off the extra agents -to reduce the footprint. - -Storing Data ------------- - -VOLTTRON supports multiple historians. mySQL and MongoDB are the most commonly used. As you plan your -installation, you should consider how quickly you need access to the data and where. If you are looking -at the health and well-being of an entire suite of devices, its likely that you want to do that from a -central location. Analytics can be performed at the edge by VOLTTRON applications or can be performed -across the data usually from a central data repository. The latency that you can tolerate in your data -being available will also determine choices in different agents (ForwardHistorian versus Data Mover) - - -How Many --------- - -The ratio of how many devices-to-collector machine is based on several factors. These include: - - * how much memory and network bandwidth the collection machine has. More = More devices - * how fast the local storage is can affect how fast the data cache can be written. Very slow - storage devices can fall behind - -The second half of the "how many" question is how many collector paltforms are writing to a single -VOLTTRON platform to store data - and whether that storage is local, remote, big enough, etc. - -If you are storing more than moderate amount of data, you will probably benefit from installing -your database on a different machine than your concreate historian machine. Note: This is -contra-indicated if you have a slow network connection between you concrete historian and your database machine. - -In synthetic testing up to 6 virtual machines hosting 500 devices each ( 18 points) were easily -supported by a single centralized platform writing to a Mongo database - using a high speed network. -That central platform experienced very little CPU or memory load when the VOLTTRON Central agent was disabled. - - -How Often ---------- - -This question is closely related to the last. A higher sampling frequency will create more data. This -wil place more work in the storage phase. - - -Networks --------- - -In many cases, there are constraints on how networks can interact with each other. In many cases, -these include security considerations. On some sites, the primary network will be protected from less -secure networks and may require different installation considerations. For example, if a data collector -machine and the database machine are on the same network with sufficient security, you may choose -to have the data collector write directly to the database. If the collector is on an isolated building -network then you will likely need to use the ForwardHistorian to bridge the two networks. - - -Other Considerations --------------------- - -Physical location and maintenance of collector machines must be considered in all live deployments. -Although the number of data points may imply a heavy load on a data collection box, the physical constraints -may limit the practicality of having more than a single box. The other side of that discussion is deploying -many collector boxes may be simpler initially, but may create a maintenance challenge if you don't -plan ahead on how you apply patches, etc. - -Naming conventions should also be considered. The ability to trace data through the system and identify -the collector machine and device can be invaluable in debugging and analysis. - - diff --git a/docs/source/specifications/agent-vip-id.rst b/docs/source/specifications/agent-vip-id.rst deleted file mode 100644 index 67b4127197..0000000000 --- a/docs/source/specifications/agent-vip-id.rst +++ /dev/null @@ -1,98 +0,0 @@ -.. _vip-identity-assignment: - -=========================================== -Agent VIP IDENTITY Assignment Specification -=========================================== - -This document explains how an agent obtains it's VIP IDENTITY, how the platform sets an agent's VIP IDENTITY at startup, and what mechanisms are available to the user to set the VIP IDENTITY for any agent. - -What is a VIP IDENTITY ----------------------- - -A VIP IDENTITY is a platform instance unique identifier for agents. The IDENTITY is used to route messages from one Agent through the VOLTTRON router to the recipiant Agent. The VIP IDENTITY provides a consistant, user defined, and human readable character set to build a VIP IDENTITY. VIP IDENTITIES should be composed of both upper and lowercase lettters, numbers and the following special caracters _.-. - - -Runtime -------- - -The primary interface for obtaining a VIP IDENTITY *at runtime* is via the runtime environment of the agent. At startup the utility function vip_main shall check for the environment variable **AGENT_VIP_IDENTITY**. If the **AGENT_VIP_IDENTITY** environment variable is not set then the vip_main function will fall back to a supplied identity argument. vip_main will pass the appropriate identity argument to the agent constructor. If no identity is set the Agent class will create a random VIP IDENTITY using python's uuid4 function. - -An agent that inherits from the platform's base Agent class can get it's current VIP IDENTITY by retrieving the value of self.core.identity. - -The primary use of the 'identity' argument to vip_main is for agent development. For development it allows agents to specify a default VIP IDENTITY when run outside the platform. As platform Agents are not started via vip_main they will simply receive their VIP IDENTITY via the identity argument when they are instantiated. Using the identity argument of the Agent constructor to set the VIP IDENTITY via agent configuration is no longer supported. - -At runtime the platform will set the environment variable **AGENT_VIP_IDENTITY** to the value set at installation time. - -Agents not based on the platform's base Agent should set their VIP IDENTITY by setting the identity of the ZMQ socket before the socket connects to the platform. If the agent fails to set it's VIP IDENTITY via the ZMQ socket it will be selected automatically by the platform. This platform chosen ID is currently not discoverable to the agent. - -Agent Implementation --------------------- - -If an Agent has a preferred VIP IDENTITY (for example the MasterDriverAgent prefers to use "platform.driver") it may specify this as a default packed value. This is done by including a file named IDENTITY containing only the desired VIP IDENTITY in ASCII plain text in the same directory at the setup.py file for the Agent. This will cause the packaged agent wheel to include an instruction to set the VIP IDENTITY at installation time. - -This value may be overridden at packaging or installation time. - -Packaging ---------- - -An Agent may have it's VIP IDENTITY configured when it is packaged. The packaged value may be used by the platform to set the **AGENT_VIP_IDENTITY** environment variable for the agent process. - -The packaged VIP IDENTITY may be overridden at installation time. This overrides any preferred VIP IDENTITY of the agent. This will cause the packaged agent wheel to include an instruction to set the VIP IDENTITY at installation time. - -To specify the VIP IDENTITY when packaging use the *--vip-identity* option when running "volttron-pkg package". - -Installation ------------- - -An agent may have it's VIP IDENTITY configured when it is installed. This overrides any VIP IDENTITY specified when the agent was packaged. - -To specify the VIP IDENTITY when packaging use the *--vip-identity* option when running "volttron-ctl install". - -Installation Default VIP IDENTITY -********************************* - -If no VIP IDENTITY has been specified by installation time the platform will assign one automatically. - -The platform uses the following template to generate a VIP IDENTITY: - -.. code-block:: python - - "{agent_name}_{n}" - -{agent_name} is substituted with the name of the actual agent such as "listeneragent-0.1" - -{n} is a number to make VIP IDENTITY unique. {n} is set to the first unused number (starting from 1) for all installed instances of an agent. e.g. If there are 2 listener agents installed and the first (VIP IDENTITY listeneragent-0.1_1) is uninstalled leaving the second (VIP IDENTITY "listeneragent-0.1_2") a new listener agent will receive the VIP IDENTITY "listeneragent-0.1_1" when installed. The next installed listener will receive a VIP IDENTITY of "listeneragent-0.1_3". - -The # sign is used to prevent confusing the agent version number with the installed instance number. - -If an agent is repackaged with a new version number it is treated as a new agent and the number will start again from 1. - -VIP IDENTITY Conflicts During Installation -****************************************** - -If an agent is assigned a VIP IDENTITY besides the default value given to it by the platform it is possible for VIP IDENTITY conflicts to exist between installed agents. In this case the platform rejects the installation of an agent with a conflicting VIP IDENTITY and reports an error to the user. - -VIP IDENTITY Conflicts During Runtime -************************************* - -In the case where agents are not started through the platform (usually during development or when running standalone agents) it is possible to encounter a VIP IDENTITY conflict during runtime. In this case the first agent to use a VIP IDENTITY will function as normal. Subsequent agents will still connect to the ZMQ socket but will be silently rejected by the platform router. The router will not route any message to that Agent. Agents using the platforms base Agent class will detect this automatically during the initial handshake with the platform. This condition will shutdown the Agent with an error indicating a VIP IDENTITY conflict as the most likely cause of the problem. - -Auto Numbering With Non-Default VIP IDENTITYs ---------------------------------------------- - -It is possible to use the auto numbering mechanism that the default VIP IDENTITY scheme uses. Simply include the string "{n}" somewhere in the requested VIP IDENTITY and it will be replaced with a number in the same manner as the default VIP IDENTITY is. Python string.format() escaping rules apply. `See this question on StackOverflow. `__ - -Script Features ---------------- - -The scripts/install-agent.py script supports specifying the desired VIP IDENTITY using the -i (or --vip-identity) option - -Security/Privacy ----------------- - -Currently, much like the TAG file in an installed agent, there is nothing to stop someone from modifying the IDENTITY file in the installed agent. - -Constraints and Limitations ---------------------------- - -Currently there is no way for an agent based on the platform base Agent class to recover from a VIP IDENTITY conflict. As that is case only affects developers and a very tiny minority of users and is reported via an error message, there are no plans to fix it. diff --git a/docs/source/specifications/configuration-store.rst b/docs/source/specifications/configuration-store.rst deleted file mode 100644 index 5807ea8faa..0000000000 --- a/docs/source/specifications/configuration-store.rst +++ /dev/null @@ -1,286 +0,0 @@ -.. _ConfigurationStore: - -Agent Configuration Store -========================= - -This document describes the configuration store feature and explains how an agent uses it. - -The configuration store enables users to store agent configurations on the platform and allows the agent to automatically retrieve them during runtime. Users may update the configurations and the agent will automatically be informed of the changes. - -Compatibility -------------- - -Supporting the configuration store will *not* be required by Agents, however the usage will be strongly encouraged as it should substantially improve user experience. - -The previous method for configuring an agent will still be available to agents (and in some cases required). However agents can be created to only work with the configuration store and not support the old method at all. - -It will be possible to create an agent to use the traditional method for configuration to establish defaults if no configuration exist in the platform configuration store. - - -Configuration Names and Paths ------------------------------ - -Any valid OS file path name is a valid configuration name. Any leading or trailing "/", "\" and whitespace is removed by the store. - -The canonical name for the main agent configuration is "config". - -The configuration subsystem remembers the case of configuration names. Name matching is case insensitive both on the Agent and platform side. Configuration names are reported to agent callbacks in the original case used when adding them to the configuration. If a new configuration is store with a different case of an existing name the new name case is used. - -Configuration Ownership ------------------------ - -Each configuration belongs to one agent and one agent only. When an agent refers to a configuration file via it's path it does not need to supply any information about its identity to the platform in the file path. The only configurations an agent has direct access to are it's own. The platform will only inform the owning agent configuration changes. - - -Configuration File Types ------------------------- - -Configurations files come in three types: json, csv, and raw. The type of a configuration file is declared when it is added to or changed in the store. - -The parser assumes the first row of every CSV file is a header. - -Invalid json or csv files are rejected at the time they are added to the store. - -Raw files are unparsed and accepted as is. - -Other parsed types may be added in the future. - -Configuration File Representation to Agents -------------------------------------------- - -JSON -**** - -A json file is parsed and represented as appropriate data types to the requester. - -Consider a file with the following contents: - -.. code-block:: json - - { - "result": "PREEMPTED", - "info": null, - "data": { - "agentID": "my_agent", - "taskID": "my_task" - } - } - -The file will be parsed and presented as a dictionary with 3 values to the requester. - -CSV -*** - -A CSV file is represented as a list of objects. Each object represents a row in the CSV file. - -For instance this (simplified) CSV file: - -.. csv-table:: Example CSV - :header: Volttron Point Name,Modbus Register,Writable,Point Address - - ReturnAirCO2,>f,FALSE,1001 - ReturnAirCO2Stpt,>f,TRUE,1011 - -Will be represented like this: - -.. code-block:: json - - [ - { - "Volttron Point Name": "ReturnAirCO2", - "Modbus Register": ">f", - "Writable": "FALSE", - "Point Address": "1001" - }, - { - "Volttron Point Name": "ReturnAirCO2Stpt", - "Modbus Register": ">f", - "Writable": "TRUE", - "Point Address": "1011" - } - ] - -Raw -*** - -Raw files are represented as a string containing the contents of the file. - -File references ---------------- - -The Platform Configuration Store supports referencing one configuration file from another. If a referenced file exists the contents of that file will replace the file reference when the file is sent to the owning agent. Otherwise the reference will be replaced with None. - -Only configurations that are parsed by the platform (currently "json" or "csv") will be examined for references. If the file referenced is another parsed file type (json or csv, currently) then the replacement will be the parsed contents of the file. - -In a json object the name of a value will never be considered a reference. - -A file reference is any value string that starts with "config://". The rest of the string is the path in the config store to that configuration. The config store path is converted to lower case for comparison purposes. - -Consider the following configuration files named "devices/vav1.config" and "registries/vav.csv", respectively: - -.. code-block:: json - - { - "driver_config": {"device_address": "10.1.1.5", - "device_id": 500}, - - "driver_type": "bacnet", - "registry_config":"config://registries/vav.csv", - "campus": "pnnl", - "building": "isb1", - "unit": "vav1" - } - -.. csv-table:: vav.csv - :header: Volttron Point Name,Modbus Register,Writable,Point Address - - ReturnAirCO2,>f,FALSE,1001 - ReturnAirCO2Stpt,>f,TRUE,1011 - -The resulting configuration returns when an agent asks for "devices/vav1.config". The python object will have the following configuration: - -.. code-block:: python - - { - "driver_config": {"device_address": "10.1.1.5", - "device_id": 500}, - - "driver_type": "bacnet", - "registry_config":[ - { - "Volttron Point Name": "ReturnAirCO2", - "Modbus Register": ">f", - "Writable": "FALSE", - "Point Address": "1001" - }, - { - "Volttron Point Name": "ReturnAirCO2Stpt", - "Modbus Register": ">f", - "Writable": "TRUE", - "Point Address": "1011" - } - ], - "campus": "pnnl", - "building": "isb1", - "unit": "vav1" - } - -Circular references are not allowed. Adding a file that creates a circular reference will cause that file to be rejected by the platform. - -If a file is changed in anyway ("NEW", "UPDATE", or "DELETE") and that file is referred to by another file then the platform considers the referring configuration as changed. The configuration subsystem on the Agent will call every callback listening to a file or any file referring to that file either directly or indirectly. - -Agent Configuration Sub System ------------------------------- - -The configuration store shall be implemented on the Agent(client) side in the form of a new subsystem called config. - -The subsystem caches configurations as the platform updates the state to the agent. Changes to the cache triggered by an RPC call from the platform will trigger callbacks in the agent. - -No callback methods are called until the "onconfig" phase of agent startup. A new phase to agent startup called "onconfig" will be added to the Core class. Originally it was planned to have this run after the "onstart" phase has completed but that is currently not possible. Ideally if an agent is using the config store feature it will not need any "onstart" methods. - -When the "onconfig" phase is triggered the subsystem will retrieve the current configuration state from the platform and call all callbacks registered to a configuration in the store to the "NEW" action. No callbacks are called before this point in agent startup. - -The first time callbacks are called at agent startup any callbacks subscribed to a configuration called "config" are called first. - -Configuration Sub System Agent Methods -************************************** - -These methods are part of the interface available to the Agent. - -config.get( config_name="config" ) - Get the contents of a configuration. If no name is provided the contents of the main agent configuration "config" is returned. This may not be called before "ONSTART" methods are called. If called during "ONSTART" phase it will trigger the subsystem to initialize early but will not trigger any callbacks. - -config.subscribe(callback, action=("NEW", "UPDATE", "DELETE"), pattern="*") - Sets up a callback for handling a configuration change. The platform will automatically update the agent when a configuration changes ultimately triggering all callbacks that match the pattern specified. The action argument describes the types of configuration change action that will trigger the callback. Possible actions are "NEW", "UPDATE", and "DELETE" or a tuple of any combination of actions. If no action is supplied the callback happens for all changes. A list of actions can be supplied if desired. If no file name pattern is supplied then the callback is called for all configurations. The pattern is an regex used match the configuration name. - -The callback will also be called if any file referenced by a configuration file is changed. - - The signature of the callback method is callback(config_name, action, contents) where file_name is the file that triggered the callback, action is the action that triggered the callback, and contents are the new contents of the configuration. Contents will be None on a "DELETE" action. All callbacks registered for "NEW" events will be called at agent startup after all "ONSTART" methods have been called. Unlike pubsub subscriptions, this may be called at any point in an agent's lifetime. - -config.unsubscribe(callback=None, config_name_pattern=None) - Unsubscribe from configuration changes. Specifying a callback only will unsubscribe that callback from all config name patterns they have been bound to. If a pattern only is specified then all callbacks bound to that pattern will be removed. Specifying both will remove that callback from that pattern. Calling with no arguments will remove all subscriptions. This will not be available in the first version of config store. - -config.unsubscribe_all() - Unsubscribe from all configuration changes. - -config.set( config_name, contents, trigger_callback=False ) - Set the contents of a configuration. This may not be called before "ONSTART" methods are called. This can be used by an agent to store agent state across agent installations. This will *not* trigger any callbacks unless trigger_callback is set to True. To prevent deadlock with the platform this method may not be called from a configuration callback function. Doing so will raise a RuntimeError exception. - - This will not modify the local configuration cache the Agent maintains. It will send the configuration change to the platform and rely on the subsequent update_config call. - -config.delete( config_name, trigger_callback=False ) - Remove the configuration from the store. This will *not* trigger any callbacks unless trigger_callback is True. To prevent deadlock with the platform this method may not be called from a configuration callback function. Doing so will raise a RuntimeError exception. - -config.list( ) - Returns a list of configuration names. - -config.set_default(config_name, contents, trigger_callback=False) - Set a default value for a configuration. DOES NOT modify the platform's configuration store but creates a default configuration that is used for agent configuration callbacks if the configuration does not exist in the store or the configuration is deleted from the store. The callback will only be triggered if trigger_callback is true and the configuration store subsystem on the agent is not aware of a configuration with that name from the platform store. - - Typically this will be called in the __init__ method of an agent with the parsed contents of the packaged configuration file. This may not be called from a configuration callback. Doing so will raise a RuntimeError. - -config.delete_default(config_name, trigger_callback=False) - Delete a default value for a configuration. I have no idea why you would ever call this. It is here for completeness. This may not be called from a configuration callback. Doing so will raise a RuntimeError. - - -Configuration Sub System RPC Methods -************************************ - -These methods are made available on each agent to allow the platform to communicate changes to a configuration to the affected agent. - -As these methods are not part of the exposed interface they are subject to change. - -config.update( config_name, action, contents=None, trigger_callback=True ) - called by the platform when a configuration was changed by some method other than the Agent changing the configuration itself. Trigger callback tells the agent whether or not to call any callbacks associate with the configuration. - -Notes on trigger_callback -************************* - -As the configuration subsystem calls all callbacks in the "onconfig" phase and none are called beforehand the trigger_callback setting is effectively ignored if an agent sets a configuration or default configuration before the end of the "onstart" phase. - -Platform Configuration Store ----------------------------- - -The platform configuration store handles the storage and maintenance of configuration states on the platform. - -As these methods are not part of the exposed interface they are subject to change. - -Platform RPC Methods -******************** - -Methods for Agents -++++++++++++++++++ - -Agent methods that change configurations do not trigger any callbacks unless trigger_callback is True. - -set_config( config_name, contents, trigger_callback=False ) - Change/create a configuration file on the platform. - -get_configs( ) - Get all of the configurations for an Agent. - -delete_config( config_name, trigger_callback=False ) - Delete a configuration. - -Methods for Management -++++++++++++++++++++++ - -manage_store_config( identity, config_name, contents, config_type="raw" ) - Change/create a configuration on the platform for an agent with the specified identity - -manage_delete_config( identity, config_name ) - Delete a configuration for an agent with the specified identity. Calls the agent's update_config with the action "DELETE_ALL" and no configuration name. - -manage_delete_store( identity ) - Delete all configurations for a VIP IDENTITY. - -manage_list_config( identity ) - Get a list of configurations for an agent with the specified identity. - -manage_get_config( identity, config_name, raw=True ) - Get the contents of a configuration file. If raw is set to True this function will return the original file, otherwise it will return the parsed representation of the file. - -manage_list_stores( ) - Get a list of all the agents with configurations. - -Direct Call Methods -+++++++++++++++++++ - -Services local to the platform who wish to use the configuration store may use two helper methods on the agent class created for this purpose. This allows the auth service to use the config store before the router is started. - -delete(self, identity, config_name, trigger_callback=False) - Same as functionality as delete_config, but the caller must specify the indentity of the config store. - -store(self, identity, config_name, contents, trigger_callback=False) - Same functionality as set_config, but the caller must specify the indentity of the config store. - -Command Line Interface -********************** - -The command line interface will consist of a new commands for the volttron-ctl program called "config" with four sub-commands called "store", "delete", "list", "get". These commands will map directly to the management RPC functions in the previous section. - - -Disabling the Configuration Store -********************************* - -Agents may optionally disable support for the configuration store by passing enable_store=False to the __init__ method of the Agent class. This allows temporary agents to not spin up the subsystem when it is not needed. Platform service agents that do not yet support the configuration store and the temporary agents used by volttron-ctl will set this value. - diff --git a/docs/source/specifications/dnp3_agent.rst b/docs/source/specifications/dnp3_agent.rst deleted file mode 100644 index 27dc528160..0000000000 --- a/docs/source/specifications/dnp3_agent.rst +++ /dev/null @@ -1,222 +0,0 @@ -.. _DNP3-Agent: - -DNP3 -==== - -`DNP3 `_ (Distributed Network Protocol) is -a set of communications protocols that are widely used by utilities such as -electric power companies, primarily for `SCADA `_ purposes. -It was adopted in 2010 -as `IEEE Std 1815-2010 `_, -later updated to `1815-2012 `_. - -VOLTTRON's DNP3Agent is an implementation of a DNP3 Outstation as specified in -IEEE Std 1815-2012. It engages in bidirectional network communications with a DNP3 Master, -which might be located at a power utility. - -Like some other VOLTTRON protocol agents (e.g. IEEE2030_5Agent), DNP3Agent can optionally be -front-ended by a DNP3 device driver running under VOLTTRON's MasterDriverAgent. This -allows a DNP3 Master to be treated like any other device in VOLTTRON's ecosystem. - -The VOLTTRON DNP3Agent implementation of an Outstation is built on PyDNP3, -an open-source library from Kisensum containing Python language -bindings for Automatak's C++ `opendnp3 `_ -library, the de facto reference implementation of DNP3. - -DNP3Agent exposes DNP3 application-layer functionality, creating an extensible -base from which specific custom behavior can be designed and supported. By default, DNP3Agent -acts as a simple transfer agent, publishing data received from the Master on -the VOLTTRON Message Bus, and responding to RPCs from other VOLTTRON agents -by sending data to the Master. - -Requirements ------------- - -PyDNP3 can be installed in an activated environment with: - -:: - - pip install pydnp3 - -RPC Calls -~~~~~~~~~ - -DNP3Agent exposes the following VOLTTRON RPC calls: - -.. code-block:: python - - def get_point(self, point_name): - """ - Look up the most-recently-received value for a given output point. - - @param point_name: The point name of a DNP3 PointDefinition. - @return: The (unwrapped) value of a received point. - """ - - def get_point_by_index(self, group, index): - """ - Look up the most-recently-received value for a given point. - - @param group: The group number of a DNP3 point. - @param index: The index of a DNP3 point. - @return: The (unwrapped) value of a received point. - """ - - def get_points(self): - """ - Look up the most-recently-received value of each configured output point. - - @return: A dictionary of point values, indexed by their VOLTTRON point names. - """ - - def set_point(self, point_name, value): - """ - Set the value of a given input point. - - @param point_name: The point name of a DNP3 PointDefinition. - @param value: The value to set. The value's data type must match the one in the DNP3 PointDefinition. - """ - - def set_points(self, point_list): - """ - Set point values for a dictionary of points. - - @param point_list: A dictionary of {point_name: value} for a list of DNP3 points to set. - """ - - def config_points(self, point_map): - """ - For each of the agent's points, map its VOLTTRON point name to its DNP3 group and index. - - @param point_map: A dictionary that maps a point's VOLTTRON point name to its DNP3 group and index. - """ - - def get_point_definitions(self, point_name_list): - """ - For each DNP3 point name in point_name_list, return a dictionary with each of the point definitions. - - The returned dictionary looks like this: - - { - "point_name1": { - "property1": "property1_value", - "property2": "property2_value", - ... - }, - "point_name2": { - "property1": "property1_value", - "property2": "property2_value", - ... - } - } - - If a definition cannot be found for a point name, it is omitted from the returned dictionary. - - :param point_name_list: A list of point names. - :return: A dictionary of point definitions. - """ - -Pub/Sub Calls -~~~~~~~~~~~~~ - -DNP3Agent uses two topics when publishing data to the VOLTTRON message bus: - - * **Point Values (default topic: dnp3/point)**: As DNP3Agent communicates with the Master, - it publishes received point values on the VOLTTRON message bus. - - * **Outstation status (default topic: dnp3/status)**: If the status of the DNP3Agent outstation - changes, for example if it is restarted, it publishes its new status on the VOLTTRON message bus. - -Data Dictionary of Point Definitions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -DNP3Agent loads and uses a data dictionary of point definitions, which are maintained by -agreement between the (DNP3Agent) Outstation and the DNP3 Master. -The data dictionary is stored in the agent's registry. - -Current Point Values -~~~~~~~~~~~~~~~~~~~~ - -DNP3Agent tracks the most-recently-received value for each point definition in its -data dictionary, regardless of whether the point value's source is a VOLTTRON RPC call or -a message from the DNP3 Master. - -Agent Configuration -~~~~~~~~~~~~~~~~~~~ - -The DNP3Agent configuration file specifies the following fields: - - - **local_ip**: (string) - Outstation's host address (DNS resolved). - Default: 0.0.0.0. - - **port**: (integer) - Outstation's port number - the port that the remote endpoint (Master) is listening on. - Default: 20000. - - **point_topic**: (string) - VOLTTRON message bus topic to use when publishing DNP3 point values. - Default: dnp3/point. - - **outstation_status_topic**: (string) - Message bus topic to use when publishing outstation status. - Default: dnp3/outstation_status. - - **outstation_config**: (dictionary) - Outstation configuration parameters. All are optional. Parameters include: - - -- **database_sizes**: (integer) - Size of each outstation database buffer. - Default: 10. - -- **event_buffers**: (integer) - Size of the database event buffers. - Default: 10. - -- **allow_unsolicited**: (boolean) - Whether to allow unsolicited requests. - Default: True. - -- **link_local_addr**: (integer) - Link layer local address. - Default: 10. - -- **link_remote_addr**: (integer) - Link layer remote address. - Default: 1. - -- **log_levels**: (list) - List of bit field names (OR'd together) that filter what gets logged by DNP3. - Default: [NORMAL]. Possible values: ALL, ALL_APP_COMMS, ALL_COMMS, NORMAL, NOTHING. - -- **threads_to_allocate**: (integer) - Threads to allocate in the manager's thread pool. - Default: 1. - -A sample DNP3Agent configuration file is available in `services/core/DNP3Agent/dnp3agent.config`. - -VOLTTRON DNP3 Device Driver ---------------------------- - -VOLTTRON's DNP3 device driver exposes get_point/set_point calls, and scrapes, for DNP3 points. - -The driver periodically issues DNP3Agent RPC calls to refresh its cached -representation of DNP3 data. It issues RPC calls to DNP3Agent as needed when -responding to get_point, set_point and scrape_all calls. - -For information about the DNP3 driver, see :ref:`DNP3 Driver Configuration `. - -Installing DNP3Agent --------------------- - -To install DNP3Agent, please consult the installation advice in `services/core/DNP3Agent/README.md`. -README.md specifies a default agent configuration, which can be overridden as needed. - -An agent installation script is available: - -.. code-block:: python - - $ export VOLTTRON_ROOT= - $ cd $VOLTTRON_ROOT - $ source services/core/DNP3Agent/install_dnp3_agent.sh - -When installing MesaAgent, please note that the agent's point definitions must be -loaded into the agent's config store. See install_dnp3_agent.sh for -an example of how to load them. - -For Further Information ------------------------ - -Questions? Please contact: - - - Rob Calvert (rob@kisensum.com) diff --git a/docs/source/specifications/driver-override.rst b/docs/source/specifications/driver-override.rst deleted file mode 100644 index fce4b90c75..0000000000 --- a/docs/source/specifications/driver-override.rst +++ /dev/null @@ -1,54 +0,0 @@ -.. _DriverOverride: - -Driver Override Specification -============================== -This document describes the specification for the global override feature. -By default, every user is allowed write access to the devices by the master driver. The override feature will allow the user (for example, building administrator) to override this default behavior and enable the user to lock the write access on the devices for a specified duration of time or indefinitely. - -Functional Capabilities ------------------------------ - -1. User shall be able to specify the following when turning on the override behavior on the devices. - - * Override pattern, for example, - - If pattern is campus/building1/* - Override condition is turned on for all the devices under campus/building1/. - - If pattern is campus/building1/ahu1 - Override condition is turned on for only campus/building1/ahu1 - - The pattern matching shall use bash style filename matching semantics. - - * Time duration over which override behavior is applicable. If the time duration is negative, then override condition is applied indefinitely. - - * Optional revert-to-fail-safe-state flag. If the flag is set, master driver shall set all the set points falling under the override condition to its default state/value immediately. This is to ensure that the devices are in fail-safe state when the override/lock feature is removed. If the flag is not set, the device state/value is untouched. - - * Optional staggered revert flag. If this flag is set, reverting of devices will be staggered. - -2. User shall be able to disable/turn off the override behavior on devices by specifying: - - * Pattern on which the override/lock feature has be disabled. (example: campus/building/\*) - -3. User shall be able to get a list of all the devices with the override condition set. - -4. User shall be able to get a list of all the override patterns that are currently active. - -5. User shall be able to clear all the overrides. - -6. Any changes to override patterns list shall be stored in the config store. On startup, list of override patterns and corresponding end times are retrieved from the config store. If the end time is indefinite or greater than current time for any pattern, then override is set on the matching devices for remaining duration of time. - -7. Whenever a device is newly configured, a check is made to see if it is part of the overridden patterns. If yes, it is added to list of overridden devices. - -8. When a device is being removed, a check is made to see if it is part of the overridden devices. If yes, it is removed from the list of overridden devices. - -Driver RPC Methods -******************** -set_override_on( pattern, duration=0.0, failsafe_revert=True, staggered_revert=True ) - Turn on override condition on all the devices matching the pattern. Time duration for the override condition has to be in seconds. For indefinite duration, the time duration has to be <= 0.0. - -set_override_off( pattern ) - Turn off override condition on all the devices matching the pattern. The specified pattern will be removed from the override patterns list. All the devices falling under the given pattern will be removed from the list of overridden devices. - -get_override_devices( ) - Get a list of all the devices with override condition. - -get_override_patterns( ) - Get a list of override patterns that are currently active. - -clear_overrides( ) - Clear all the overrides. - diff --git a/docs/source/specifications/external-rpc-enhancement.rst b/docs/source/specifications/external-rpc-enhancement.rst deleted file mode 100644 index 350a02f209..0000000000 --- a/docs/source/specifications/external-rpc-enhancement.rst +++ /dev/null @@ -1,157 +0,0 @@ -.. _ExternalRPCEnhancement: - -RPC Communication Between Remote Platforms -========================================== - -This document describes RPC communication between different platforms. In the current setup of VOLTTRON, if an agent in -one platform wants to make a RPC method call on an agent in a different platform, it responsible for establishing and -managing the connection with the target platform. Instead, if allow the VIP routers of each platform to make the -connection and manage the RPC communication internally, this will reduce the burden on the agents and enable a more -seamless RPC communication between agents on different platforms. - - -VIP Router -********** - -The VIP Router on each platform is responsible for establishing and maintaining the connection with remote platforms. - - -Router Functional Capabilities -****************************** - -1. Each VOLTTRON platform shall have a list of other VOLTTRON platforms that it has to establish connection in a config -file. - -2. The VIP router of each platform connects to other platforms on startup. It is responsible for maintaining the -connection (detects disconnects and intiate reconnects etc). - -3. The VIP router routes the external RPC message as described in "Messages for External RPC communication" section. - - -External RPC Subsystem -********************** - -External RPC subsystem allows an agent to make RPC method calls on agents running in remote platforms. - - -External RPC Functional Capabilities -************************************ -1. The agent needs to specify the remote platform name as an additional argument in the original RPC call or notify -method. - -2. The external RPC subsystem on the agent side adds the remote platform name into its VIP frame and sends to the -VIP router for routing to correct destination platform. It is described in detail in the next section. - - -Messages for External RPC communication -*************************************** - -The VIP router and external RPC subsystem on the agent side will be using VIP protocol for communication. The -communication between the VIP routers and the external RPC susbsytem on the agent side can be best explained with an -example. Suppose an agent 1 on platform V1 wants to make RPC method call on agent 2 in platform V2. Then the underlying -messages exchanged between the two platforms will look like below. - -Message format for external RPC subsystem of agent 1 on platform V1 to send to its VIP router. -:: - - +-+ - | | Empty recipient frame (implies VIP router is the destination) - +-+----+ - | VIP1 | Signature frame - +-+---------+ - |V1 user id | Empty user ID frame - +-+---------+ - | 0001 | Method request ID, for example "0001" - +-------------++ - | external_rpc | Subsystem, "external_rpc" - +-----------------------------+ - | external RPC request message| Dictionary containing destination platform name, destination agent identity, - | | source agent identity, method name and method arguments - +-----------------------------+ - - -Message sent by VIP router on platform V1 to VIP router of platform V2. - -:: - - +-----+ - | V2 | Destination platform ID, "V2" in this case - +-+---+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+---------+ - |V1 user id | Empty user ID frame - +-+---------+ - | 0001 | Method Request ID, for example "0001" - +--------------+ - | external_rpc | Subsystem, "external_rpc" - +------------------------------+ - | external RPC request message | Dictionary containing destination platform name, destination agent identity, - | | source platform name, source agent identity, method and arguments - +------------------------------+ - - -When the VIP router of platform V2 receives the message, it extracts the destination agent identity from the external -RPC request message frame and routes it to the intended agent. - -The result of the RPC method execution needs to be returned back to the calling agent. So the messages for the return -path are as follows. The source and destination platforms and agents are interchanged in the reply message. - -Message sent by external RPC subsystem of agent 2 on platform V2 to its VIP router. - -:: - - +-+ - | | Empty recipient frame (implies destination is VIP router) - +-+----+ - | VIP1 | Signature frame - +-+---------+ - |V2 user id | Empty user ID frame - +-+---------+ - | 0001 | Method Request ID, for example "0001" - +--------------+ - | external_rpc | Subsystem, "external_rpc" - +------------------------------+ - | external rpc reply message | Dictionary containing destination platform name, destination agent identity - | | source platform name, source agent identity and method result - +------------------------------+ - - -Message sent by VIP router of platform V2 to VIP router of platform V1. -:: - - +-----+ - | V1 | Source platform ID frame, "V1" in this case - +-+---+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+---------+ - |V1 user id | Empty user ID frame - +-+---------+ - | 0001 | Method Request ID, for example "0001" - +--------------+ - | external_rpc | Subsystem, "external_rpc" - +------------------------------+ - | external rpc reply message | Dictionary containing destination platform name, destination agent identity - | | source platform name, source agent identity and method result - +------------------------------+ - -The VIP router of platform V1 extracts the destination agent identity from the external RPC reply message frame and -routes it to the calling agent. - - -Methods for External RPC Subsystem -********************************** - -call(peer, method, \*args, \**kwargs) - New 'external_platform' parameter need to be added in kwargs to the -original RPC subsystem call. If the platform name of the target platform is passed into the 'external_platform' -parameter, the RPC method on the target platform gets executed. - -notify(peer, method, \*args, \**kwargs) - New 'external_platform' parameter need to be added in kwargs to the -original RPC subsystem notify method. If the platform name of the target platform is passed into the 'external_platform' -parameter, the RPC method on the target platform gets executed. - -handle_external_rpc_subsystem(message) - Handler for the external RPC subsystem messages. It executes the requested RPC -method and returns the result to the calling platform. diff --git a/docs/source/specifications/ieee2030_5_agent.rst b/docs/source/specifications/ieee2030_5_agent.rst deleted file mode 100644 index 1471549c3b..0000000000 --- a/docs/source/specifications/ieee2030_5_agent.rst +++ /dev/null @@ -1,229 +0,0 @@ -.. _IEEE2030_5: - -IEEE 2030.5 DER Support -======================= - -Version 1.0 - -Smart Energy Profile 2.0 (SEP2, IEEE 2030.5) specifies a REST architecture built -around the core HTTP verbs: GET, HEAD, PUT, POST and DELETE. -A specification for the IEEE 2030.5 protocol can be found -`here `_. - -IEEE 2030.5 EndDevices (clients) POST XML resources representing their state, -and GET XML resources containing command and control information from the server. -The server never reaches out to the client unless a "subscription" is -registered and supported for a particular resource type. This implementation -does not use IEEE 2030.5 registered subscriptions. - -The IEEE 2030.5 specification requires HTTP headers, and it explicitly requires RESTful -response codes, for example: - - - 201 - "Created" - - 204 - "No Content" - - 301 - "Moved Permanently" - - etc. - -IEEE 2030.5 message encoding may be either XML or EXI. -Only XML is supported in this implementation. - -IEEE 2030.5 requires HTTPS/TLS version 1.2 along with support for the -cipher suite TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8. -Production installation requires a certificate issued by a IEEE 2030.5 CA. -The encryption requirement can be met by using a web server such as -Apache to proxy the HTTPs traffic. - -IEEE 2030.5 discovery, if supported, must be implemented by an xmDNS server. -Avahi can be modified to perform this function. - -Function Sets -------------- - -IEEE 2030.5 groups XML resources into "Function Sets." Some of these function sets -provide a core set of functionality used across higher-level function sets. -This implementation implements resources from the following function sets: - - - Time - - Device Information - - Device Capabilities - - End Device - - Function Set Assignments - - Power Status - - Distributed Energy Resources - -Distributed Energy Resources ----------------------------- - -Distributed energy resources (DERs) are devices that generate energy, e.g., solar inverters, -or store energy, e.g., battery storage systems, electric vehicle supply equipment (EVSEs). -These devices are managed by a IEEE 2030.5 DER server using DERPrograms which are described by -the IEEE 2030.5 specification as follows: - - Servers host one or more DERPrograms, which in turn expose DERControl events to DER clients. - DERControl instances contain attributes that allow DER clients to respond to events - that are targeted to their device type. A DERControl instance also includes scheduling - attributes that allow DER clients to store and process future events. These attributes - include start time and duration, as well an indication of the need for randomization of - the start and / or duration of the event. The IEEE 2030.5 DER client model is based on the - SunSpec Alliance Inverter Control Model [SunSpec] which is derived from - IEC 61850-90-7 [61850] and [EPRI]. - -EndDevices post multiple IEEE 2030.5 resources describing their status. The following is an -example of a Power Status resource that might be posted by an EVSE (vehicle charging -station): - -.. code-block:: xml - - - 4 - 1487812095 - 1 - 9300 - - - 3 - -5 - - - 3 - 22 - - - 3 - 7 - - 11280 - 10000 - 9223372036854775807 - 1487812095 - - - -Design Details --------------- - -.. image:: files/volttron_ieee2030_5.jpg - -VOLTTRON's IEEE 2030.5 implementation includes a IEEE2030_5Agent and a IEEE 2030.5 device driver, -as described below. - -VOLTTRON IEEE2030_5Agent -~~~~~~~~~~~~~~~~~~ - -IEEE2030_5Agent implements a IEEE 2030.5 server that receives HTTP POST/PUT -requests from IEEE 2030.5 devices. The requests are routed to IEEE2030_5Agent over the VOLTTRON -message bus by VOLTTRON's MasterWebService. IEEE2030_5Agent returns an appropriate HTTP -response. In some cases (e.g., DERControl requests), this response includes a data -payload. - -IEEE2030_5Agent maps IEEE 2030.5 resource data to a VOLTTRON IEEE 2030.5 data model based on SunSpec, -using block numbers and point names as defined in the SunSpec Information Model, -which in turn is harmonized with 61850. The data model is given in detail below. - -Each device's data is stored by IEEE2030_5Agent in an EndDevice memory structure. This -structure is not persisted to a database. Each EndDevice retains only the most -recently received value for each field. - -IEEE2030_5Agent exposes RPC calls for getting and setting EndDevice data. - -VOLTTRON IEEE 2030.5 Device Driver -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The IEEE 2030.5 device driver is a new addition to VOLTTRON MasterDriverAgent's family of -standard device drivers. It exposes get_point/set_point calls for IEEE 2030.5 EndDevice fields. - -The IEEE 2030.5 device driver periodically issues IEEE2030_5Agent RPC calls to refresh its cached -representation of EndDevice data. It issues RPC calls to IEEE2030_5Agent as needed when -responding to get_point, set_point and scrape_all calls. - -Field Definitions -~~~~~~~~~~~~~~~~~ - -These field IDs correspond to the ones in the IEEE 2030.5 device driver's configuration file, ieee2030_5.csv. -They have been used in that file's "Volttron Point Name" column and also in its "Point Name" column. - -================= ======================== ==================================================== ======= ====== -Field ID IEEE 2030.5 Resource/Property Description Units Type -================= ======================== ==================================================== ======= ====== -b1_Md device_information Model (32 char lim). string - mfModel -b1_Opt device_information Long-form device identifier (32 char lim). string - lfdi -b1_SN abstract_device Short-form device identifier (32 char lim). string - sfdi -b1_Vr device_information Version (16 char lim). string - mfHwVer -b113_A mirror_meter_reading AC current. A float - PhaseCurrentAvg -b113_DCA mirror_meter_reading DC current. A float - InstantPackCurrent -b113_DCV mirror_meter_reading DC voltage. V float - LineVoltageAvg -b113_DCW mirror_meter_reading DC power. W float - PhasePowerAvg -b113_PF mirror_meter_reading AC power factor. % float - PhasePFA -b113_WH mirror_meter_reading AC energy. Wh float - EnergyIMP -b120_AhrRtg der_capability Usable capacity of the battery. Ah float - rtgAh Maximum charge minus minimum charge. -b120_ARtg der_capability Maximum RMS AC current level capability of the A float - rtgA inverter. -b120_MaxChaRte der_capability Maximum rate of energy transfer into the device. W float - rtgMaxChargeRate -b120_MaxDisChaRte der_capability Maximum rate of energy transfer out of the device. W float - rtgMaxDischargeRate -b120_WHRtg der_capability Nominal energy rating of the storage device. Wh float - rtgWh -b120_WRtg der_capability Continuous power output capability of the inverter. W float - rtgW -b121_WMax der_settings Maximum power output. Default to WRtg. W float - setMaxChargeRate -b122_ActWh mirror_meter_reading AC lifetime active (real) energy output. Wh float - EnergyEXP -b122_StorConn der_status CONNECTED=0, AVAILABLE=1, OPERATING=2, TEST=3. enum - storConnectStatus -b124_WChaMax der_control Setpoint for maximum charge. This is the only W float - opModFixedFlow field that is writable with a set_point call. -b403_Tmp mirror_meter_reading Pack temperature. C float - InstantPackTemp -b404_DCW PEVInfo Power flow in or out of the inverter. W float - chargingPowerNow -b404_DCWh der_availability Output energy (absolute SOC). Wh float - availabilityDuration Calculated as (availabilityDuration / 3600) * WMax. -b802_LocRemCtl der_status Control Mode: REMOTE=0, LOCAL=1. enum - localControlModeStatus -b802_SoC der_status State of Charge %. % WHRtg float - stateOfChargeStatus -b802_State der_status DISCONNECTED=1, INITIALIZING=2, CONNECTED=3, enum - inverterStatus STANDBY=4, SOC PROTECTION=5, FAULT=99. -================= ======================== ==================================================== ======= ====== - -Revising and Expanding the Field Definitions --------------------------------------------- - -The IEEE 2030.5-to-SunSpec field mappings in this implementation are a relatively thin subset of all possible -field definitions. Developers are encouraged to expand the definitions. - -The procedure for expanding the field mappings requires you to make changes in two places: - -1. Update the driver's point definitions in services/core/MasterDriverAgent/master_driver/ieee2030_5.csv -2. Update the IEEE 2030.5-to-SunSpec field mappings in services/core/IEEE2030_5Agent/ieee2030_5/end_device.py and __init__.py - -When updating VOLTTRON's IEEE 2030.5 data model, please use field IDs that conform to the SunSpec -block-number-and-field-name model outlined in the SunSpec Information Model Reference -(see the link below). - -For Further Information ------------------------ - -SunSpec References: - - - Information model specification: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Information-Models-12041.pdf - - Information model reference spreadsheet: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Information-Model-Reference.xlsx - - Inverter models: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Inverter-Models-12020.pdf - - Energy storage models: http://sunspec.org/wp-content/uploads/2015/06/SunSpec-Energy-Storage-Models-12032.pdf - -Questions? Please contact: - - - Rob Calvert (rob@kisensum.com) or James Sheridan (james@kisensum.com) diff --git a/docs/source/specifications/index.rst b/docs/source/specifications/index.rst deleted file mode 100644 index ce6c403d3b..0000000000 --- a/docs/source/specifications/index.rst +++ /dev/null @@ -1,11 +0,0 @@ -.. _platform-specifications: - -======================= -Platform Specifications -======================= - -.. toctree:: - :glob: - :maxdepth: 1 - - * diff --git a/docs/source/specifications/mesa_agent.rst b/docs/source/specifications/mesa_agent.rst deleted file mode 100644 index 32ca1b3ed9..0000000000 --- a/docs/source/specifications/mesa_agent.rst +++ /dev/null @@ -1,263 +0,0 @@ -.. _MESA: - -MesaAgent ---------- - -MesaAgent is a VOLTTRON agent that handles MESA-ESS DNP3 outstation communications. -It subclasses and extends the functionality of VOLTTRON's DNP3Agent. Like DNP3Agent, -MesaAgent models a DNP3 outstation, communicating with a DNP3 master. - -`DNP3 `_ (Distributed Network Protocol) is -a set of communications protocols that are widely used by utilities such as -electric power companies, primarily for `SCADA `_ purposes. -It was adopted in 2010 -as `IEEE Std 1815-2010 `_, -later updated to `1815-2012 `_. - -VOLTTRON's MesaAgent and DNP3Agent are implementations of a DNP3 Outstation as specified in -IEEE Std 1815-2012. They engage in bidirectional network communications with a DNP3 Master, -which might be located at a power utility. - -MESA-ESS is an extension and enhancement to DNP3. It builds on the basic DNP3 communications -protocol, adding support for more complex structures, including functions, arrays, curves and schedules. -The draft specification for MESA-ESS, as well as a spreadsheet of point definitions, can be -found at **http://mesastandards.org/mesa-ess-2016/**. - -VOLTTRON's DNP3Agent and MesaAgents implementations of an Outstation are built on pydnp3, -an open-source library from Kisensum containing Python language -bindings for Automatak's C++ `opendnp3 `_ -library, the de facto reference implementation of DNP3. - -MesaAgent exposes DNP3 application-layer functionality, creating an extensible -base from which specific custom behavior can be designed and supported, including support -for MESA functions, arrays and selector blocks. By default, MesaAgent -acts as a simple transfer agent, publishing data received from the Master on -the VOLTTRON Message Bus, and responding to RPCs from other VOLTTRON agents -by sending data to the Master. Properties of the point and function definitions also enable -the use of more complex controls for point data capture and publication. - -MesaAgent was developed by Kisensum for use by 8minutenergy, which provided generous -financial support for the open-source contribution to the VOLTTRON platform, along with -valuable feedback based on experience with the agent in a production context. - -RPC Calls -~~~~~~~~~ - -MesaAgent exposes the following VOLTTRON RPC calls: - -.. code-block:: python - - def get_point(self, point_name): - """ - Look up the most-recently-received value for a given output point. - - @param point_name: The point name of a DNP3 PointDefinition. - @return: The (unwrapped) value of a received point. - """ - - def get_point_by_index(self, data_type, index): - """ - Look up the most-recently-received value for a given point. - - @param data_type: The data_type of a DNP3 point. - @param index: The index of a DNP3 point. - @return: The (unwrapped) value of a received point. - """ - - def get_points(self): - """ - Look up the most-recently-received value of each configured output point. - - @return: A dictionary of point values, indexed by their point names. - """ - - def get_configured_points(self): - """ - Look up the most-recently-received value of each configured point. - - @return: A dictionary of point values, indexed by their point names. - """ - - def set_point(self, point_name, value): - """ - Set the value of a given input point. - - @param point_name: The point name of a DNP3 PointDefinition. - @param value: The value to set. The value's data type must match the one in the DNP3 PointDefinition. - """ - - def set_points(self, point_dict): - """ - Set point values for a dictionary of points. - - @param point_dict: A dictionary of {point_name: value} for a list of DNP3 points to set. - """ - - def config_points(self, point_map): - """ - For each of the agent's points, map its VOLTTRON point name to its DNP3 group and index. - - @param point_map: A dictionary that maps a point's VOLTTRON point name to its DNP3 group and index. - """ - - def get_point_definitions(self, point_name_list): - """ - For each DNP3 point name in point_name_list, return a dictionary with each of the point definitions. - - The returned dictionary looks like this: - - { - "point_name1": { - "property1": "property1_value", - "property2": "property2_value", - ... - }, - "point_name2": { - "property1": "property1_value", - "property2": "property2_value", - ... - } - } - - If a definition cannot be found for a point name, it is omitted from the returned dictionary. - - :param point_name_list: A list of point names. - :return: A dictionary of point definitions. - """ - - def get_selector_block(self, point_name, edit_selector): - """ - Return a dictionary of point values for a given selector block. - - :param point_name: Name of the first point in the selector block. - :param edit_selector: The index (edit selector) of the block. - :return: A dictionary of point values. - """ - - def reset(self): - """ - Reset the agent's internal state, emptying point value caches. Used during iterative testing. - """ - -Pub/Sub Calls -~~~~~~~~~~~~~ - -MesaAgent uses three topics when publishing data to the VOLTTRON message bus: - - * **Point Values (default topic: dnp3/point)**: As MesaAgent communicates with the Master, - it publishes received point values on the VOLTTRON message bus. - - * **Functions (default topic: mesa/function)**: When MesaAgent receives a function step - with a "publish" action value, it publishes the current state of the function (all - steps received to date) on the VOLTTRON message bus. - - * **Outstation status (default topic: mesa/status)**: If the status of the MesaAgent outstation - changes, for example if it is restarted, it publishes its new status on the VOLTTRON message bus. - -Data Dictionaries of Point and Function Definitions -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -MesaAgent loads and uses data dictionaries of point and function definitions, -which are maintained by agreement between the (MesaAgent) Outstation and the DNP3 Master. -The data dictionaries are stored in the agent's registry. - -Current Point Values -~~~~~~~~~~~~~~~~~~~~ - -MesaAgent tracks the most-recently-received value for each point definition in its -data dictionary, regardless of whether the point value's source is a VOLTTRON RPC call or -a message from the DNP3 Master. - -Agent Configuration -~~~~~~~~~~~~~~~~~~~ - -The MesaAgent configuration specifies the following fields: - - - **local_ip**: (string) - Outstation's host address (DNS resolved). - Default: 0.0.0.0. - - **port**: (integer) - Outstation's port number - the port that the remote endpoint (Master) is listening on. - Default: 20000. - - **point_topic**: (string) - VOLTTRON message bus topic to use when publishing DNP3 point values. - Default: dnp3/point. - - **function_topic**: (string) - Message bus topic to use when publishing MESA-ESS functions. - Default: mesa/function. - - **outstation_status_topic**: (string) - Message bus topic to use when publishing outstation status. - Default: mesa/outstation_status. - - **all_functions_supported_by_default**: (boolean) - When deciding whether to reject points for unsupported - functions, ignore the values of their 'supported' points: simply treat all functions as - supported. Used primarily during testing. - Default: False. - - **function_validation**: (boolean) - When deciding whether to support sending single points to MesaAgent. - If function_validation is True, MesaAgent will raise an exception when receiving any - invalid point in current function. If function_validation is False, MesaAgent will - reset current function to None instead of raising the exception. - Default: False. - - **outstation_config**: (dictionary) - Outstation configuration parameters. All are optional. Parameters include: - - -- **database_sizes**: (integer) - Size of each outstation database buffer. - Default: 10. - -- **event_buffers**: (integer) - Size of the database event buffers. - Default: 10. - -- **allow_unsolicited**: (boolean) - Whether to allow unsolicited requests. - Default: True. - -- **link_local_addr**: (integer) - Link layer local address. - Default: 10. - -- **link_remote_addr**: (integer) - Link layer remote address. - Default: 1. - -- **log_levels**: (list) - List of bit field names (OR'd together) that filter what gets logged by DNP3. - Default: [NORMAL]. Possible values: ALL, ALL_APP_COMMS, ALL_COMMS, NORMAL, NOTHING. - -- **threads_to_allocate**: (integer) - Threads to allocate in the manager's thread pool. - Default: 1. - -A sample MesaAgent configuration file is available in **services/core/DNP3Agent/mesaagent.config**. - -Installing MesaAgent --------------------- - -To install MesaAgent, please consult the installation advice in **services/core/DNP3Agent/README.md**, -which includes advice on installing **pydnp3**, a library upon which DNP3Agent depends. - -After installing libraries as described in README.md, -the agent can be installed from a command-line shell as follows: - -.. code-block:: python - - $ export VOLTTRON_ROOT= - $ cd $VOLTTRON_ROOT - $ source services/core/DNP3Agent/install_mesa_agent.sh - -README.md specifies a default agent configuration, which can be overridden as needed. - -Here are some things to note when installing MesaAgent: - - - MesaAgent source code resides in, and is installed from, a dnp3 subdirectory, thus allowing it - to be implemented as a subclass of the base DNP3 agent class. - When installing MesaAgent, inform the install script that it should build from the - mesa subdirectory by exporting the following environment variable: - - -- $ export AGENT_MODULE=dnp3.mesa.agent - - - The agent's point and function definitions must be loaded into the agent's config store. See the - install_mesa_agent.sh script for an example of how to load them. - -For Further Information ------------------------ - -Questions? Please contact: - - - Anh Nguyen at ChargePoint (anh.nguyen@chargepoint.com) diff --git a/docs/source/specifications/pubsub-enhancement.rst b/docs/source/specifications/pubsub-enhancement.rst deleted file mode 100644 index 210b192e55..0000000000 --- a/docs/source/specifications/pubsub-enhancement.rst +++ /dev/null @@ -1,278 +0,0 @@ -.. _PubSubEnhancement: - -PubSub Communication Between Remote Platforms -============================================= - -This document describes pubsub communication between different platforms. The goal of this specification is to improve -the current setup of having a forward historian to forward local pubsub messages to remote platforms. So the agents -interested in receiving PubSub messages from external platforms will not need to have a forward historian running in -source platform to forward pubsub messages to the interested destination platforms. The VIP router will now do all the -work; it shall use Routing Service to internally manage connections with external VOLTTRON platforms and use PubSubService -for the actual inter platform PubSub communication. -For Future: -The specification will need to be extended to support PubSub communication between platforms that are multiple hops away. -The VIP router of each platform shall need to maintain a routing table and use it to forward pubsub messages to subscribed -platforms that are multiple hops away. The routing table shall contain shortest path to each destination platform. - - -Functional Capabilities -*********************** - -1. Each VOLTTRON platform shall have a list of other VOLTTRON platforms that it has to connect to in a config file. - -2. Routing Service of each platform connects to other platforms on startup. - -3. The Routing Service in each platform is responsible for connecting to (and also initiate reconnection if required), -monitoring and disconnecting from each external platform. The function of Routing Service is explained in detail in -Routing Service section. - -4. Platform to platform pubsub communication shall be using VIP protocol with the subsystem frame set to "pubsub". - -5. PubSubService of each VOLTTRON platform shall maintain a list of local and external subscriptions. - -6. Each VIP router sends its list of external subscriptions to other connected platforms in the following cases - - a. On startup - - b. When a new subscription is added - - c. When an existing subscription is removed - - d. When a new platform gets connected - -7. When a remote platform disconnection is detected, all stale subscriptions related to that platform shall be removed. - -8. Whenever an agent publishes a message to a specific topic, the PubSubService on the local platform first checks the -topic against its list of local subscriptions. If a local subscription exists, it sends the publish message to -corresponding local subscribers. - -9. PubSubService shall also check the topic against list of external subscriptions. If an external subscription exists, -it shall use Routing Service to send the publish message to the corresponding external platform. - -10. Whenever a router receives messages from other platform, it shall check the destination platform in the incoming -message. - - a. If the destination platform is the local platform, it hand overs the publish message to PubSubService which - checks the topic against list of external subscriptions. If an external subscription matches, PubSubService forwards - the message to all the local subscribers subscribed to that topic. - - b. If the destination platform is not the local platform, it discards the message. - - -Routing Service -+++++++++++++++ - -1. Routing Service shall maintain connection status (CONNECTING, CONNECTED, DISCONNECTED etc.) for each external platform. - -2. In order to establish connection with an external VOLTTRON platform, the server key of the remote platform is needed. - The Routing Service shall connect to an external platform once it obtains the server key for that platform from the - KeyDiscoveryService. - -3. Routing Service shall exchange "hello"/"welcome" handshake messages with the newly connected remote platform to - confirm the connection. It shall use VIP protocol with the subsystem frame set to “routing_table” for the handshake - messages. - -4. Routing Service shall monitor the connection status and inform PubSubService whenever a remote platform gets - connected/disconnected. - - -For Future - -1. Each VIP router shall exchange its routing table with its connected platforms on startup and whenever a new platform - gets connected or disconnected. - -2. The router shall go through each entry in the routing table that it received from other platforms and calculate the - shortest, most stable path to each remote platform. It then sends the updated routing table to other platforms for - adjustments in the forwarding paths (in their local routing table) if any. - -3. Whenever a VIP router detects a new connection, it adds an entry into the routing table and sends updated routing - table to its neighboring platforms. Each router in the other platforms shall update and re-calculate the forwarding - paths in its local routing table and forward to rest of the platforms. - -4. Similarly, whenever a VIP router detects a remote platform disconnection, it deletes the entry in the routing table - for that platform and forwards the routing table to other platforms to do the same. - - -KeyDiscovery Service -++++++++++++++++++++ - -1. Each platform tries to obtain the platform discovery information - platform name, VIP address and server key of - remote VOLTTRON platforms through HTTP discovery service at startup. - -2. If unsuccessful, it shall make regular attempts to obtain discovery information until successful. - -3. The platform discovery information shall then be sent to the Routing Service using VIP protocol with subsystem - frame set to "routing_table". - - -Messages for Routing Service -**************************** -Below shows example messages that are applicable to the Routing Service. - -Message sent by KeyDiscovery Service containing the platform discovery information (platform name, VIP address and -server key) of a remote platform. -:: - - +-+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+----+ - | | Empty user ID frame - +-+----+ - | 0001 | Request ID, for example "0001" - +---------------+ - | routing_table | Subsystem, "routing_table" - +---------------+----------------+ - | normalmode_platform_connection | Type of operation, "normalmode_platform_connection" - +--------------------------------+ - | platform discovery information | - | of external platform | platform name, VIP address and server key of external platform - +--------------------------------+ - | platform name | Remote platform for which the server key belongs to. - +---------------------+ - - -Handshake messages between two newly connected external VOLTTRON platform to confirm successful connection. - -Message from initiating platform -:: - - +-+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+----+ - | | Empty user ID frame - +-+----+ - | 0001 | Request ID, for example "0001" - +--------------++ - | routing_table | Subsystem, "routing_table" - +---------------+ - | hello | Operation, "hello" - +--------+ - | hello | Hello handshake request frame - +--------+------+ - | platform name | Platform initiating a "hello" - +---------------+ - - -Reply message from the destination platform -:: - - +-+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+----+ - | | Empty user ID frame - +-+----+ - | 0001 | Request ID, for example "0001" - +--------------++ - | routing_table | Subsystem, "routing_table" - +--------+------+ - | hello | Operation, "hello" - +--------++ - | welcome | Welcome handshake reply frame - +---------+-----+ - | platform name | Platform sending reply to "hello" - +---------------+ - -Messages for PubSub communication -********************************* -The VIP routers of each platform shall send pubsub messages between platforms using VIP protocol message semantics. -Below shows an example of external subscription list message sent by VOLTTRON platform V1 router to VOLTTRON platform V2. - -:: - - +-+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+---------+ - |V1 user id | Empty user ID frame - +-+---------+ - | 0001 | Request ID, for example "0001" - +-------++ - | pubsub | Subsystem, "pubsub" - +-------------+-+ - | external_list | Operation, "external_list" in this case - +---------------+ - | List of | - | subscriptions | Subscriptions dictionary consisting of VOLTTRON platform id and list of topics as - +---------------+ key - value pairings, for example: { "V1": ["devices/rtu3"]} - - -This shows an example of external publish message sent by VOLTTRON platform V2 router to VOLTTRON platform V1. -:: - - - +-+ - | | Empty recipient frame - +-+----+ - | VIP1 | Signature frame - +-+---------+ - |V1 user id | Empty user ID frame - +-+---------+ - | 0001 | Request ID, for example "0001" - +-------++ - | pubsub | Subsystem, "pubsub" - +------------------+ - | external_publish | Operation, "external_publish" in this case - +------------------+ - | topic | Message topic - +------------------+ - | publish message | Actual publish message frame - +------------------+ - -API -*** - - -Methods for Routing Service -+++++++++++++++++++++++++++ - -external_route( ) - This method receives message frames from external platforms, checks the subsystem frame and -redirects to appropriate subsystem (routing table, pubsub) handler. It shall run within a separate thread and get -executed whenever there is a new incoming message from other platforms. - -setup( ) - This method initiates socket connections with all the external VOLTTRON platforms configured in the config -file. It also starts monitor thread to monitor connections with external platforms. - -handle_subsystem( frames ) - Routing Service subsytem handler to handle serverkey message from KeyDiscoveryService and -"hello/welcome" handshake message from external platforms. - -send_external( instance_name, frames ) - This method sends input message to specified VOLTTRON platform/instance. - -register( type, handler ) - Register method for PubSubService to register for connection and disconnection events. - -disconnect_external_instances( instance_name ) - Disconnect from specified VOLTTRON platform. - -close_external_connections( ) - Disconnect from all external VOLTTRON platforms. - -get_connected_platforms( ) - Return list of connected platforms. - - -Methods for PubSubService -+++++++++++++++++++++++++ - -external_platform_add( instance_name ) - Send external subscription list to newly connected external VOLTTRON platform. - -external_platform_drop( instance_name ) - Remove all subscriptions for the specified VOLTTRON platform - -update_external_subscriptions( frames ) - Store/Update list of external subscriptions as per the subscription list -provided in the message frame. - -_distribute_external( frames ) - Publish the message all the external platforms that have subscribed to the topic. It -uses send_external_pubsub_message() of router to send out the message. - -external_to_local_publish( frames ) - This method retrieves actual message from the message frame, checks the message -topic against list of external subscriptions and sends the message to corresponding subscribed agents. - - -Methods for agent pubsub subsystem -++++++++++++++++++++++++++++++++++ - -subscribe(peer, prefix, callback, bus='', all_platforms=False) - The existing 'subscribe' method is modified to include -optional keyword argument - 'all_platforms'. If 'all_platforms' is set to True, the agent is subscribing to topic from -local publisher and from external platform publishers. diff --git a/docs/source/specifications/webframework.rst b/docs/source/specifications/webframework.rst deleted file mode 100644 index d96d27322e..0000000000 --- a/docs/source/specifications/webframework.rst +++ /dev/null @@ -1,82 +0,0 @@ -.. _WebFramework: - -VOLTTRON Web Framwwork -====================== - -This document describes the interaction between web enabled agents and the MasterWebService agent. - -The web framework enables agent developers to expose JSON, static, and websocket endpoints. - -Web SubSystem -+++++++++++++ - -Enabling --------- - -The web subsystem is not enabled by default as it is only required by a small subset of agents. To enable the web subsystem the platform instance must have an enabled the web server and the agent must pass enable_web=True to the agent constructor. - -Methods -------- - -The web subsystem allows an agent to register three different types of endpoints; path based, JSON and websocket. A path based endpoint allows the agent to specify a prefix and a static path on the file system to serve static files. The prefix can be a regular expression. - -.. note:: The web subsystem is only available when the constructor contains enable_web=True. - -The below examples are within the context of an object that has extended the :class:`volttron.platform.vip.agent.Agent` base class. - -.. note:: For all endpoint methods the first match wins. Therefore ordering which endpoints are registered first becomes important. - -.. code-block:: python - - @Core.receiver('onstart') - def onstart(self, sender, **kwargs): - """ - Allow serving of static content from /var/www - """ - self.vip.web.register_path(r'^/vc/.*', '/var/www') - -JSON endpoints allows an agent to serve data responses to specific queries from a web client.non-static responses. The agent will pass a callback to the subsystem which will be called when the endpoint is triggered. - -.. code-block:: python - - def jsonrpc(env, data): - """ - The main entry point for jsonrpc data - """ - return {'dyamic': 'data'} - - @Core.receiver('onstart') - def onstart(self, sender, **kwargs): - """ - Register the /vc/jsonrpc endpoint for doing json-rpc based methods - """ - self.vip.web.register_endpoint(r'/vc/jsonrpc', self.jsonrpc) - - -Websocket endpoints allow bi-directional communication between the client and the server. Client connections can be authenticated during the opening of a websocket through the response of an open callback. - - -.. code-block:: python - - def _open_authenticate_ws_endpoint(self, fromip, endpoint): - """ - A client attempted to open an endpoint to the server. - - Return True or False if the endpoint should be allowed. - - :rtype: bool - """ - return True - - def _ws_closed(self, endpoint): - _log.debug("CLOSED endpoint: {}".format(endpoint)) - - def _ws_received(self, endpoint, message): - _log.debug("RECEIVED endpoint: {} message: {}".format(endpoint, - message)) - - @Core.receiver('onstart') - def onstart(self, sender, **kwargs): - self.vip.web.register_websocket(r'/vc/ws', self.open_authenticate_ws_endpoint, self._ws_closed, self._ws_received) - - diff --git a/docs/source/styleguide.rst b/docs/source/styleguide.rst deleted file mode 100644 index e1ff2c775b..0000000000 --- a/docs/source/styleguide.rst +++ /dev/null @@ -1,66 +0,0 @@ -.. _styleguide: -.. Reference anchor should be the same as the filename - - -================================== -This is the main title of the page -================================== - -.. _code blocks: - -Example Code Blocks --------------------- - -Use bash for commands or user actions - -.. code-block:: bash - - ls -al - - -Use this for the results of a command - -.. code-block:: console - - total 5277200 - drwxr-xr-x 22 volttron volttron 4096 Oct 20 09:44 . - drwxr-xr-x 23 volttron volttron 4096 Oct 19 18:39 .. - -rwxr-xr-x 1 volttron volttron 164 Sep 29 17:08 agent-setup.sh - drwxr-xr-x 3 volttron volttron 4096 Sep 29 17:13 applications - - -Use this when Python source code is displayed - -.. code-block:: python - - @RPC.export - def status_agents(self): - return self._aip.status_agents() - - -Directives ----------- - -Taken from this `reference `_ - -.. DANGER:: - - Something very bad! - -.. tip:: - - This is something good to know - -Some other directives -~~~~~~~~~~~~~~~~~~~~~ - -"attention", "caution", "danger", "error", "hint", "important", "note", "tip", "warning", "admonition" - -You can use anchors for internal :ref:`references ` too - -Other resources ---------------- - -- http://pygments.org/docs/lexers/ -- http://documentation-style-guide-sphinx.readthedocs.io/en/latest/style-guide.html -- http://www.sphinx-doc.org/en/stable/markup/code.html diff --git a/docs/source/supporting/examples/ConfigActuation.rst b/docs/source/supporting/examples/ConfigActuation.rst deleted file mode 100644 index 6949b19f7a..0000000000 --- a/docs/source/supporting/examples/ConfigActuation.rst +++ /dev/null @@ -1,22 +0,0 @@ -.. _ConfigActuation: - -Config Actuation Example -========================= - -The ConfigActuation example attempts to set points on a device when files -are added or updated in its :ref:`configuration store `. - - -Configuration -------------- - -The name of a configuration file must match the name of the device to -be actuated. The configuration file is a json dictionary of point name and value -pairs. Any number of points on the device can be listed in the config. - -.. code-block:: python - - { - "point0": value, - "point1": value - } diff --git a/docs/source/unused/deprecated/Logging.rst b/docs/source/unused/deprecated/Logging.rst deleted file mode 100644 index e3c1b2a96f..0000000000 --- a/docs/source/unused/deprecated/Logging.rst +++ /dev/null @@ -1,64 +0,0 @@ -Data Logging ------------- - -A mechanism allowing agents to store timeseries data has been provided. -In VOLTTRON 2.0 this facility was provided by an sMAP agent but it has -now been folded into the new Historians. This service still uses the old -format to maintain compatibility. - -Data Logging Format -~~~~~~~~~~~~~~~~~~~ - -Data sent to the data logger should be sent as a JSON object that -consists of a dictionary of dictionaries. The keys of the outer -dictionary are used as the points to store the data items. The inner -dictionary consists of 2 required fields and 1 optional. The required -fields are "Readings" and "Units". Readings contains the data that will -be written. It may contain either a single value, or a list of lists -which consists of timestamp/value pairs. Units is a string that -identifies the meaning of the scale values of the data. The optional -entry is data\_type, which indicates the type of the data to be stored. -This may be either long or double. - -:: - - { - "test3": { - "Readings": [[1377788595, 1.1],[1377788605,2.0]], - "Units": "KwH", - "data_type": "double" - }, - "test4": { - "Readings": [[1377788595, 1.1],[1377788605,2.0]], - "Units": "TU", - "data_type": "double" - } - } - -Example Code -~~~~~~~~~~~~ - -:: - - headers[headers_mod.FROM] = self._agent_id - headers[headers_mod.CONTENT_TYPE] = headers_mod.CONTENT_TYPE.JSON - - mytime = int(time.time()) - - content = { - "listener": { - "Readings": [[mytime, 1.0]], - "Units": "TU", - "data_type": "double" - }, - "hearbeat": { - "Readings": [[mytime, 1.0]], - "Units": "TU", - "data_type": "double" - } - } - - - - self.publish('datalogger/log/', headers, json.dumps(content)) - diff --git a/docs/source/unused/deprecated/MultiBuildingMessaging.rst b/docs/source/unused/deprecated/MultiBuildingMessaging.rst deleted file mode 100644 index 6aa877c75e..0000000000 --- a/docs/source/unused/deprecated/MultiBuildingMessaging.rst +++ /dev/null @@ -1,88 +0,0 @@ -MultiBuilding Agent -=================== - -This agent has been superseded by the VIP functionality introduced in 3.0 and should be considered deprecated. However it is still a usable agent. - -Multi-building (or multi-node) messaging is implemented as a -service-style agent. Its use is optional and it can be enabled/disabled -by simply enabling/disabling the multibuilding service agent. It is -easily configured using the service configuration file and provides -several new topics for use in the local agent exchange bus. - -Configuration -~~~~~~~~~~~~~ - -The service configuration file may contain the declarations below: - -- | *building-publish-address*: - | A ØMQ address on which to listen for messages published by other - nodes. Defaults to 'tcp://0.0.0.0:9161'. - -- | *building-subscribe-address*: - | A ØMQ address on which to listen for messages subscribed to by - other nodes. Defaults to 'tcp://0.0.0.0:9160'. - -- | *public-key*, *secret-key*: - | Curve keypair (create with zmq.curve\_keypair()) to use for - authentication and encryption. If not provided, all communications - will be unauthenticated and unencrypted. - -- | *hosts*: - | A mapping (dictionary) of building names to publish/subscribe - addresses. Each entry is of the form: - - :: - - "CAMPUS/BUILDING": {"pub": "PUB_ADDRESS", "sub": "SUB_ADDRESS", "public-key": "PUBKEY", "allow": "PUB_OR_SUB"} - -- CAMPUS/BUILDING: building for which the given parameters apply -- PUB\_ADDRESS: ØMQ address used to connect to the building for - publishing -- SUB\_ADDRESS: ØMQ address used to connect to the building - subscriptions -- PUBKEY: curve public key of the host used to authenticate incoming - connections -- PUB\_OR\_SUB: the string "pub" to allow publishing only or "sub" to - allow both publish and subscribe - -- *cleanup-period*: Frequency, in seconds, to check for and close stale - connections. Defaults to 600 seconds (10 minutes). - -- *uuid*: A UUID to use in the Cookie header. If not given, one will be - automatically generated. - -Sending and Receiving Inter-building Messages -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Three topics are provided for inter-building messaging: - -- | building/recv/\ ``CAMPUS/BUILDING/TOPIC``: - | Agents can subscribe to to this topic to receive messages sent to - ``TOPIC`` at the building specified by ``CAMPUS``/``BUILDING``. - -- | building/send/\ ``CAMPUS/BUILDING/TOPIC``: - | Agents can send messages to this topic to have them forwarded to - ``TOPIC`` at the building specified by ``CAMPUS``/``BUILDING``. - -- | building/error/\ ``CAMPUS/BUILDING/TOPIC``: - | Errors encountered during sending/receiving to/from the above two - topics will be sent over this topic. - -Limitations and Future Work -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -- | Requires opening multiple listening ports: - | It would be nice to multiplex all inter-building communications - over a single port to decrease the attack footprint and ease firewall - administration. - -- | There is limited authorization: - | a remote host can either publish or publish and subscribe. Perhaps - a filter list can be included to limit which topics a host may - subscribe to or publish on. - -- | Remote host lookup is kept in a static file: - | Ideally, host lookup would be done through some central directory - service, but that is not currently implemented. - - diff --git a/docs/source/unused/deprecated/MultiNodeExample.rst b/docs/source/unused/deprecated/MultiNodeExample.rst deleted file mode 100644 index 77a48eab0d..0000000000 --- a/docs/source/unused/deprecated/MultiNodeExample.rst +++ /dev/null @@ -1,141 +0,0 @@ -Multinode Example -================= - -The MultiNode example agent demonstrates how to setup and make use of -the [[MultiBuildingMessaging]] agent. - -For convenience, this example is setup to be run from a single machine -but could be easily modified to run off multiple systems. Multiple -instances of VOLTTRON can be run on a single machine with the proper -configuration. For this example, two separate VOLTTRON homes are setup -and the MultiBuilding service binds to different local addresses. -[[PlatformConfiguration]] shows how the VOLTTRON\_HOME is used for -platform directories. - -The example agent directory contains the config files for the example -agents and the multibuilding agents. Each is setup to know about the -other platform instance and contains its own pub and sub addresses. -Please see [[MultiBuildingMessaging]] for details on the configuration -file. - -MultiBuilding config: - -:: - - { - "building-publish-address": "tcp://127.0.0.1:12201", - "building-subscribe-address": "tcp://127.0.0.1:12202", - "uuid": "MultiBuildingService", - "hosts": { - "campus/platform1": { - "pub": "tcp://127.0.0.1:12201", - "sub": "tcp://127.0.0.1:12202" - }, - "campus/platform2": { - "pub": "tcp://127.0.1.1:12201", - "sub": "tcp://127.0.1.1:12202" - } - } - } - -Each GreeterAgent is setup with the other hosts it will be publishing to -in the publish\_heartbeat method. - -GreeterAgent config: - -:: - - { - "agentid": "Greeter1", - "receiving_platforms": ["platform2"] - } - -In order to run this example: - -- First activate the platform: - - :: - - . env/bin/activate - -- Then, create the directories which will be used by each platform as - its VOLTTRON\_HOME: - - :: - - mkdir ~/.platform1 - mkdir ~/.platform2 - -- Start the first platform: - - :: - - VOLTTRON_HOME=~/.platform1 volttron -vv -l platform1.log& - -- Build, configure, and install the multibuilding platform agent. The - Agent is installed with "multinode=" to tag the agent at the same - time it is installed. This is a convenient way to refer to the agent - later. - - :: - - VOLTTRON_HOME=~/.platform1 volttron-pkg package Agents/MultiBuilding - VOLTTRON_HOME=~/.platform1 volttron-pkg configure ~/.volttron/packaged/multibuildingagent-0.1-py2-none-any.whl Agents/MultiNodeExample/multicomm.service - VOLTTRON_HOME=~/.platform1 volttron-ctl install multinode=~/.volttron/packaged/multibuildingagent-0.1-py2-none-any.whl - -- Build, configure, and install the GreeterAgent - - :: - - VOLTTRON_HOME=~/.platform1 volttron-pkg package Agents/MultiNodeExample - VOLTTRON_HOME=~/.platform1 volttron-pkg configure ~/.volttron/packaged/greeteragent-0.1-py2-none-any.whl Agents/MultiNodeExample/agent1.config - VOLTTRON_HOME=~/.platform1 volttron-ctl install greeter=~/.volttron/packaged/greeteragent-0.1-py2-none-any.whl - -- Start the second platform: - - :: - - VOLTTRON_HOME=~/.platform2 volttron -vv -l platform2.log& - -- Build, configure, and install the MultiBuilding service for the - second platform - - :: - - VOLTTRON_HOME=~/.platform2 volttron-pkg package Agents/MultiBuilding - VOLTTRON_HOME=~/.platform2 volttron-pkg configure ~/.volttron/packaged/multibuildingagent-0.1-py2-none-any.whl Agents/MultiNodeExample/multicomm2.service - VOLTTRON_HOME=~/.platform2 volttron-ctl install multinode=~/.volttron/packaged/multibuildingagent-0.1-py2-none-any.whl - -- Build, configure, and install the GreeterAgent for the second - platform - - :: - - VOLTTRON_HOME=~/.platform2 volttron-pkg package Agents/MultiNodeExample - VOLTTRON_HOME=~/.platform2 volttron-pkg configure ~/.volttron/packaged/greeteragent-0.1-py2-none-any.whl Agents/MultiNodeExample/agent2.config - VOLTTRON_HOME=~/.platform2 volttron-ctl install greeter=~/.volttron/packaged/greeteragent-0.1-py2-none-any.whl - -- Start up the agents on both platforms by referring to them by the tag - they were installed with - - :: - - VOLTTRON_HOME=~/.platform1 volttron-ctl start --tag multinode - VOLTTRON_HOME=~/.platform1 volttron-ctl start --tag greeter - VOLTTRON_HOME=~/.platform2 volttron-ctl start --tag multinode - VOLTTRON_HOME=~/.platform2 volttron-ctl start --tag greeter - -- Check the logs for each platform for the presence of messages from - the other platform's GreeterAgent - - :: - - grep Greeter2 platform1.log -a - - ``2014-09-30 17:13:41,840 (greeteragent-0.1 13878) greeter.agent DEBUG: Topic: greetings/hello, Headers: Headers({u'Date': u'2014-10-01 00:13:41.831539Z', u'Cookie': u'Greeter2', u'AgentID': u'Greeter2', u'Content-Type': [u'application/json']}), Message: ['{"message":"HELLO from Greeter2!"}']`` - - :: - - grep Greeter1 platform2.log -a - - diff --git a/docs/source/volttron-topics/change-log/index.rst b/docs/source/volttron-topics/change-log/index.rst new file mode 100644 index 0000000000..c488332810 --- /dev/null +++ b/docs/source/volttron-topics/change-log/index.rst @@ -0,0 +1,15 @@ +.. _Change-Log: + +========== +Change Log +========== + +This section includes individual documents describing important changes to platform components, such as the RabbitMQ +message bus implementation. For information on specific changes, please refer to the corresponding document. + + +.. toctree:: + + scalability/scalability + version-history + upgrading-versions diff --git a/docs/source/devguides/scalability/Scalability-Improvements.rst b/docs/source/volttron-topics/change-log/scalability/scalability-improvements.rst similarity index 100% rename from docs/source/devguides/scalability/Scalability-Improvements.rst rename to docs/source/volttron-topics/change-log/scalability/scalability-improvements.rst diff --git a/docs/source/devguides/scalability/Scalability.rst b/docs/source/volttron-topics/change-log/scalability/scalability.rst similarity index 92% rename from docs/source/devguides/scalability/Scalability.rst rename to docs/source/volttron-topics/change-log/scalability/scalability.rst index cbfeafbe01..c2f2e31de8 100644 --- a/docs/source/devguides/scalability/Scalability.rst +++ b/docs/source/volttron-topics/change-log/scalability/scalability.rst @@ -56,10 +56,10 @@ Core Services once without issue. - ModBUS opens up a TCP connection for each communication with a device and then closes it when finished. This has the potential to hit the - limit for open file descriptors available to the master driver + limit for open file descriptors available to the platform driver process. (Before, each driver would run in a separate process, but that quickly uses up sockets available to the platform.) To protect - from this the master driver process raises the total allowed open + from this the platform driver process raises the total allowed open sockets to the hard limit. The number of concurrently open sockets is throttled at 80% of the max sockets. On most Linux systems this is about 3200. Once that limit is hit additional device communications @@ -109,7 +109,7 @@ platform (and how does it scale with the hardware)? developer-mode? (Option to turn off encryption, no longer available) - | Regulation Agent - | Every 10 minutes there is an action the master node determines. + | Every 10 minutes there is an action the VOLTTRON Central node determines. Duty cycle cannot be faster than that but is set to 2 seconds for simulation. | Some clients miss duty cycle signal @@ -119,10 +119,15 @@ platform (and how does it scale with the hardware)? Chaos router to introduce delays and dropped packets. -MasterNode needs to have vip address of clients. +MasterNode needs to have :term:`VIP` address of clients. Experiment capture historian - not listening to devices, just capturing results - Go straight to db to see how far behind other historians + +.. toctree:: + + scalability-improvements + testing-driver-scalability diff --git a/docs/source/devguides/scalability/Testing-Driver-Scalability.rst b/docs/source/volttron-topics/change-log/scalability/testing-driver-scalability.rst similarity index 91% rename from docs/source/devguides/scalability/Testing-Driver-Scalability.rst rename to docs/source/volttron-topics/change-log/scalability/testing-driver-scalability.rst index f83465cd19..74be46aeef 100644 --- a/docs/source/devguides/scalability/Testing-Driver-Scalability.rst +++ b/docs/source/volttron-topics/change-log/scalability/testing-driver-scalability.rst @@ -10,9 +10,9 @@ Goals with via a single Volttron platform. - Determine how scaling out affects the rate at which devices are scraped. i.e. How long from the first device scrape to the last? -- Determine the effects of socket throttling in the master driver on +- Determine the effects of socket throttling in the platform driver on the performance of Modbus device scraping. -- Measure total memory consumption of the Master Driver Agent at scale. +- Measure total memory consumption of the Platform Driver Agent at scale. - Measure how well the base history agent and one or more of the concrete agents handle a large amount of data. - Determine the volume of messages that can be achieved on the pubsub @@ -42,9 +42,9 @@ Launcher Script - The script (probably a fabric script) will push out code for and launch one or more test devices on one or more machines for the platform to scrape. -- The script will generate all of the master driver configuration files - to launch the master driver. -- The script may launch the master driver. +- The script will generate all of the platform driver configuration files + to launch the platform driver. +- The script may launch the platform driver. - The script may launch any other agents used to measure performance. Shutdown Script @@ -52,7 +52,7 @@ Shutdown Script - The script (probably the same fabric script run with different options) will shutdown all virtual drivers on the network. -- The script may shutdown the master driver. +- The script may shutdown the platform driver. - The script may shutdown any related agents. Performance Metrics Agent @@ -69,7 +69,7 @@ Additional Benefits ~~~~~~~~~~~~~~~~~~~ Most parts of a test bed run should be configurable. If a user wanted to -verify that the Master Driver worked, for instance, they could run the +verify that the Platform Driver worked, for instance, they could run the test bed with only a few virtual device to confirm that the platform is working correctly. @@ -134,7 +134,7 @@ Real Driver Benchmarking Scalability testing using actual MODBUS or BACnet drivers can be done using the virtual device applications in the scripts/scalability-testing/virtual-drivers/ directory. The -configuration of the master driver and launching of these virtual +configuration of the platform driver and launching of these virtual devices on a target machine can be done automatically with fabric. Setup @@ -181,7 +181,7 @@ on the target run When prompted enter the password for the target machine. Upon completion virtual devices will be running on the target and configuration files -written for the master driver. +written for the platform driver. Launch Test ^^^^^^^^^^^ diff --git a/docs/source/volttron-topics/change-log/upgrading-versions.rst b/docs/source/volttron-topics/change-log/upgrading-versions.rst new file mode 100644 index 0000000000..c0cf0fffa0 --- /dev/null +++ b/docs/source/volttron-topics/change-log/upgrading-versions.rst @@ -0,0 +1,127 @@ +.. _Upgrading-Versions: + +============================== +Upgrading Existing Deployments +============================== + +It is often recommended that users upgrade to the latest stable release of VOLTTRON for their deployments. Major +releases include helpful new features, bug fixes, and other improvements. Please see the guides below for upgrading +your existing deployment to the latest version. + + +VOLTTRON 7 +========== + +VOLTTRON 7 includes a migration from Python 2.7 to Python 3.6, as well as security features, new agents, and more. + +From 6.x +-------- + +From version 6.x to 7.x important changes have been made to the virtual environment as well as :term:`VOLTTRON_HOME`. +Take the following steps to upgrade: + +.. note:: + + The following instructions are for debian based Linux distributions (including Ubuntu and Linux Mint). For Red Hat, + Arch or other distributions, please use the corresponding package manager and commands. + +#. Install the VOLTTRON dependencies using the following command: + + .. code-block:: bash + + sudo apt install python3-dev python3-venv libffi-dev + + .. note:: + + This assumes you have existing 6.x dependencies installed. If you're unsure, refer to the + :ref:`platform installation ` instructions. + +#. Remove your existing virtual environment and run the bootstrap process. + + To remove the virtual environment, change directory to the VOLTTRON project root and run the `rm` command with the + ``-r`` option. + + .. code-block:: bash + + cd $VOLTTRON_ROOT/ + rm -r env + + Now you can use the included `bootstrap.py` script to set up the new virtual environment. For information on how + to install dependencies for VOLTTRON integrations, run the script with the ``--help`` option. + + .. code-block:: bash + + python3 bootstrap.py + + .. note:: + + Because the new environment uses a different version of Python, using the ``--force`` option with bootstrap will + throw errors. Please follow the above instructions when upgrading. + +#. Make necessary `VOLTTRON_HOME` changes + + + .. warning:: + + It is possible that some existing agents may continue to operate after the platform upgrade, however this is not + true for most agents, and it is recommended to reinstall the agent to ensure the agent wheel is compatible and + that there are no side-effects. + + A. Reinstall Agents + + It is recommended to reinstall all agents that exist on the platform to ensure the agent wheel is compatible with + Python3 VOLTTRON. In many cases, the configurations for version 7.x are backwards compatible with 6.x, requiring no + additional changes from the user. For information on individual agent configs, please read through that agent's + documentation. + + B. Modify Agent Directories + + .. note:: + + Modifying the agent directories is only necessary if not reinstalling agents. + + To satisfy the security requirements of the secure agents feature included with VOLTTRON 7, changes have been made + to the agent directory structure. + + 1. Keystore.json + + The agent keystore file has been moved from the agent's `agent-data` directory to the agent's `dist-info` + directory. To move the file, change directory to the agents install directory and use the `mv` command. + + .. code-block:: bash + + cd $VOLTTRON_HOME/agents// + mv agent.agent-data/keystore.json agent.dist-info/ + + 2. Historian Database + + Historians with a local database file have had their default location change do the `data` directory inside of + the agent's install directory. It is recommended to relocate the file from $VOLTTRON_HOME/data to the agent's + data directory. Alternatively, a path can be used if the user the agent is run as (the VOLTTRON user for + deployments not using the secure agents feature) has read-write permissions for the file. + + .. code-block:: bash + + mv $VOLTTRON_HOME/data/historian.sqlite $VOLTTRON_HOME/agents///data + + .. warning:: + + If not specifying a path to the database, the database will be created in the agent's data directory. This + is important if removing or uninstalling the historian as the database file will be removed when the agent + dir is cleaned up. Copy the database file to a temporary directory, reinstall the agent, and move the + database file back to the agent's data directory + +#. Forward Historian + + For deployments which are passing data from 6.x VOLTTRON to the latest 7.x release, some users will experience + timeout issues with the Forward Historian. By updating the 6.x deployment to the latest from the releases/6.x + branch, and restarting the platform and forwarder, this issue can be resolved. + + .. code-block:: bash + + . env/bin/activate + ./stop-volttron + git pull + git checkout releases/6.x + ./start-volttron + vctl start diff --git a/docs/source/volttron-topics/change-log/version-history.rst b/docs/source/volttron-topics/change-log/version-history.rst new file mode 100644 index 0000000000..1671bfc04d --- /dev/null +++ b/docs/source/volttron-topics/change-log/version-history.rst @@ -0,0 +1,164 @@ +.. _Version-History: + +=============== +Version History +=============== + +VOLTTRON 1.0 – 1.2 +================== + +- Agent execution platform +- Message bus +- Modbus and BACnet drivers +- Historian +- Data logger +- Device scheduling +- Device actuation +- Multi-node communication +- Weather service + + +VOLTTRON 2.0 +============ + +- Advanced Security Features +- Guaranteed resource allocation to agents using execution contracts +- Signing and verification of agent packaging +- Agent mobility +- Admin can send agents to another platform +- Agent can request to move +- Enhanced command framework + + +VOLTTRON 3.0 +============ + +- Modularize Data Historian +- Modularize Device Drivers +- Secure and accountable communication using the VIP +- Web Console for Monitoring and Administering VOLTTRON Deployments + + +VOLTTRON 4.0 +============ + +- Documentation moved to ReadTheDocs +- VOLTTRON Configuration Wizard +- Configuration store to dynamically configure agents +- Aggregator agent for aggregating topics +- More reliable remote install mechanism +- UI for device configuration +- Automatic registration of VOLTTRON instances with management agent + + +VOLTTRON 5.0 +============ + +- Tagging service for attaching metadata to topics for simpler retrieval +- Message bus performance improvement +- Multi-platform publish/subscribe for simpler coordination across platforms +- Drivers contributed back for SEP 2.0 and ChargePoint EV + + +VOLTTRON 6.0 +============ + +- Maintained backward compatibility with communication between zmq and rmq deployments. +- Added DarkSky Weather Agent +- Web Based Additions +- Added CSR support for multiplatform communication +- Added SSL support to the platform for secure communication +- Backported SSL support to zmq based deployments. +- Upgraded VC to use the platform login. +- Added docker support to the test environment for easier Rabbitmq testing. +- Updated volttron-config (vcfg) to support both RabbitMQ and ZMQ including https based instances. +- Added test support for RabbitMQ installations of all core agents. +- Added multiplatform (zmq and rmq based platform) testing. +- Integrated RabbitMQ documentation into the core documentation. + + +VOLTTRON 7.0rc1 +=============== + + +Python3 Upgrade +--------------- + +- Update libraries to appropriate and compatible versions +- String handling efficiency +- Encode/Decode of strings has been simplified and centralized +- Added additional test cases for frame serialization in ZMQ +- Syntax updates such difference in handling exceptions, dictionaries, sorting lists, pytest markers etc. +- Made bootstrap process simpler +- Resolved gevent monkey patch issues when using third party libraries + + +RabbitMQ Message Bus +-------------------- + +- Client code for integrating non-VOLTTRON applications with the message bus + available at: https://github.com/VOLTTRON/external-clients-for-rabbitmq +- Includes support for MQTT, non-VOLTTRON Python, and Java-based RabbitMQ + clients + + +Config store secured +-------------------- + +- Agents can prevent other agents from modifying their configuration store entry + + +Known Issues which will be dealt with for the final release: +------------------------------------------------------------ + +- Python 3.7 has conflicts with some libraries such as gevent +- The VOLTTRON Central agent is not fully integrated into Python3 +- CFFI library has conflicts on the Raspian OS which interferes with bootstrapping + + +VOLTTRON 7.0 Full Release +========================= + +This is a full release of the 7.0 version of VOLTTRON which has been refactored to work with Python3. This release +incorporates community feedback from the release candidate as well as new contributions and features. +Major new features and highlights since the release candidate include: + +* Added secure agent user feature which allows agents to be launched as a user separate from the platform. This + protects the platform against malformed or malicious agents accessing platform level files +* Added a driver to interface with the Ecobee smart thermostat and make data available to agents on the platform +* Updated VOLTTRON Central UI to work with Python3 +* Added web support to authenticate remote VOLTTRON ZMQ message bus-based connections +* Updated ZMQ-based multiplatform RPC with Python 3 +* To reduce installation size and complexity, fewer services are installed by default +* MasterDriver dependencies are not installed by default during bootstrap. To use MasterDriver, please use the + following command: + + .. code-block:: bash + + python3 bootstrap.py --driver + +* Web dependencies are not installed by default during bootstrap. To use the MasterWeb service, please use the + following command: + + .. code-block:: bash + + python3 bootstrap.py --web + +* Added initial version of test cases for `volttron-cfg` (`vcfg`) utility +* On all arm-based systems, `libffi` is now a required dependency, this is reflected in the installation instructions +* On arm-based systems, Raspbian >= 10 or Ubuntu >= 18.04 is required +* Updated examples and several contributed features to Python 3 +* Inclusion of docker in test handling for databases +* A new `/gs` endpoint to access platform services without using Volttron Central through Json-RPC +* A new SCPAgent to transfer files between two remote systems + +Known Issues +------------ + +* Continued documentation updates to ensure correctness +* Rainforest Eagle driver is not yet upgraded to Python3 +* A bug in the Modbus TK library prevents creating connections from 2 different masters to a single slave. +* BACnet Proxy Agent and BACnet auto configuration scripts require the version of BACPypes installed in the virtual + environment of VOLTTRON to be version 0.16.7. We have pinned it to version 0.16.7 since it does not work properly in + later versions of BACPypes. +* VOLTTRON 7.0 code base is not fully tested in Ubuntu 20.04 LTS so issues with this combination have not been addressed diff --git a/docs/source/volttron-topics/troubleshooting/index.rst b/docs/source/volttron-topics/troubleshooting/index.rst new file mode 100644 index 0000000000..ecbc727fa0 --- /dev/null +++ b/docs/source/volttron-topics/troubleshooting/index.rst @@ -0,0 +1,13 @@ +.. _Troubleshooting: + +=============== +Troubleshooting +=============== + +This section contains individual documents intended to help the user troubleshoot various platform components. For +troubleshooting of individual agents and drivers please refer to the corresponding document for each. + + +.. toctree:: + + troubleshooting-rmq diff --git a/docs/source/volttron-topics/troubleshooting/troubleshooting-rmq.rst b/docs/source/volttron-topics/troubleshooting/troubleshooting-rmq.rst new file mode 100644 index 0000000000..a471678a93 --- /dev/null +++ b/docs/source/volttron-topics/troubleshooting/troubleshooting-rmq.rst @@ -0,0 +1,93 @@ +.. _Troubleshooting-RMQ: + +======================== +RabbitMQ Troubleshooting +======================== + + +Check the status of the federation connection +--------------------------------------------- + +.. code-block:: bash + + $RABBITMQ_HOME/sbin/rabbitmqctl eval 'rabbit_federation_status:status().' + +If everything is properly configured, then the status is set to `running`. If not look for the error status. Some of +the typical errors are: + +a. **failed_to_connect_using_provided_uris** - Check if RabbitMQ user is created in downstream server node. Refer to + step 3-b of federation setup + +b. **unknown ca** - Check if the root CAs are copied to all the nodes correctly. Refer to step 2 of federation setup + +c. **no_suitable_auth_mechanism** - Check if the AMPQ/S ports are correctly configured. + + +Check the status of the shovel connection +----------------------------------------- + +.. code-block:: bash + + RABBITMQ_HOME/sbin/rabbitmqctl eval 'rabbit_shovel_status:status().' + +If everything is properly configured, then the status is set to `running`. If not look for the error status. Some of +the typical errors are: + +a. **failed_to_connect_using_provided_uris** - Check if RabbitMQ user is created in subscriber node. Refer to step 3-b + of shovel setup + +b. **unknown ca** - Check if the root CAs are copied to remote servers correctly. Refer to step 2 of shovel setup + +c. **no_suitable_auth_mechanism** - Check if the AMPQ/S ports are correctly configured. + + +Check the RabbitMQ logs for any errors +--------------------------------------- + +.. code-block:: bash + + tail -f /rabbitmq.log + + +Rabbitmq startup hangs +---------------------- + +a. Check for errors in the RabbitMQ log. There is a `rabbitmq.log` file in your VOLTTRON source directory that is a + symbolic link to the RabbitMQ server logs. + +b. Check for errors in syslog (`/var/log/syslog` or `/var/log/messages`) + +c. If there are no errors in either of the logs, restart the RabbitMQ server in foreground and see if there are any + errors written on the console. Once you find the error you can kill the process by entering `Ctl+C`, fix the error + and start rabbitmq again using ``./start-rabbitmq`` from VOLTTRON source directory. + + .. code-block:: bash + + ./stop-volttron + ./stop-rabbitmq + @RABBITMQ_HOME/sbin/rabbitmq-server + + +SSL trouble shooting +-------------------- +There are few things that are essential for SSL certificates to work right. + +a. Please use a unique common-name for CA certificate for each VOLTTRON instance. This is configured under + `certificate-data` in the `rabbitmq_config.yml` or if no yml file is used while configuring a VOLTTRON single + instance (using ``vcfg --rabbitmq single``). Certificate generated for agent will automatically get agent's VIP + identity as the certificate's common-name + +b. The host name in the SSL certificate should match hostname used to access the server. For example, if the fully + qualified domain name was configured in the `certificate-data`, you should use the fully qualified domain name to + access RabbitMQ's management url. + +c. Check if your system time is correct especially if you are running virtual machines. If the system clock is not + right, it could lead to SSL certificate errors + + +DataMover troubleshooting +------------------------- + +If output from `volttron.log` is not as expected check for ``{'alert_key': 'historian_not_publishing'}`` in the callee +node's `volttron.log`. Most likely cause is the historian is not running properly or credentials between caller and +callee nodes was not set properly. diff --git a/docs/source/volttron_applications/files/1-simulation-out.jpg b/docs/source/volttron-topics/volttron-applications/files/1-simulation-out.jpg similarity index 100% rename from docs/source/volttron_applications/files/1-simulation-out.jpg rename to docs/source/volttron-topics/volttron-applications/files/1-simulation-out.jpg diff --git a/docs/source/devguides/supporting/applications/files/1_Example_Passive_AFDD_Agent_Configuration_file.jpg b/docs/source/volttron-topics/volttron-applications/files/1_Example_Passive_AFDD_Agent_Configuration_file.jpg similarity index 100% rename from docs/source/devguides/supporting/applications/files/1_Example_Passive_AFDD_Agent_Configuration_file.jpg rename to docs/source/volttron-topics/volttron-applications/files/1_Example_Passive_AFDD_Agent_Configuration_file.jpg diff --git a/docs/source/volttron_applications/files/2-simulation-out.png b/docs/source/volttron-topics/volttron-applications/files/2-simulation-out.png similarity index 100% rename from docs/source/volttron_applications/files/2-simulation-out.png rename to docs/source/volttron-topics/volttron-applications/files/2-simulation-out.png diff --git a/docs/source/devguides/supporting/applications/files/2_File_Selection_Dialog_Box.jpg b/docs/source/volttron-topics/volttron-applications/files/2_File_Selection_Dialog_Box.jpg similarity index 100% rename from docs/source/devguides/supporting/applications/files/2_File_Selection_Dialog_Box.jpg rename to docs/source/volttron-topics/volttron-applications/files/2_File_Selection_Dialog_Box.jpg diff --git a/docs/source/volttron_applications/files/3-simulation-out.png b/docs/source/volttron-topics/volttron-applications/files/3-simulation-out.png similarity index 100% rename from docs/source/volttron_applications/files/3-simulation-out.png rename to docs/source/volttron-topics/volttron-applications/files/3-simulation-out.png diff --git a/docs/source/devguides/supporting/applications/files/3_Sample_of_CSV_Data.jpg b/docs/source/volttron-topics/volttron-applications/files/3_Sample_of_CSV_Data.jpg similarity index 100% rename from docs/source/devguides/supporting/applications/files/3_Sample_of_CSV_Data.jpg rename to docs/source/volttron-topics/volttron-applications/files/3_Sample_of_CSV_Data.jpg diff --git a/docs/source/devguides/supporting/applications/files/4-1_Example_DR_Agent_Configuration_File.jpg b/docs/source/volttron-topics/volttron-applications/files/4-1_Example_DR_Agent_Configuration_File.jpg similarity index 100% rename from docs/source/devguides/supporting/applications/files/4-1_Example_DR_Agent_Configuration_File.jpg rename to docs/source/volttron-topics/volttron-applications/files/4-1_Example_DR_Agent_Configuration_File.jpg diff --git a/docs/source/devguides/supporting/applications/files/4-2_Example_DR_Agent_Configuration_File.jpg b/docs/source/volttron-topics/volttron-applications/files/4-2_Example_DR_Agent_Configuration_File.jpg similarity index 100% rename from docs/source/devguides/supporting/applications/files/4-2_Example_DR_Agent_Configuration_File.jpg rename to docs/source/volttron-topics/volttron-applications/files/4-2_Example_DR_Agent_Configuration_File.jpg diff --git a/docs/source/volttron_applications/files/4-simulation-out.png b/docs/source/volttron-topics/volttron-applications/files/4-simulation-out.png similarity index 100% rename from docs/source/volttron_applications/files/4-simulation-out.png rename to docs/source/volttron-topics/volttron-applications/files/4-simulation-out.png diff --git a/docs/source/volttron-topics/volttron-applications/index.rst b/docs/source/volttron-topics/volttron-applications/index.rst new file mode 100755 index 0000000000..41cc71b982 --- /dev/null +++ b/docs/source/volttron-topics/volttron-applications/index.rst @@ -0,0 +1,20 @@ +.. _Volttron-Applications: + +============ +Applications +============ + +These resources summarize the use of the sample applications that have been created by VOLTTRON users. For detailed +information on these applications, refer to the report +`Transactional Network Platform `_. + +Note, as of VOLTTRON 4.0, applications are now in their own repository at: +https://github.com/VOLTTRON/volttron-applications + +.. toctree:: + :maxdepth: 2 + + sample-applications + simulated-drivers + openadr-vtn/index + matlab/driven-matlab-agent-walk-through diff --git a/docs/source/devguides/walkthroughs/DrivenMatlabAgent-Walkthrough.rst b/docs/source/volttron-topics/volttron-applications/matlab/driven-matlab-agent-walk-through.rst similarity index 100% rename from docs/source/devguides/walkthroughs/DrivenMatlabAgent-Walkthrough.rst rename to docs/source/volttron-topics/volttron-applications/matlab/driven-matlab-agent-walk-through.rst diff --git a/docs/source/devguides/walkthroughs/files/matlab-archi.png b/docs/source/volttron-topics/volttron-applications/matlab/files/matlab-archi.png similarity index 100% rename from docs/source/devguides/walkthroughs/files/matlab-archi.png rename to docs/source/volttron-topics/volttron-applications/matlab/files/matlab-archi.png diff --git a/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/1-simulation-out.jpg b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/1-simulation-out.jpg new file mode 100644 index 0000000000..784186670a Binary files /dev/null and b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/1-simulation-out.jpg differ diff --git a/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/2-simulation-out.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/2-simulation-out.png new file mode 100644 index 0000000000..d36dbeb827 Binary files /dev/null and b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/2-simulation-out.png differ diff --git a/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/3-simulation-out.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/3-simulation-out.png new file mode 100644 index 0000000000..abc59f1d1d Binary files /dev/null and b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/3-simulation-out.png differ diff --git a/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/4-simulation-out.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/4-simulation-out.png new file mode 100644 index 0000000000..06a6c83749 Binary files /dev/null and b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/4-simulation-out.png differ diff --git a/docs/source/core_services/openadr/files/vtn_add_customer_screen.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_add_customer_screen.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_add_customer_screen.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_add_customer_screen.png diff --git a/docs/source/core_services/openadr/files/vtn_create_event.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_create_event.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_create_event.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_create_event.png diff --git a/docs/source/core_services/openadr/files/vtn_create_new_site.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_create_new_site.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_create_new_site.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_create_new_site.png diff --git a/docs/source/core_services/openadr/files/vtn_create_program.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_create_program.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_create_program.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_create_program.png diff --git a/docs/source/core_services/openadr/files/vtn_event_overview.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_event_overview.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_event_overview.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_event_overview.png diff --git a/docs/source/core_services/openadr/files/vtn_export_report_data.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_export_report_data.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_export_report_data.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_export_report_data.png diff --git a/docs/source/core_services/openadr/files/vtn_login_screen.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_login_screen.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_login_screen.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_login_screen.png diff --git a/docs/source/core_services/openadr/files/vtn_offline_site.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_offline_site.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_offline_site.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_offline_site.png diff --git a/docs/source/core_services/openadr/files/vtn_overview_screen.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_overview_screen.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_overview_screen.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_overview_screen.png diff --git a/docs/source/core_services/openadr/files/vtn_overview_screen_with_customers.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_overview_screen_with_customers.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_overview_screen_with_customers.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_overview_screen_with_customers.png diff --git a/docs/source/core_services/openadr/files/vtn_site_detail_screen.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_site_detail_screen.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_site_detail_screen.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_site_detail_screen.png diff --git a/docs/source/core_services/openadr/files/vtn_site_with_ven_id.png b/docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_site_with_ven_id.png similarity index 100% rename from docs/source/core_services/openadr/files/vtn_site_with_ven_id.png rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/files/vtn_site_with_ven_id.png diff --git a/docs/source/core_services/openadr/index.rst b/docs/source/volttron-topics/volttron-applications/openadr-vtn/index.rst similarity index 95% rename from docs/source/core_services/openadr/index.rst rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/index.rst index 2036bc63d8..d5b52f58b7 100644 --- a/docs/source/core_services/openadr/index.rst +++ b/docs/source/volttron-topics/volttron-applications/openadr-vtn/index.rst @@ -32,7 +32,8 @@ pre-committed, an “optOut” may incur penalties.) .. toctree:: - :glob: - :maxdepth: 2 + :maxdepth: 2 - * + openadr-reference-app + vtn-server-guide + vtn-server-config diff --git a/docs/source/volttron_applications/Reference-App.rst b/docs/source/volttron-topics/volttron-applications/openadr-vtn/openadr-reference-app.rst similarity index 98% rename from docs/source/volttron_applications/Reference-App.rst rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/openadr-reference-app.rst index 5b56d17692..2f28486669 100644 --- a/docs/source/volttron_applications/Reference-App.rst +++ b/docs/source/volttron-topics/volttron-applications/openadr-vtn/openadr-reference-app.rst @@ -1,7 +1,8 @@ -.. _Reference-App: +.. _OpenADR-Reference-App: +===================== Reference Application -~~~~~~~~~~~~~~~~~~~~~ +===================== This reference application for VOLTTRON's OpenADR Virtual End Node (VEN) and its Simulation Subsystem demonstrates interactions between the VOLTTRON VEN agent and simulated devices. @@ -28,10 +29,10 @@ Eight VOLTTRON agents work together to run this simulation: while the ReferenceApp is managing the clock), the SimulationClockAgent runs the simulation until the agent is stopped. If no clock-speed multiplier has been provided, the simulation clock runs at normal wallclock speed. -3. **SimulationDriverAgent.** Like MasterDriverAgent, this agent is a front-end manager for +3. **SimulationDriverAgent.** Like PlatformDriverAgent, this agent is a front-end manager for device drivers. It handles get_point/set_point requests from other agents, and it periodically "scrapes" and publishes each driver's points. If a device driver has been - built to run under MasterDriverAgent, with a few minor modifications (detailed below) + built to run under PlatformDriverAgent, with a few minor modifications (detailed below) it can be adapted to run under SimulationDriverAgent. 4. **ActuatorAgent.** This agent manages write access to device drivers. Another agent may request a scheduled time period, called a Task, during which it controls a device. @@ -222,7 +223,7 @@ Point last_timestamp datetime VEN Configuration ================= -The VEN may be configured according to its documentation :ref:`here `. +The VEN may be configured according to its documentation :ref:`here `. Running the Simulation ====================== @@ -230,6 +231,7 @@ Running the Simulation There are three main ways to monitor the ReferenceApp simulation's progress. One way is to look at debug trace in VOLTTRON’s log output, for example: + :: 2018-01-08 17:41:30,333 (referenceappagent-1.0 23842) referenceapp.agent DEBUG: 2018-01-08 17:41:30.333260 Initializing drivers diff --git a/docs/source/core_services/openadr/VtnServerConfig.rst b/docs/source/volttron-topics/volttron-applications/openadr-vtn/vtn-server-config.rst similarity index 98% rename from docs/source/core_services/openadr/VtnServerConfig.rst rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/vtn-server-config.rst index 1992efdaec..3808be8d6d 100644 --- a/docs/source/core_services/openadr/VtnServerConfig.rst +++ b/docs/source/volttron-topics/volttron-applications/openadr-vtn/vtn-server-config.rst @@ -1,5 +1,6 @@ -.. _VtnServerConfig: +.. _VTN-Server-Config: +================================================== OpenADR VTN Server: Installation and Configuration ================================================== diff --git a/docs/source/core_services/openadr/VtnServerGuide.rst b/docs/source/volttron-topics/volttron-applications/openadr-vtn/vtn-server-guide.rst similarity index 96% rename from docs/source/core_services/openadr/VtnServerGuide.rst rename to docs/source/volttron-topics/volttron-applications/openadr-vtn/vtn-server-guide.rst index f99c059034..4f2b80c69a 100644 --- a/docs/source/core_services/openadr/VtnServerGuide.rst +++ b/docs/source/volttron-topics/volttron-applications/openadr-vtn/vtn-server-guide.rst @@ -1,18 +1,18 @@ -.. _VtnServerGuide: +.. _VTN-Server-Guide: OpenADR VTN Server: User Guide ============================== .. Warning:: This VTN server implementation is incomplete, and is not supported by the VOLTTRON core team. For information about its status including known issues, refer to the :ref:`VTN Server Configuration docs - `. + `. This guide assumes that you have a valid user account to access and log in to the VTN application website. Login Screen ------------ -In order to begin using the VTN application, navigate to \http://yourhostname**ip:8000/vtn. +In order to begin using the VTN application, navigate to ``\http://yourhostname**ip:8000/vtn``. .. image:: files/vtn_login_screen.png diff --git a/docs/source/devguides/supporting/applications/sample-applications.rst b/docs/source/volttron-topics/volttron-applications/sample-applications.rst similarity index 100% rename from docs/source/devguides/supporting/applications/sample-applications.rst rename to docs/source/volttron-topics/volttron-applications/sample-applications.rst diff --git a/docs/source/volttron_applications/Simulated-Drivers.rst b/docs/source/volttron-topics/volttron-applications/simulated-drivers.rst similarity index 98% rename from docs/source/volttron_applications/Simulated-Drivers.rst rename to docs/source/volttron-topics/volttron-applications/simulated-drivers.rst index f87e933af0..6c50d5b104 100644 --- a/docs/source/volttron_applications/Simulated-Drivers.rst +++ b/docs/source/volttron-topics/volttron-applications/simulated-drivers.rst @@ -23,11 +23,11 @@ Three agents work together to run a simulation: and it has been asked to start a simulation, it provides the current simulated time in response to requests. If no stop time has been provided, the SimulationClockAgent continues to manage the simulation clock until the agent is stopped. If no clock-speed - multiplier has been provided, the simulation clock runs at normal wallclock speed. -2. **SimulationDriverAgent.** Like MasterDriverAgent, this agent is a front-end manager for + multiplier has been provided, the simulation clock runs at normal wall-clock speed. +2. **SimulationDriverAgent.** Like PlatformDriverAgent, this agent is a front-end manager for device drivers. It handles get_point/set_point requests from other agents, and it periodically "scrapes" and publishes each driver's points. If a device driver has been - built to run under MasterDriverAgent, with a few minor modifications (detailed below) + built to run under PlatformDriverAgent, with a few minor modifications (detailed below) it can be adapted to run under SimulationDriverAgent. 3. **SimulationAgent.** This agent configures, starts, and reports on a simulation. It furnishes a variety of configuration parameters to the other simulation agents, @@ -381,10 +381,10 @@ Using the Simulation Framework to Test a Driver =============================================== If you're developing a VOLTTRON driver, and you intend to add it to the drivers -managed by MasterDriverAgent, then with a few tweaks, you can adapt it so that it's testable from +managed by PlatformDriverAgent, then with a few tweaks, you can adapt it so that it's testable from this simulation framework. -As with drivers under MasterDriverAgent, your driver should be go in a .py module that implements +As with drivers under PlatformDriverAgent, your driver should be go in a .py module that implements a Register class and an Interface class. In order to work within the simulation framework, simulation drivers need to be adjusted as follows: diff --git a/docs/source/volttron.rst b/docs/source/volttron.rst deleted file mode 100644 index 9908c17f7f..0000000000 --- a/docs/source/volttron.rst +++ /dev/null @@ -1,8 +0,0 @@ -Volttron -======== - -.. toctree:: - :maxdepth: 4 - - volttron_api/modules - diff --git a/docs/source/volttron_applications/index.rst b/docs/source/volttron_applications/index.rst deleted file mode 100644 index 3179b92e08..0000000000 --- a/docs/source/volttron_applications/index.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _applications: - -============ -Applications -============ - -Community-contributed applications, agents and drivers that are not directly -integrated into the VOLTTRON core platform reside in a separate github -repository, https://github.com/VOLTTRON/volttron-applications. This section -provides user guides and other documents for those contributions. - -.. toctree:: - :glob: - :maxdepth: 1 - - Simulated-Drivers - ven_agent - Reference-App diff --git a/docs/source/volttron_applications/ven_agent.rst b/docs/source/volttron_applications/ven_agent.rst deleted file mode 100644 index 14f8371681..0000000000 --- a/docs/source/volttron_applications/ven_agent.rst +++ /dev/null @@ -1,232 +0,0 @@ -.. _ven_agent: - -VEN Agent: OpenADR 2.0b Interface Specification -=============================================== - -OpenADR (Automated Demand Response) is a standard for alerting and responding -to the need to adjust electric power consumption in response to fluctuations in -grid demand. - -OpenADR communications are conducted between Virtual Top Nodes (VTNs) and -Virtual End Nodes (VENs). In this implementation a VOLTTRON agent, VEN agent, -acts as a VEN, communicating with its VTN by means of EIEvent and EIReport services -in conformance with a subset of the OpenADR 2.0b specification. This document's -"VOLTTRON Interface" section defines how the VEN agent relays information to, -and receives data from, other VOLTTRON agents. - -The OpenADR 2.0b specification (http://www.openadr.org/specification) is available -from the OpenADR Alliance. This implementation also generally follows the DR program -characteristics of the Capacity Program described in Section 9.2 of the OpenADR Program Guide -(http://www.openadr.org/assets/openadr_drprogramguide_v1.0.pdf). - -DR Capacity Bidding and Events -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The OpenADR Capacity Bidding program relies on a pre-committed agreement about the -VEN’s load shed capacity. This agreement is reached in a bidding process -transacted outside of the OpenADR interaction, typically with a long-term scope, -perhaps a month or longer. The VTN can “call an event,” indicating that a load-shed event should occur -in conformance with this agreement. The VTN indicates the level of load shedding -desired, when the event should occur, and for how long. The VEN responds with an -"optIn” acknowledgment. (It can also “optOut,” but since it has been pre-committed, -an “optOut” may incur penalties.) - -Reporting -~~~~~~~~~ - -The VEN agent reports device status and usage telemetry to the VTN, relying on -information received periodically from other VOLTTRON agents. - -General Approach -~~~~~~~~~~~~~~~~ - -Events: - -- The VEN agent maintains a persistent record of DR events. -- Event updates (including creation) trigger publication of event JSON on the VOLTTRON message bus. -- Other VOLTTRON agents can also call a get_events() RPC to retrieve the current status of - particular events, or of all active events. - -Reporting: - -- The VEN agent configuration defines telemetry values (data points) that can be reported to the VTN. -- The VEN agent maintains a persistent record of telemetry values over time. -- Other VOLTTRON agents are expected to call report_telemetry() to supply the VEN agent - with a regular stream of telemetry values for reporting. -- Other VOLTTRON agents can receive notification of changes in telemetry reporting - requirements by subscribing to publication of telemetry parameters. - -VEN Agent VOLTTRON Interface -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The VEN agent implements the following VOLTTRON PubSub and RPC calls. - -PubSub: event update -:: - -.. code-block:: python - - def publish_event(self, an_event): - """ - Publish an event. - - When an event is created/updated, it is published to the VOLTTRON bus - with a topic that includes 'openadr/event_update'. - - Event JSON structure: - { - "event_id" : String, - "creation_time" : DateTime, - "start_time" : DateTime, - "end_time" : DateTime or None, - "signals" : String, # Values: json string describing one or more signals. - "status" : String, # Values: unresponded, far, near, active, - # completed, canceled. - "opt_type" : String # Values: optIn, optOut, none. - } - - If an event status is 'unresponded', the VEN agent is awaiting a decision on - whether to optIn or optOut. The downstream agent that subscribes to this PubSub - message should communicate that choice to the VEN agent by calling respond_to_event() - (see below). The VEN agent then relays the choice to the VTN. - - @param an_event: an EiEvent. - """ - -PubSub: telemetry parameters update -:: - -.. code-block:: python - - def publish_telemetry_parameters_for_report(self, report): - """ - Publish telemetry parameters. - - When the VEN agent telemetry reporting parameters have been updated (by the VTN), - they are published with a topic that includes 'openadr/telemetry_parameters'. - If a particular report has been updated, the reported parameters are for that report. - - Telemetry parameters JSON example: - { - "telemetry": { - "baseline_power_kw": { - "r_id": "baseline_power", - "frequency": "30", - "report_type": "baseline", - "reading_type": "Mean", - "method_name": "get_baseline_power" - } - "current_power_kw": { - "r_id": "actual_power", - "frequency": "30", - "report_type": "reading", - "reading_type": "Mean", - "method_name": "get_current_power" - } - "manual_override": "False", - "report_status": "active", - "online": "False", - } - } - - The above example indicates that, for reporting purposes, telemetry values - for baseline_power and actual_power should be updated -- via report_telemetry() -- at - least once every 30 seconds. - - Telemetry value definitions such as baseline_power and actual_power come from the - agent configuration. - - @param report: (EiReport) The report whose parameters should be published. - """ - -RPC calls: - -.. code-block:: python - - @RPC.export - def respond_to_event(self, event_id, opt_in_choice=None): - """ - Respond to an event, opting in or opting out. - - If an event's status=unresponded, it is awaiting this call. - When this RPC is received, the VENAgent sends an eventResponse to - the VTN, indicating whether optIn or optOut has been chosen. - If an event remains unresponded for a set period of time, - it times out and automatically optsIn to the event. - - Since this call causes a change in the event's status, it triggers - a PubSub call for the event update, as described above. - - @param event_id: (String) ID of an event. - @param opt_in_choice: (String) 'OptIn' to opt into the event, anything else is treated as 'OptOut'. - """ - -.. code-block:: python - - @RPC.export - def get_events(self, event_id=None, in_progress_only=True, started_after=None, end_time_before=None): - """ - Return a list of events as a JSON string. - - Sample request: - self.get_events(started_after=utils.get_aware_utc_now() - timedelta(hours=1), - end_time_before=utils.get_aware_utc_now()) - - Return a list of events. - - By default, return only event requests with status=active or status=unresponded. - - If an event's status=active, a DR event is currently in progress. - - @param event_id: (String) Default None. - @param in_progress_only: (Boolean) Default True. - @param started_after: (DateTime) Default None. - @param end_time_before: (DateTime) Default None. - @return: (JSON) A list of events -- see 'PubSub: event update'. - """ - -.. code-block:: python - - @RPC.export - def get_telemetry_parameters(self): - """ - Return the VEN agent's current set of telemetry parameters. - - @return: (JSON) Current telemetry parameters -- see 'PubSub: telemetry parameters update'. - """ - -.. code-block:: python - - @RPC.export - def set_telemetry_status(self, online, manual_override): - """ - Update the VEN agent's reporting status. - - Set these properties to either 'TRUE' or 'FALSE'. - - @param online: (Boolean) Whether the VEN agent's resource is online. - @param manual_override: (Boolean) Whether resource control has been overridden. - """ - -.. code-block:: python - - @RPC.export - def report_telemetry(self, telemetry): - """ - Receive an update of the VENAgent's report metrics, and store them in the agent's database. - - Examples of telemetry are: - { - 'baseline_power_kw': '15.2', - 'current_power_kw': '371.1', - 'start_time': '2017-11-21T23:41:46.051405', - 'end_time': '2017-11-21T23:42:45.951405' - } - - @param telemetry_values: (JSON) Current value of each report metric, with reporting-interval start/end. - """ - -For Further Information -~~~~~~~~~~~~~~~~~~~~~~~ - -Please contact Rob Calvert at Kisensum, rob@kisensum.com diff --git a/examples/CAgent/c_agent/agent.py b/examples/CAgent/c_agent/agent.py index 766ef4c16b..d1d849b6af 100644 --- a/examples/CAgent/c_agent/agent.py +++ b/examples/CAgent/c_agent/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/CAgent/c_agent/driver/cdriver.py b/examples/CAgent/c_agent/driver/cdriver.py index 98b5323fd3..1ef336cdae 100644 --- a/examples/CAgent/c_agent/driver/cdriver.py +++ b/examples/CAgent/c_agent/driver/cdriver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,16 +39,16 @@ __docformat__ = 'reStructuredText' """The cdriver is an example implementation of an interface that -allows the master driver to transparently call C code. +allows the platform driver to transparently call C code. This file is an `interface` and will only be usable in the -master_driver/interfaces directory. The shared object will +platform_driver/interfaces directory. The shared object will need to be somewhere it can be found by this file. """ from io import StringIO from csv import DictReader -from master_driver.interfaces import BasicRevert, BaseInterface, BaseRegister +from platform_driver.interfaces import BasicRevert, BaseInterface, BaseRegister from ctypes import * diff --git a/examples/CAgent/setup.py b/examples/CAgent/setup.py index 52c5ef26d6..9e23adad76 100644 --- a/examples/CAgent/setup.py +++ b/examples/CAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/CSVDriver/CsvDriverAgent/CsvDriverAgent/agent.py b/examples/CSVDriver/CsvDriverAgent/CsvDriverAgent/agent.py index 96f7ed12a0..0302ed5dee 100644 --- a/examples/CSVDriver/CsvDriverAgent/CsvDriverAgent/agent.py +++ b/examples/CSVDriver/CsvDriverAgent/CsvDriverAgent/agent.py @@ -97,13 +97,13 @@ def configure(self, config_name, action, contents): def _handle_publish(self, peer, sender, bus, topic, headers, message): """ When we recieve an update from our all publish subscription, log something so we can see that we are - successfully scraping CSV points with the Master Driver + successfully scraping CSV points with the Platform Driver :param peer: unused :param sender: unused :param bus: unused :param topic: unused :param headers: unused - :param message: "All" messaged published by the Master Driver for the CSV Driver containing values for all + :param message: "All" messaged published by the Platform Driver for the CSV Driver containing values for all registers on the device """ # Just write something to the logs so that we can see our success diff --git a/examples/CSVDriver/README.rst b/examples/CSVDriver/README.rst index 913c8723c5..54a8a25a21 100644 --- a/examples/CSVDriver/README.rst +++ b/examples/CSVDriver/README.rst @@ -7,12 +7,12 @@ CSV Driver This example driver writes data to "registers" as lines in a CSV file. This driver is an example driver for development purposes only. -In order to run this driver, put the csvdriver.py file into the master driver's -interfaces directory, then configure the master driver normally using the +In order to run this driver, put the csvdriver.py file into the platform driver's +interfaces directory, then configure the platform driver normally using the included csv_driver.config configuration file and csv_registers.csv registry configuration file (see devguides > walkthroughs > Driver-Creation-Walkthrough for a more in-depth explanation of the driver framework, including configuration -, directory structure, etc.) To see csv driver publishes, start the master +, directory structure, etc.) To see csv driver publishes, start the platform driver and listener agents. ===================== @@ -35,7 +35,7 @@ example driver. This agent performs 2 functions: To use this agent as-is, install it as normal with the provided configuration file ("config" in the agent's directory), install an actuator agent instance (minimal or no configuration is necessary in the easiest case), and install a -listener agent instance. If the driver code file is in the master driver's +listener agent instance. If the driver code file is in the platform driver's interfaces directory the user should see publishes via the listener agent and logging from this agent which indicates that the driver is functioning (some values in the scrape_all publish should oscillate to demonstrate that the driver diff --git a/examples/CSVDriver/csvdriver.py b/examples/CSVDriver/csvdriver.py index a02f964292..f06ac60c78 100644 --- a/examples/CSVDriver/csvdriver.py +++ b/examples/CSVDriver/csvdriver.py @@ -37,7 +37,7 @@ # }}} import os -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert from csv import DictReader, DictWriter import logging @@ -155,10 +155,10 @@ def __init__(self, **kwargs): def configure(self, config_dict, registry_config_str): """ - Set the Interface attributes from the configurations provided by the Master Driver, and create the "device" if + Set the Interface attributes from the configurations provided by the Platform Driver, and create the "device" if it doesn't already exist - :param config_dict: Dictionary of configuration values passed from the Master Driver - :param registry_config_str: String representation of the registry configuration passed from the Master Driver + :param config_dict: Dictionary of configuration values passed from the Platform Driver + :param registry_config_str: String representation of the registry configuration passed from the Platform Driver """ # Set the CSV interface's necessary attributes from the configuration self.csv_path = config_dict.get("csv_path", "csv_device.csv") diff --git a/examples/CSVHistorian/csv_historian/historian.py b/examples/CSVHistorian/csv_historian/historian.py index 33fdcfdba8..ed08a94410 100644 --- a/examples/CSVHistorian/csv_historian/historian.py +++ b/examples/CSVHistorian/csv_historian/historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/CSVHistorian/setup.py b/examples/CSVHistorian/setup.py index f513a83c9a..203cd3945b 100644 --- a/examples/CSVHistorian/setup.py +++ b/examples/CSVHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/ConfigActuation/config_actuation/agent.py b/examples/ConfigActuation/config_actuation/agent.py index e003c2764b..273c1dc220 100644 --- a/examples/ConfigActuation/config_actuation/agent.py +++ b/examples/ConfigActuation/config_actuation/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/ConfigActuation/setup.py b/examples/ConfigActuation/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/ConfigActuation/setup.py +++ b/examples/ConfigActuation/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/ConfigActuation/tests/test_config_actuation.py b/examples/ConfigActuation/tests/test_config_actuation.py index 4c834ceeba..9fd17c1f3f 100644 --- a/examples/ConfigActuation/tests/test_config_actuation.py +++ b/examples/ConfigActuation/tests/test_config_actuation.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,7 +67,7 @@ def publish_agent(request, volttron_instance): """ Fixture used for setting up the environment. 1. Creates fake driver configs - 2. Starts the master driver agent with the created fake driver agents + 2. Starts the platform driver agent with the created fake driver agents 3. Starts the actuator agent 4. Creates an instance Agent class for publishing and returns it @@ -76,7 +76,7 @@ def publish_agent(request, volttron_instance): :return: an instance of fake agent used for publishing """ - # Reset master driver config store + # Reset platform driver config store cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all'] process = Popen(cmd, env=volttron_instance.env, @@ -86,7 +86,7 @@ def publish_agent(request, volttron_instance): (output, error) = process.communicate() assert process.returncode == 0 - # Add master driver configuration files to config store. + # Add platform driver configuration files to config store. cmd = ['volttron-ctl', 'config', 'store',PLATFORM_DRIVER, 'fake.csv', 'fake_unit_testing.csv', '--csv'] process = Popen(cmd, env=volttron_instance.env, @@ -104,17 +104,17 @@ def publish_agent(request, volttron_instance): result = process.wait() assert result == 0 - # Start the master driver agent which would intern start the fake driver + # Start the platform driver agent which would intern start the fake driver # using the configs created above - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices # Start the actuator agent through which publish agent should communicate - # to fake device. Start the master driver agent which would intern start + # to fake device. Start the platform driver agent which would intern start # the fake driver using the configs created above actuator_uuid = volttron_instance.install_agent( agent_dir=get_services_core("ActuatorAgent"), @@ -139,10 +139,10 @@ def publish_agent(request, volttron_instance): def stop_agent(): print("In teardown method of module") volttron_instance.stop_agent(actuator_uuid) - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) volttron_instance.stop_agent(example_uuid) volttron_instance.remove_agent(actuator_uuid) - volttron_instance.remove_agent(master_uuid) + volttron_instance.remove_agent(platform_uuid) volttron_instance.remove_agent(example_uuid) publish_agent.core.stop() diff --git a/examples/ConfigActuation/update_config.py b/examples/ConfigActuation/update_config.py index 713a38c3fb..abf1ebc774 100644 --- a/examples/ConfigActuation/update_config.py +++ b/examples/ConfigActuation/update_config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/DDSAgent/ddsagent/agent.py b/examples/DDSAgent/ddsagent/agent.py index 289b3afe32..1768f2fd8d 100644 --- a/examples/DDSAgent/ddsagent/agent.py +++ b/examples/DDSAgent/ddsagent/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/DataCleaner/data_cleaner/agent.py b/examples/DataCleaner/data_cleaner/agent.py index 2a7a5c1caf..c80bab3579 100644 --- a/examples/DataCleaner/data_cleaner/agent.py +++ b/examples/DataCleaner/data_cleaner/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/DataPublisher/datapublisher/agent.py b/examples/DataPublisher/datapublisher/agent.py index db29624691..b461c1911c 100644 --- a/examples/DataPublisher/datapublisher/agent.py +++ b/examples/DataPublisher/datapublisher/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/DataPublisher/setup.py b/examples/DataPublisher/setup.py index 25ab24da59..cb5acdb326 100644 --- a/examples/DataPublisher/setup.py +++ b/examples/DataPublisher/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/DataPuller/puller/agent.py b/examples/DataPuller/puller/agent.py index 904d4a4f62..18e897ae85 100644 --- a/examples/DataPuller/puller/agent.py +++ b/examples/DataPuller/puller/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/DataPuller/setup.py b/examples/DataPuller/setup.py index e231183a9f..65bf2b0a14 100644 --- a/examples/DataPuller/setup.py +++ b/examples/DataPuller/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/EnergyPlusAgent/README.md b/examples/EnergyPlusAgent/README.md new file mode 100644 index 0000000000..5163866fce --- /dev/null +++ b/examples/EnergyPlusAgent/README.md @@ -0,0 +1,259 @@ +# EnergyPlus Agent + +This is an example agent that demonstrates how to integrate with EnergyPlus simulation, run building model simulations +with EnergyPlus, send/receive messages back and forth between VOLTTRON and +EnergyPlus simulation. For more information about EnergyPlus, please refer to https://www.energyplus.net/sites/default/files/docs/site_v8.3.0/GettingStarted/GettingStarted/index.html. +Technical documentation about the simulation framework can be found at https://volttron.readthedocs.io/en/develop/developing-volttron/integrating-simulations/index.html + +## EnergyPlus installation +For installing setup in Ubuntu based systems, + +1. Install java JDK package + +``` +sudo apt update +sudo apt install default-jdk +``` + +2. Download and install EnergyPlus (ver 8.5.0) + +``` +wget https://github.com/NREL/EnergyPlus/releases/download/v8.5.0/EnergyPlus-8.5.0-c87e61b44b-Linux-x86_64.sh +chmod +x EnergyPlus-8.5.0-c87e61b44b-Linux-x86_64.sh +sudo ./EnergyPlus-8.5.0-c87e61b44b-Linux-x86_64.sh +``` + +## EnergyPlus Agent Configuration + +1. Copy 'bcvtb' and 'eplus' folders into root of VOLTTRON source directories. 'eplus' should contain EnergyPlus Input Data File (IDF). IDF file is an ASCII file containing the data describing the building and HVAC system to be simulated. + +'eplus' is available at https://github.com/VOLTTRON/volttron-GS/tree/master/eplus +'bcvtb' is available at https://github.com/VOLTTRON/volttron-GS/tree/master/pnnl/energyplusagent/bcvtb + +2. You can specify the configuration in either json or yaml format. The yaml format is specified +below. Please ensure that paths to IDF, weather and 'bcvtb' directories are correctly specified. + +```` yaml +# Config parameters for setting up EnergyPlus agent +properties: + identity: platform.actuator + model: ~/git/sim_volttron/volttron/eplus/BUILDING1.idf + weather: ~/git/sim_volttron/volttron/eplus/USA_WA_Pasco-Tri.Cities.AP.727845_TMY3.epw + bcvtb_home: ~/git/sim_volttron/volttron/bcvtb + size: 40960 + startmonth: 8 + startday: 1 + endmonth: 8 + endday: 31 + timestep: 60 + time_scale: 6 + cosimulation_sync: true + real_time_periodic: true + co_sim_timestep: 5 + real_time_flag: false +# configuration for subscribing to EnergyPlus simulation +outputs: + # List of subscription information, typically contains + # - energyplus point name + # - type + # - publication topic for VOLTTRON (optional) to republish on VOLTTRON message bus + # - energyplus 'field' name + # - metadata information about the output + - sim_topic: ENVIRONMENT Site Outdoor Air Drybulb Temperature1 + name: ENVIRONMENT + type: Site Outdoor Air Drybulb Temperature + topic: devices/PNNL/BUILDING1/AHU1/all + field: OutdoorAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float +# configuration for sending to EnergyPlus simulation +inputs: + # List of subscription information typically contains + # - EnergyPlus input name + # - type + # - subscription topic for VOLTTRON to receive message from VOLTTRON (agents) + # - EnergyPlus field + # - default value (if any) + # - dynamic default (receive dynamic default from + - sim_topic: CLGTEMPSETPOINT Zone-VAV-102 + name: CLGTEMPSETPOINT Zone-VAV-102 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV102 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 +```` + +## Running EnergyPlus Example agent + +3. In one terminal, start VOLTTRON + ```` + ./start-volttron + ```` + +4. Start EnergyPlus simulation example agent + ```` + source env/bin/activate + python scripts/install-agent.py -s examples/EnergyPlusAgent/ -c examples/EnergyPlusAgent/ep_building1.yml -i platform.actuator --tag eplus --start --force + ```` + +5. You will see that EnergyPlus simulation starts and sends measurement data to VOLTTRON which is then republished + on VOLTTRON message bus + + ```` + 2020-08-17 09:40:01,962 (energyplusagent-0.1 3597) __main__ DEBUG: CONFIG PATH: /home/niddodi/.volttron/agents/5980b1c7-7939-4e99-9ab6-bbb590786361/energyplusagent-0.1/energyplusagent-0.1.dist-info/config + 2020-08-17 09:40:02,510 (energyplusagent-0.1 3597) __main__ DEBUG: CONFIG: {'inputs': [{'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-102', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-102', 'topic': 'PNNL/BUILDING1/AHU1/VAV102', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-118', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-118', 'topic': 'PNNL/BUILDING1/AHU1/VAV118', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-119', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-119', 'topic': 'PNNL/BUILDING1/AHU1/VAV119', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-120', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-120', 'topic': 'PNNL/BUILDING1/AHU1/VAV120', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-123A', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-123A', 'topic': 'PNNL/BUILDING1/AHU1/VAV123A', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-123B', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-123B', 'topic': 'PNNL/BUILDING1/AHU1/VAV123B', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-127A', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-127A', 'topic': 'PNNL/BUILDING1/AHU1/VAV127A', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-127B', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-127B', 'topic': 'PNNL/BUILDING1/AHU1/VAV127B', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-129', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-129', 'topic': 'PNNL/BUILDING1/AHU1/VAV129', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-131', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-131', 'topic': 'PNNL/BUILDING1/AHU1/VAV131', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-133', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-133', 'topic': 'PNNL/BUILDING1/AHU1/VAV133', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-136', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-136', 'topic': 'PNNL/BUILDING1/AHU1/VAV136', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-142', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-142', 'topic': 'PNNL/BUILDING1/AHU1/VAV142', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-143', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-143', 'topic': 'PNNL/BUILDING1/AHU1/VAV143', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-150', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-150', 'topic': 'PNNL/BUILDING1/AHU1/VAV150', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-CORRIDOR', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-100', 'topic': 'PNNL/BUILDING1/AHU1/VAV100', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-RESTROOM', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-121', 'topic': 'PNNL/BUILDING1/AHU1/VAV121', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-104', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-104', 'topic': 'PNNL/BUILDING1/AHU3/VAV104', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-105', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-105', 'topic': 'PNNL/BUILDING1/AHU3/VAV105', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-107', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-107', 'topic': 'PNNL/BUILDING1/AHU3/VAV107', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-108', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-108', 'topic': 'PNNL/BUILDING1/AHU3/VAV108', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-112', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-112', 'topic': 'PNNL/BUILDING1/AHU3/VAV112', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-VAV-116', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-116', 'topic': 'PNNL/BUILDING1/AHU3/VAV116', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-AHU-002', 'sim_topic': 'CLGTEMPSETPOINT Zone-AHU-002', 'topic': 'PNNL/BUILDING1/AHU2', 'type': 'schedule'}, {'default': 21.11, 'dynamic_default': 1.0, 'field': 'ZoneCoolingTemperatureSetPoint', 'name': 'CLGTEMPSETPOINT Zone-AHU-004', 'sim_topic': 'CLGTEMPSETPOINT Zone-AHU-004', 'topic': 'PNNL/BUILDING1/AHU4', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-102', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-102', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE102', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-104', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-104', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE104', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-105', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-105', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE105', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-107', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-107', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE107', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-108', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-108', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE108', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-112', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-112', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE112', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-116', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-116', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE116', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-118', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-118', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE118', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-119', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-119', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE119', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-120', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-120', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE120', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-123A', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-123A', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE123A', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-123B', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-123B', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE123B', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-127A', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-127A', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE127A', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-127B', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-127B', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE127B', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-129', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-129', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE129', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-131', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-131', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE131', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-133', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-133', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE133', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-142', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-142', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE142', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-143', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-143', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE143', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-150', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-150', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE150', 'type': 'schedule'}, {'default': 1.0, 'dynamic_default': 1.0, 'field': 'DimmingLevelOutput', 'name': 'BLDG LIGHT SCH Zone-VAV-136', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-136', 'topic': 'PNNL/BUILDING1/LIGHTING/ZONE136', 'type': 'schedule'}], 'outputs': [{'field': 'OutdoorAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'ENVIRONMENT', 'sim_topic': 'ENVIRONMENT Site Outdoor Air Drybulb Temperature1', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'Site Outdoor Air Drybulb Temperature'}, {'field': 'OutdoorAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'ENVIRONMENT', 'sim_topic': 'ENVIRONMENT Site Outdoor Air Drybulb Temperature2', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'Site Outdoor Air Drybulb Temperature'}, {'field': 'OutdoorAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'ENVIRONMENT', 'sim_topic': 'ENVIRONMENT Site Outdoor Air Drybulb Temperature3', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'Site Outdoor Air Drybulb Temperature'}, {'field': 'OutdoorAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'ENVIRONMENT', 'sim_topic': 'ENVIRONMENT Site Outdoor Air Drybulb Temperature4', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'Site Outdoor Air Drybulb Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-102', 'sim_topic': 'Zone-VAV-102 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV102/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-118', 'sim_topic': 'Zone-VAV-118 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV118/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-119', 'sim_topic': 'Zone-VAV-119 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV119/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-120', 'sim_topic': 'Zone-VAV-120 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV120/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-123A', 'sim_topic': 'Zone-VAV-123A Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123A/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-123B', 'sim_topic': 'Zone-VAV-123B Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123B/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-127A', 'sim_topic': 'Zone-VAV-127A Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127A/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-127B', 'sim_topic': 'Zone-VAV-127B Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127B/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-129', 'sim_topic': 'Zone-VAV-129 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV129/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-131', 'sim_topic': 'Zone-VAV-131 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV131/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-133', 'sim_topic': 'Zone-VAV-133 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV133/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-136', 'sim_topic': 'Zone-VAV-136 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV136/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-142', 'sim_topic': 'Zone-VAV-142 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV142/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-143', 'sim_topic': 'Zone-VAV-143 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV143/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-150', 'sim_topic': 'Zone-VAV-150 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV150/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-CORRIDOR', 'sim_topic': 'Zone-VAV-100 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV100/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-RESTROOM', 'sim_topic': 'Zone-VAV-121 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV121/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-104', 'sim_topic': 'Zone-VAV-104 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV104/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-105', 'sim_topic': 'Zone-VAV-105 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV105/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-107', 'sim_topic': 'Zone-VAV-107 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV107/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-108', 'sim_topic': 'Zone-VAV-108 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV108/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-112', 'sim_topic': 'Zone-VAV-112 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV112/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-116', 'sim_topic': 'Zone-VAV-116 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV116/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-AHU-002', 'sim_topic': 'Zone-AHU-002 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'ZoneTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-AHU-004', 'sim_topic': 'Zone-AHU-004 Zone Mean Air Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'Zone Mean Air Temperature'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-Corridor VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-Corridor VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV100/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-102 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-102 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV102/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-104 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-104 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV104/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-105 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-105 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV105/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-107 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-107 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV107/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-108 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-108 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV108/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-112 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-112 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV112/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-116 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-116 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV116/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-118 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-118 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV118/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-119 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-119 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV119/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-120 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-120 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV120/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-123A VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-123A VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123A/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-123B VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-123B VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123B/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-127A VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-127A VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127A/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-127B VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-127B VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127B/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-129 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-129 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV129/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-131 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-131 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV131/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-133 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-133 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV133/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-136 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-136 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV136/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-142 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-142 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV142/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-143 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-143 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV143/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-150 VAV BOX COMPONENT', 'sim_topic': 'Zone-VAV-150 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV150/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'TerminalBoxDamperPosition', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'percentage'}, 'name': 'Zone-VAV-Restroom VAV Box Component', 'sim_topic': 'Zone-Restroom VAV BOX COMPONENT Zone Air Terminal VAV Damper Position', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV121/all', 'type': 'Zone Air Terminal VAV Damper Position'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-102 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-102 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV102/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-118 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-118 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV118/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-119 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-119 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV119/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-120 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-120 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV120/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-123A VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-123A VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123A/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-123B VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-123B VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123B/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-127A VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-127A VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127A/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-127B VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-127B VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127B/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-129 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-129 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV129/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-131 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-131 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV131/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-133 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-133 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV133/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-136 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-136 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV136/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-142 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-142 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV142/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-143 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-143 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV143/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-150 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-150 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV150/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-CORRIDOR VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-100 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV100/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-RESTROOM VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-121 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV121/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-104 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-104 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV104/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-105 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-105 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV105/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-107 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-107 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV107/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-108 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-108 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV108/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-112 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-112 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV112/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-VAV-116 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-116 VAV BOX OUTLET NODE System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV116/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'SupplyAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-AHU-002 Direct Air Inlet Node Name', 'sim_topic': 'Zone-AHU-002 Direct Air Inlet Node Name System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'Zone-AHU-004 Direct Air Inlet Node Name', 'sim_topic': 'Zone-AHU-004 Direct Air Inlet Node Name System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-102 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-102 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV102/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-118 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-118 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV118/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-119 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-119 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV119/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-120 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-120 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV120/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-123A VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-123A VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123A/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-123B VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-123B VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123B/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-127A VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-127A VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127A/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-127B VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-127B VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127B/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-129 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-129 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV129/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-131 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-131 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV131/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-133 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-133 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV133/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-136 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-136 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV136/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-142 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-142 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV142/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-143 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-143 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV143/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-150 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-150 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV150/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-CORRIDOR VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-100 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV100/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-RESTROOM VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-121 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV121/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-104 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-104 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV104/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-105 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-105 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV105/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-107 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-107 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV107/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-108 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-108 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV108/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-112 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-112 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV112/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-VAV-116 VAV BOX OUTLET NODE', 'sim_topic': 'Zone-VAV-116 VAV BOX OUTLET NODE System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV116/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-AHU-002 Direct Air Inlet Node Name', 'sim_topic': 'Zone-AHU-002 Direct Air Inlet Node Name System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'System Node Temperature'}, {'field': 'ZoneDischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'Zone-AHU-004 Direct Air Inlet Node Name', 'sim_topic': 'Zone-AHU-004 Direct Air Inlet Node Name System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'System Node Temperature'}, {'field': 'DischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-001 Supply Equipment Outlet Node', 'sim_topic': 'AHU-001 Supply Equipment Outlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'System Node Temperature'}, {'field': 'SupplyAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'AHU-001 Supply Equipment Outlet Node', 'sim_topic': 'AHU-001 Supply Equipment Outlet Node System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ReturnAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-001 Supply Equipment Inlet Node', 'sim_topic': 'AHU-001 Supply Equipment Inlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'System Node Temperature'}, {'field': 'MixedAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-001 OA CoolCNode', 'sim_topic': 'AHU-001 OA CoolCNode System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'System Node Temperature'}, {'field': 'DischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-002 Supply Equipment Outlet Node', 'sim_topic': 'AHU-002 Supply Equipment Outlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'System Node Temperature'}, {'field': 'ZoneAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'AHU-002 Supply Equipment Outlet Node', 'sim_topic': 'AHU-002 Supply Equipment Outlet Node System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ReturnAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-002 Supply Equipment Inlet Node', 'sim_topic': 'AHU-002 Supply Equipment Inlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'System Node Temperature'}, {'field': 'MixedAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-002 OA CoolCNode', 'sim_topic': 'AHU-002 OA CoolCNode System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'System Node Temperature'}, {'field': 'DischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-003 Supply Equipment Outlet Node', 'sim_topic': 'AHU-003 Supply Equipment Outlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'System Node Temperature'}, {'field': 'SupplyAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'AHU-003 Supply Equipment Outlet Node', 'sim_topic': 'AHU-003 Supply Equipment Outlet Node System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ReturnAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-003 Supply Equipment Inlet Node', 'sim_topic': 'AHU-003 Supply Equipment Inlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'System Node Temperature'}, {'field': 'MixedAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-003 OA CoolCNode', 'sim_topic': 'AHU-003 OA CoolCNode System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'System Node Temperature'}, {'field': 'DischargeAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-004 Supply Equipment Outlet Node', 'sim_topic': 'AHU-004 Supply Equipment Outlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'System Node Temperature'}, {'field': 'SupplyAirFlow', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'cubicMetersPerSecond'}, 'name': 'AHU-004 Supply Equipment Outlet Node', 'sim_topic': 'AHU-004 Supply Equipment Outlet Node System Node Mass Flow Rate', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'System Node Mass Flow Rate'}, {'field': 'ReturnAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-004 Supply Equipment Inlet Node', 'sim_topic': 'AHU-004 Supply Equipment Inlet Node System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'System Node Temperature'}, {'field': 'MixedAirTemperature', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'AHU-004 OA CoolCNode', 'sim_topic': 'AHU-004 OA CoolCNode System Node Temperature', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'System Node Temperature'}, {'field': 'SupplyFanStatus', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'Enum'}, 'name': 'HVACOperationSchd', 'sim_topic': 'HVACOperationSchd Schedule Value1', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'Schedule Value'}, {'field': 'SupplyFanStatus', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'Enum'}, 'name': 'HVACOperationSchd', 'sim_topic': 'HVACOperationSchd Schedule Value2', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'Schedule Value'}, {'field': 'SupplyFanStatus', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'Enum'}, 'name': 'HVACOperationSchd', 'sim_topic': 'HVACOperationSchd Schedule Value3', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'Schedule Value'}, {'field': 'SupplyFanStatus', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'Enum'}, 'name': 'HVACOperationSchd', 'sim_topic': 'HVACOperationSchd Schedule Value4', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAVCorridor', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV100/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV102', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV102/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV118', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV118/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV119', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV119/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV120', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV120/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV123A', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123A/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV123B', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123B/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV127A', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127A/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV127B', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127B/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV129', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV129/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV131', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV131/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV136', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV136/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV133', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV133/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV142', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV142/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV143', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV143/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV150', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV150/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV104', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV104/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV105', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV105/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV107', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV107/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV108', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV108/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV112', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV112/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAV116', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV116/all', 'type': 'Schedule Value'}, {'field': 'OccupancyMode', 'meta': {'type': 'boolean', 'tz': 'US/Pacific', 'units': 'None'}, 'name': 'ALWAYS ON', 'sim_topic': 'ALWAYS ON Schedule Value VAVRESTROOM', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV121/all', 'type': 'Schedule Value'}, {'field': 'SupplyFanPower', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'AHU-001 Fan', 'sim_topic': 'AHU 1 Power', 'topic': 'devices/PNNL/BUILDING1/AHU1/all', 'type': 'Fan Electric Power'}, {'field': 'SupplyFanPower', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'AHU-002 Fan', 'sim_topic': 'AHU 2 Power', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'Fan Electric Power'}, {'field': 'SupplyFanPower', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'AHU-003 Fan', 'sim_topic': 'AHU 3 Power', 'topic': 'devices/PNNL/BUILDING1/AHU3/all', 'type': 'Fan Electric Power'}, {'field': 'SupplyFanPower', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'AHU-004 Fan', 'sim_topic': 'AHU 4 Power', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'Fan Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'DISTRICT COOLING', 'sim_topic': 'DISTRICT COOLING District Cooling Rate', 'topic': 'devices/PNNL/BUILDING1/Chiller/all', 'type': 'District Cooling Rate'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-102 Lights', 'sim_topic': 'Zone-VAV-102 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE102/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-104 Lights', 'sim_topic': 'Zone-VAV-104 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE104/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-105 Lights', 'sim_topic': 'Zone-VAV-105 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE105/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-107 Lights', 'sim_topic': 'Zone-VAV-107 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE107/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-108 Lights', 'sim_topic': 'Zone-VAV-108 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE108/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-112 Lights', 'sim_topic': 'Zone-VAV-112 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE112/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-116 Lights', 'sim_topic': 'Zone-VAV-116 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE116/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-118 Lights', 'sim_topic': 'Zone-VAV-118 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE118/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-119 Lights', 'sim_topic': 'Zone-VAV-119 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE119/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-120 Lights', 'sim_topic': 'Zone-VAV-120 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE120/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-123A Lights', 'sim_topic': 'Zone-VAV-123A Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE123A/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-123B Lights', 'sim_topic': 'Zone-VAV-123B Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE123B/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-127A Lights', 'sim_topic': 'Zone-VAV-127A Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE127A/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-127B Lights', 'sim_topic': 'Zone-VAV-127B Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE127B/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-129 Lights', 'sim_topic': 'Zone-VAV-129 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE129/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-131 Lights', 'sim_topic': 'Zone-VAV-131 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE131/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-133 Lights', 'sim_topic': 'Zone-VAV-133 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE133/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-136 Lights', 'sim_topic': 'Zone-VAV-136 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE136/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-142 Lights', 'sim_topic': 'Zone-VAV-142 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE142/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-143 Lights', 'sim_topic': 'Zone-VAV-143 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE143/all', 'type': 'Lights Electric Power'}, {'field': 'Power', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Zone-VAV-150 Lights', 'sim_topic': 'Zone-VAV-150 Lights Electric Power', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE150/all', 'type': 'Lights Electric Power'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-102', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-102', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE102/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-104', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-104', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE104/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-105', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-105', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE105/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-107', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-107', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE107/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-108', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-108', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE108/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-112', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-112', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE112/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-116', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-116', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE116/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-118', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-118', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE118/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-119', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-119', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE119/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-120', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-120', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE120/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-123A', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-123A', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE123A/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-123B', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-123B', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE123B/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-127A', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-127A', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE127A/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-127B', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-127B', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE127B/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-129', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-129', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE129/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-131', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-131', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE131/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-133', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-133', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE133/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-136', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-136', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE136/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-142', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-142', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE142/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-143', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-143', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE143/all', 'type': 'Schedule Value'}, {'field': 'DimmingLevelOutput', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'fraction'}, 'name': 'BLDG LIGHT SCH Zone-VAV-150', 'sim_topic': 'BLDG LIGHT SCH Zone-VAV-150', 'topic': 'devices/PNNL/BUILDING1/LIGHTING/ZONE150/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-102', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-102', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV102/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-118', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-118', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV118/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-119', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-119', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV119/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-120', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-120', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV120/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-123A', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-123A', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123A/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-123B', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-123B', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV123B/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-127A', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-127A', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127A/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-127B', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-127B', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV127B/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-129', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-129', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV129/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-131', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-131', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV131/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-133', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-133', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV133/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-136', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-136', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV136/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-142', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-142', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV142/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-143', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-143', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV143/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-150', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-150', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV150/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-CORRIDOR', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-100', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV100/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-RESTROOM', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-121', 'topic': 'devices/PNNL/BUILDING1/AHU1/VAV121/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-104', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-104', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV104/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-105', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-105', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV105/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-107', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-107', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV107/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-108', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-108', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV108/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-112', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-112', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV112/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-VAV-116', 'sim_topic': 'CLGTEMPSETPOINT Zone-VAV-116', 'topic': 'devices/PNNL/BUILDING1/AHU3/VAV116/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-AHU-002', 'sim_topic': 'CLGTEMPSETPOINT Zone-AHU-002', 'topic': 'devices/PNNL/BUILDING1/AHU2/all', 'type': 'Schedule Value'}, {'field': 'ZoneCoolingTemperatureSetPoint', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'degreesCentigrade'}, 'name': 'CLGTEMPSETPOINT Zone-AHU-004', 'sim_topic': 'CLGTEMPSETPOINT Zone-AHU-004', 'topic': 'devices/PNNL/BUILDING1/AHU4/all', 'type': 'Schedule Value'}, {'field': 'WholeBuildingPower', 'meta': {'type': 'float', 'tz': 'US/Pacific', 'units': 'WATTS'}, 'name': 'Whole Building', 'publish_last': True, 'sim_topic': 'Whole Building Power', 'topic': 'devices/PNNL/BUILDING1/METERS/all', 'type': 'Facility Total Electric Demand Power'}, {'name': 'EMS', 'sim_topic': 'currentMonthV', 'type': 'currentMonthV'}, {'name': 'EMS', 'sim_topic': 'currentDayOfMonthV', 'type': 'currentDayOfMonthV'}, {'name': 'EMS', 'sim_topic': 'currentHourV', 'type': 'currentHourV'}, {'name': 'EMS', 'sim_topic': 'currentMinuteV', 'type': 'currentMinuteV'}, {'default': 'BLDG LIGHT SCH Zone-VAV-102', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default1', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-104', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default2', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-105', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default3', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-107', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default4', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-108', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default5', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-112', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default6', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-116', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default7', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-118', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default8', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-119', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default9', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-120', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default10', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-123A', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default11', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-123B', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default12', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-127A', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default13', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-127B', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default14', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-129', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default15', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-131', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default16', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-133', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default17', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-142', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default18', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-143', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default19', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-150', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default20', 'type': 'Schedule Value'}, {'default': 'BLDG LIGHT SCH Zone-VAV-136', 'name': 'BLDG LIGHT SCH bak', 'sim_topic': 'Dynamic_default21', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-102', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault1', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-118', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault2', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-119', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault3', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-120', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault4', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-123A', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault5', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-123B', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault6', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-127A', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault7', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-127B', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault8', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-129', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault9', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-131', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault10', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-133', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault11', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-136', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault12', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-142', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault13', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-143', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault14', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-150', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault15', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-CORRIDOR', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault16', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-RESTROOM', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault17', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-104', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault18', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-105', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault19', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-107', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault20', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-108', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault21', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-112', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault22', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-VAV-116', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault23', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-AHU-002', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault24', 'type': 'Schedule Value'}, {'default': 'CLGTEMPSETPOINT Zone-AHU-004', 'name': 'CLGTEMPSETPOINT', 'sim_topic': 'Dynamic_VAVdefault25', 'type': 'Schedule Value'}, {'field': 'operation', 'name': 'HVACOperationSchd', 'sim_topic': 'operation', 'type': 'Schedule Value'}], 'properties': {'bcvtb_home': '~/git/sim_volttron/volttron/bcvtb', 'co_sim_timestep': 5, 'cosimulation_sync': False, 'endday': 5, 'endmonth': 8, 'identity': 'platform.actuator', 'model': '~/git/sim_volttron/volttron/eplus/BUILDING1.idf', 'real_time_periodic': False, 'realtime': False, 'size': 40960, 'startday': 1, 'startmonth': 8, 'timestep': 60, 'weather': '~/git/sim_volttron/volttron/eplus/USA_WA_Pasco-Tri.Cities.AP.727845_TMY3.epw'}} + 2020-08-17 09:40:02,513 (energyplusagent-0.1 3597) root DEBUG: Creating ZMQ Core platform.actuator + 2020-08-17 09:40:02,517 (energyplusagent-0.1 3597) __main__ DEBUG: vip_identity: platform.actuator + 2020-08-17 09:40:02,558 () volttron.platform.auth INFO: AUTH: After authenticate user id: platform.actuator, b'849b25e4-64af-42f5-8e19-74e57f22115e' + 2020-08-17 09:40:02,558 () volttron.platform.auth INFO: authentication success: userid=b'849b25e4-64af-42f5-8e19-74e57f22115e' domain='vip', address='localhost:1000:1000:3597', mechanism='CURVE', credentials=['QY8uyp4KryvnFzmOwCc3KYvUVKW5r6a1br124UNi9Sc'], user='platform.actuator' + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) volttron.platform.vip.agent.core INFO: Connected to platform: router: 849b25e4-64af-42f5-8e19-74e57f22115e version: 1.0 identity: platform.actuator + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV102 + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV118 + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV119 + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV120 + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV123A + 2020-08-17 09:40:02,563 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV123B + 2020-08-17 09:40:02,564 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV127A + 2020-08-17 09:40:02,564 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV127B + 2020-08-17 09:40:02,564 (energyplusagent-0.1 3597) __main__ INFO: subscribed to PNNL/BUILDING1/AHU1/VAV129 + 2020-08-17 09:40:02,570 (energyplusagent-0.1 3597) integrations.energyplus_integration DEBUG: Bound to 36353 on '127.0.0.1' + 2020-08-17 09:40:02,571 (energyplusagent-0.1 3597) integrations.energyplus_integration DEBUG: Working in '/home/niddodi/git/sim_volttron/volttron/eplus' + 2020-08-17 09:40:02,572 (energyplusagent-0.1 3597) integrations.energyplus_integration DEBUG: Running: cd /home/niddodi/git/sim_volttron/volttron/eplus; export BCVTB_HOME=/home/niddodi/git/sim_volttron/volttron/bcvtb; energyplus -w /home/niddodi/git/sim_volttron/volttron/eplus/USA_WA_Pasco-Tri.Cities.AP.727845_TMY3.epw -r /home/niddodi/git/sim_volttron/volttron/eplus/BUILDING1.idf + 2020-08-17 09:40:02,834 (energyplusagent-0.1 3597) INFO: ['EnergyPlus Starting'] + 2020-08-17 09:40:02,837 (energyplusagent-0.1 3597) INFO: ['EnergyPlus, Version 8.5.0-c87e61b44b, YMD=2020.08.17 09:40'] + 2020-08-17 09:40:02,837 (energyplusagent-0.1 3597) INFO: ['Processing Data Dictionary'] + 2020-08-17 09:40:02,848 (energyplusagent-0.1 3597) integrations.energyplus_integration DEBUG: Starting socket server + 2020-08-17 09:40:02,848 (energyplusagent-0.1 3597) integrations.energyplus_integration DEBUG: server now listening + 2020-08-17 09:40:03,137 (energyplusagent-0.1 3597) INFO: ['Processing Input File'] + 2020-08-17 09:40:05,408 (energyplusagent-0.1 3597) INFO: ['Initializing Response Factors'] + 2020-08-17 09:40:05,408 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "INT WALL", Construction # 1'] + 2020-08-17 09:40:05,408 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES EXT WALL", Construction # 2'] + 2020-08-17 09:40:05,410 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES EXT WALL", Construction # 2'] + 2020-08-17 09:40:05,412 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,415 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,418 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,424 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,427 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,430 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,434 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,442 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,449 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,452 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,456 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,460 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,464 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,472 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,475 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,480 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,484 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,487 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,491 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,494 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,504 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,508 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,511 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,514 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,517 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,520 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,523 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,527 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,537 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,542 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,545 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,548 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,551 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,554 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,558 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,567 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,573 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,576 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,579 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,583 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,586 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,589 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,598 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,603 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,607 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,610 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,613 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,616 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,619 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,628 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES ROOF", Construction # 3'] + 2020-08-17 09:40:05,632 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "NONRES FLOOR", Construction # 4'] + 2020-08-17 09:40:05,637 (energyplusagent-0.1 3597) INFO: ['Calculating CTFs for "INTERIORFURNISHINGS", Construction # 5'] + 2020-08-17 09:40:05,638 (energyplusagent-0.1 3597) INFO: ['Initializing Window Optical Properties'] + 2020-08-17 09:40:05,640 (energyplusagent-0.1 3597) INFO: ['Initializing Solar Calculations'] + 2020-08-17 09:40:05,640 (energyplusagent-0.1 3597) INFO: ['Allocate Solar Module Arrays'] + 2020-08-17 09:40:05,707 (energyplusagent-0.1 3597) INFO: ['Initializing Zone Report Variables'] + 2020-08-17 09:40:05,708 (energyplusagent-0.1 3597) INFO: ['Initializing Surface (Shading) Report Variables'] + 2020-08-17 09:40:05,717 (energyplusagent-0.1 3597) INFO: ['Computing Interior Solar Absorption Factors'] + 2020-08-17 09:40:05,717 (energyplusagent-0.1 3597) INFO: ['Determining Shadowing Combinations'] + 2020-08-17 09:40:05,730 (energyplusagent-0.1 3597) INFO: ['Computing Window Shade Absorption Factors'] + 2020-08-17 09:40:05,730 (energyplusagent-0.1 3597) INFO: ['Proceeding with Initializing Solar Calculations'] + 2020-08-17 09:40:05,750 (energyplusagent-0.1 3597) INFO: ['Initializing Surfaces'] + 2020-08-17 09:40:05,750 (energyplusagent-0.1 3597) INFO: ['Initializing Outdoor environment for Surfaces'] + 2020-08-17 09:40:05,751 (energyplusagent-0.1 3597) INFO: ['Setting up Surface Reporting Variables'] + 2020-08-17 09:40:05,787 (energyplusagent-0.1 3597) INFO: ['Initializing Temperature and Flux Histories'] + 2020-08-17 09:40:05,788 (energyplusagent-0.1 3597) INFO: ['Initializing Window Shading'] + 2020-08-17 09:40:05,788 (energyplusagent-0.1 3597) INFO: ['Computing Interior Absorption Factors'] + 2020-08-17 09:40:05,788 (energyplusagent-0.1 3597) INFO: ['Computing Interior Diffuse Solar Absorption Factors'] + 2020-08-17 09:40:05,788 (energyplusagent-0.1 3597) INFO: ['Computing Interior Diffuse Solar Exchange through Interzone Windows'] + 2020-08-17 09:40:05,790 (energyplusagent-0.1 3597) INFO: ['Initializing Solar Heat Gains'] + 2020-08-17 09:40:05,790 (energyplusagent-0.1 3597) INFO: ['Initializing Internal Heat Gains'] + 2020-08-17 09:40:05,791 (energyplusagent-0.1 3597) INFO: ['Initializing Interior Solar Distribution'] + 2020-08-17 09:40:05,791 (energyplusagent-0.1 3597) INFO: ['Initializing Interior Convection Coefficients'] + 2020-08-17 09:40:05,803 (energyplusagent-0.1 3597) INFO: ['Gathering Information for Predefined Reporting'] + 2020-08-17 09:40:05,811 (energyplusagent-0.1 3597) INFO: ['Completed Initializing Surface Heat Balance'] + 2020-08-17 09:40:05,811 (energyplusagent-0.1 3597) INFO: ['Calculate Outside Surface Heat Balance'] + 2020-08-17 09:40:05,811 (energyplusagent-0.1 3597) INFO: ['Calculate Inside Surface Heat Balance'] + 2020-08-17 09:40:05,813 (energyplusagent-0.1 3597) INFO: ['Calculate Air Heat Balance'] + 2020-08-17 09:40:05,816 (energyplusagent-0.1 3597) INFO: ['Initializing HVAC'] + 2020-08-17 09:40:05,883 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:06,725 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:07,474 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:08,201 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:08,989 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:09,749 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:10,468 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:11,196 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:11,904 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:12,610 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:13,330 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:14,056 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:14,843 (energyplusagent-0.1 3597) INFO: ['Performing Zone Sizing Simulation'] + 2020-08-17 09:40:14,844 (energyplusagent-0.1 3597) INFO: ['...for Sizing Period: #1 PASCO ANN HTG 99.6% CONDNS DB'] + 2020-08-17 09:40:21,657 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:22,657 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:23,583 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:24,658 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:25,549 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:26,747 (energyplusagent-0.1 3597) INFO: ['Warming up'] + 2020-08-17 09:40:28,383 (energyplusagent-0.1 3597) INFO: ['Performing Zone Sizing Simulation'] + 2020-08-17 09:40:28,383 (energyplusagent-0.1 3597) INFO: ['...for Sizing Period: #2 PASCO ANN CLG .4% CONDNS DB=>MWB'] + 2020-08-17 09:40:31,930 (energyplusagent-0.1 3597) INFO: ['Calculating System sizing'] + 2020-08-17 09:40:31,931 (energyplusagent-0.1 3597) INFO: ['...for Sizing Period: #1 PASCO ANN HTG 99.6% CONDNS DB'] + 2020-08-17 09:40:31,936 (energyplusagent-0.1 3597) INFO: ['Calculating System sizing'] + 2020-08-17 09:40:31,936 (energyplusagent-0.1 3597) INFO: ['...for Sizing Period: #2 PASCO ANN CLG .4% CONDNS DB=>MWB'] + 2020-08-17 09:40:32,578 (energyplusagent-0.1 3597) INFO: ['Initializing Simulation'] + 2020-08-17 09:40:32,735 (energyplusagent-0.1 3597) INFO: ['Reporting Surfaces'] + 2020-08-17 09:40:32,857 (energyplusagent-0.1 3597) INFO: ['Beginning Primary Simulation'] + 2020-08-17 09:40:32,861 (energyplusagent-0.1 3597) INFO: ['Initializing New Environment Parameters'] + 2020-08-17 09:40:32,861 (energyplusagent-0.1 3597) INFO: ['Warming up {1}'] + 2020-08-17 09:40:32,861 (energyplusagent-0.1 3597) INFO: ['Instantiating Building Controls Virtual Test Bed'] + 2020-08-17 09:40:32,862 (energyplusagent-0.1 3597) INFO: ['ExternalInterface initializes.'] + 2020-08-17 09:40:32,862 (energyplusagent-0.1 3597) integrations.energyplus_integration DEBUG: Connected with 127.0.0.1:49342 + 2020-08-17 09:40:33,293 (energyplusagent-0.1 3597) INFO: ['Number of outputs in ExternalInterface = 269'] + 2020-08-17 09:40:33,293 (energyplusagent-0.1 3597) INFO: ['Number of inputs in ExternalInterface = 46'] + 2020-08-17 09:40:35,732 (energyplusagent-0.1 3597) INFO: ['Warming up {2}'] + 2020-08-17 09:40:37,935 (energyplusagent-0.1 3597) INFO: ['Warming up {3}'] + 2020-08-17 09:40:42,864 (energyplusagent-0.1 3597) INFO: ['Warming up {4}'] + 2020-08-17 09:40:45,773 (energyplusagent-0.1 3597) INFO: ['Warming up {5}'] + 2020-08-17 09:40:47,844 (energyplusagent-0.1 3597) INFO: ['Warming up {6}'] + 2020-08-17 09:40:49,631 (energyplusagent-0.1 3597) INFO: ['Starting Simulation at 08/01 for TYPICAL SUMMER WEEK'] + 2020-08-17 09:40:49,631 (energyplusagent-0.1 3597) INFO: ['ExternalInterface starts first data exchange.'] + + ```` + diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/tester/__init__.py b/examples/EnergyPlusAgent/energyplus/__init__.py similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/tester/__init__.py rename to examples/EnergyPlusAgent/energyplus/__init__.py diff --git a/examples/EnergyPlusAgent/energyplus/agent.py b/examples/EnergyPlusAgent/energyplus/agent.py new file mode 100644 index 0000000000..3535fd6ac8 --- /dev/null +++ b/examples/EnergyPlusAgent/energyplus/agent.py @@ -0,0 +1,618 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + + +__docformat__ = 'reStructuredText' + +import gevent +import logging +import sys +import collections +from datetime import datetime + +from volttron.platform.agent import utils +from volttron.platform.vip.agent import Agent, Core, RPC +from integrations.energyplus_integration import EnergyPlusSimIntegration +from volttron.platform.messaging import headers as headers_mod +from datetime import timedelta as td +from math import modf + +_log = logging.getLogger(__name__) +utils.setup_logging() +__version__ = "0.1" + +SUCCESS = 'SUCCESS' +FAILURE = 'FAILURE' + + +def energyplus_example(config_path, **kwargs): + """Parses the Agent configuration and returns an instance of + the agent created using that configuration. + + :param config_path: Path to a configuration file. + + :type config_path: str + :returns: EnergyPlusAgent + :rtype: EnergyPlusAgent + """ + _log.debug("CONFIG PATH: {}".format(config_path)) + try: + config = utils.load_config(config_path) + except Exception: + config = {} + #_log.debug("CONFIG: {}".format(config)) + if not config: + _log.info("Using Agent defaults for starting configuration.") + + return EnergyPlusAgent(config, **kwargs) + + +class EnergyPlusAgent(Agent): + def __init__(self, config, **kwargs): + super(EnergyPlusAgent, self).__init__(enable_store=False, **kwargs) + self.config = config + self.inputs = [] + self.outputs = [] + self.cosimulation_advance = None + self._now = None + self.num_of_pub = None + self.tns_actuate = None + self.rt_periodic = None + self.EnergyPlus_sim = EnergyPlusSimIntegration(self.config, self.vip.pubsub, self.core) + _log.debug("vip_identity: " + self.core.identity) + + @Core.receiver('onsetup') + def setup(self, sender, **kwargs): + if 'outputs' in self.config: + self.outputs = self.config['outputs'] + + self.cosimulation_advance = self.config.get('cosimulation_advance', None) + self._now = datetime.utcnow() + self.num_of_pub = 0 + + @Core.receiver('onstart') + def start(self, sender, **kwargs): + """ + Subscribe to VOLTTRON topics on VOLTTRON message bus. + Register config parameters with EnergyPlus + Start EnergyPlus simulation. + """ + # Exit if EnergyPlus isn't installed in the current environment. + if not self.EnergyPlus_sim.is_sim_installed(): + _log.error("EnergyPlus is unavailable please install it before running this agent.") + self.core.stop() + return + + # Register the config and output callback with EnergyPlus + self.EnergyPlus_sim.register_inputs(self.config, self.do_work) + + # Pick out VOLTTRON topics and subscribe to VOLTTRON message bus + self.subscribe() + self.clear_last_update() + + if self.cosimulation_advance is not None: + self.vip.pubsub.subscribe(peer='pubsub', + prefix=self.cosimulation_advance, + callback=self.advance_simulation) + # Start EnergyPlus simulation + self.EnergyPlus_sim.start_simulation() + + def subscribe(self): + """ + Subscribe to VOLTTRON topics + :return: + """ + for obj in self.EnergyPlus_sim.inputs: + + topic = obj.get('topic', None) + if topic is not None: + callback = self.on_match_topic + _log.info('subscribed to ' + topic) + self.vip.pubsub.subscribe(peer='pubsub', prefix=topic, callback=callback) + + def publish_all_outputs(self): + """ + Pull out relevant fields from EnergyPlus output message + and publish on VOLTTRON message bus using corresponding topic + :param args: + :return: + """ + _now = self._create_simulation_datetime() + _log.info(f"Publish the building response for timestamp: {_now}.") + + headers = {headers_mod.DATE: _now, headers_mod.TIMESTAMP: _now} + topics = collections.OrderedDict() + + for obj in self.outputs: + if 'topic' in obj and 'value' in obj: + topic = obj.get('topic', None) + value = obj.get('value', None) + field = obj.get('field', None) + metadata = obj.get('meta', {}) + if topic is not None and value is not None: + if topic not in topics: + topics[topic] = {'values': None, 'fields': None} + if field is not None: + if topics[topic]['fields'] is None: + topics[topic]['fields'] = [{}, {}] + topics[topic]['fields'][0][field] = value + topics[topic]['fields'][1][field] = metadata + else: + if topics[topic]['values'] is None: + topics[topic]['values'] = [] + topics[topic]['values'].append([value, metadata]) + + for topic, obj in topics.items(): + if obj['values'] is not None: + for value in obj['values']: + out = value + _log.info('Sending: ' + topic + ' ' + str(out)) + self.vip.pubsub.publish('pubsub', topic, headers, out).get() + if obj['fields'] is not None: + out = obj['fields'] + _log.info(f"Sending: {topic} {out}") + while True: + try: + self.vip.pubsub.publish('pubsub', topic, headers, out).get() + except: + _log.debug("Again ERROR: retrying publish") + gevent.sleep(0.1) + continue + break + self.num_of_pub += 1 + + def _create_simulation_datetime(self): + """ + Build simulation datetime + :return: + """ + self._now = self._now + td(minutes=1) + + if self.EnergyPlus_sim.month is None or \ + self.EnergyPlus_sim.day is None or \ + self.EnergyPlus_sim.minute is None or \ + self.EnergyPlus_sim.hour is None: + _now = self._now + else: + if self.num_of_pub >= 1: + if abs(self.EnergyPlus_sim.minute - 60.0) < 0.5: + self.EnergyPlus_sim.hour += 1.0 + self.EnergyPlus_sim.minute = 0.0 + if abs(self.EnergyPlus_sim.hour - 24.0) < 0.5: + self.EnergyPlus_sim.hour = 0.0 + self.EnergyPlus_sim.day += 1.0 + else: + self.EnergyPlus_sim.hour = 0.0 + self.EnergyPlus_sim.minute = 0.0 + second, minute = modf(self.EnergyPlus_sim.minute) + self.EnergyPlus_sim.second = int(second * 60.0) + self.EnergyPlus_sim.minute = int(minute) + date_string = '2017-' + str(self.EnergyPlus_sim.month).replace('.0', '') + \ + '-' + str(self.EnergyPlus_sim.day).replace('.0', '') + ' ' + \ + str(self.EnergyPlus_sim.hour).replace('.0', '') + ':' + \ + str(self.EnergyPlus_sim.minute) + ':' + \ + str(self.EnergyPlus_sim.second) + _now = datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S") + _now = _now.isoformat(' ') + 'Z' + return _now + + def on_match_topic(self, peer, sender, bus, topic, headers, message): + """ + Callback to capture VOLTTRON messages + :param peer: 'pubsub' + :param sender: sender identity + :param bus: + :param topic: topic for the message + :param headers: message header + :param message: actual message + :return: + """ + msg = message if type(message) == type([]) else [message] + _log.info(f"Received: {topic} {msg}") + self.update_topic(topic, headers, msg) + + def update_topic(self, topic, headers, message): + """ + :param topic: topic for the message + :param headers: message header + :param message: actual message + :return: + """ + objs = self.get_inputs_from_topic(topic) + if objs is None: + return + for obj in objs: + value = message[0] + if type(value) is dict and 'field' in obj and obj.get('field') in value: + value = value.get(obj.get('field')) + obj['value'] = value + obj['message'] = message[0] + obj['message_meta'] = message[1] + obj['last_update'] = headers.get(headers_mod.DATE, datetime.utcnow().isoformat(' ') + 'Z') + self.send_on_all_inputs_updated() + + def send_on_all_inputs_updated(self): + """ + Check if all input messages have been updated and then send to EnergyPlus + :return: + """ + if self.all_topics_updated(): + self.clear_last_update() + self.EnergyPlus_sim.send_eplus_msg() + + def all_topics_updated(self): + """ + Check if all input messages have been updated + :return: + """ + for obj in self.EnergyPlus_sim.inputs: + if 'topic' in obj: + last_update = obj.get('last_update', None) + if last_update is None: + return False + return True + + def clear_last_update(self): + """ + Clear 'last_update' flag + :return: + """ + for obj in self.EnergyPlus_sim.inputs: + if 'topic' in obj: + obj['last_update'] = None + + def get_inputs_from_topic(self, topic): + """ + Find all input objects that best match the topic + :param topic: topic to match + :return: + """ + objs = [] + for obj in self.EnergyPlus_sim.inputs: + _log.debug("EPLUS: get_inputs_from_topic: {}".format(obj)) + if obj.get('topic') == topic: + objs.append(obj) + topic = "/".join(["devices", topic, "all"]) + for obj in self.outputs: + if obj.get('topic') == topic: + objs.append(obj) + if len(objs): + return objs + return None + + def find_best_match(self, topic): + """ + Find all input objects that best match the topic + :param topic: topic to match + :return: + """ + topic = topic.strip('/') + device_name, point_name = topic.rsplit('/', 1) + objs = self.get_inputs_from_topic(device_name) + + if objs is not None: + for obj in objs: + # we have matches to the , + # so get the first one has a field matching + if obj.get('field', None) == point_name: + return obj + objs = self.get_inputs_from_topic(topic) + if objs is not None and len(objs): # we have exact matches to the topic + return objs[0] + return None + + def do_work(self): + """ + Agent callback to receive EnergyPlus outputs + - Publish all outputs on VOLTTRON message bus + - Periodically advance simulation by sending and receiving messages to EnergyPlus + :return: + """ + self.outputs = self.EnergyPlus_sim.outputs + if self.EnergyPlus_sim.sim_flag != '1': + self.publish_all_outputs() + if self.EnergyPlus_sim.cosimulation_sync: + self.check_advance() + if self.EnergyPlus_sim.real_time_periodic and self.rt_periodic is None: + _log.debug("do_work: self.EnergyPlus_sim.timestep: {}".format(self.EnergyPlus_sim.timestep)) + self.EnergyPlus_sim.timestep = 60. / (self.EnergyPlus_sim.timestep * self.EnergyPlus_sim.time_scale) * 60. + _log.debug("do_work: self.EnergyPlus_sim.timestep: {}".format(self.EnergyPlus_sim.timestep)) + self.rt_periodic = self.core.periodic(self.EnergyPlus_sim.timestep, + self.run_periodic, + wait=self.EnergyPlus_sim.timestep) + + def check_advance(self): + if self.EnergyPlus_sim.real_time_periodic: + return + timestep = int(60 / self.EnergyPlus_sim.timestep) + + if not self.EnergyPlus_sim.real_time_flag: + self.cosim_sync_counter += timestep + if self.cosim_sync_counter < self.EnergyPlus_sim.co_sim_timestep: + self.advance_simulation(None, None, None, None, None, None) + else: + self.cosim_sync_counter = 0 + self.vip.pubsub.publish('pubsub', + self.tns_actuate, + headers={}, + message={}).get(timeout=10) + else: + if self.EnergyPlus_sim.hour > self.EnergyPlus_sim.currenthour or self.EnergyPlus_sim.passtime: + self.EnergyPlus_sim.passtime = True + self.cosim_sync_counter += timestep + if self.cosim_sync_counter < self.EnergyPlus_sim.co_sim_timestep: + self.advance_simulation(None, None, None, None, None, None) + else: + self.cosim_sync_counter = 0 + self.vip.pubsub.publish('pubsub', + self.tns_actuate, + headers={}, + message={}).get(timeout=10) + else: + self.advance_simulation(None, None, None, None, None, None) + + return + + def run_periodic(self): + """ + Advance the simulation periodically and publish all outputs to VOLTTRON bus + :return: + """ + self.advance_simulation(None, None, None, None, None, None) + self.EnergyPlus_sim.send_eplus_msg() + + def advance_simulation(self, peer, sender, bus, topic, headers, message): + _log.info('Advancing simulation.') + + for obj in self.EnergyPlus_sim.inputs: + set_topic = obj['topic'] + '/' + obj['field'] + external = obj.get('external', False) + if external: + value = obj['value'] if 'value' in obj else obj['default'] + else: + value = obj['default'] + self.update_topic_rpc(sender, set_topic, value, external) + return + + @Core.receiver("onstop") + def onstop(self, sender, **kwargs): + """ + This method is called when the Agent is about to shutdown. + Stop EnergyPlus simulation + """ + self.EnergyPlus_sim.stop_simulation() + + @RPC.export + def request_new_schedule(self, requester_id, task_id, priority, requests): + """RPC method + + Requests one or more blocks on time on one or more device. + In this agent, this does nothing! + + :param requester_id: Requester name. + :param task_id: Task name. + :param priority: Priority of the task. Must be either HIGH, LOW, or LOW_PREEMPT + :param requests: A list of time slot requests + + :type requester_id: str + :type task_id: str + :type priority: str + :type request: list + :returns: Request result + :rtype: dict + + """ + _log.debug(requester_id + " requests new schedule " + task_id + " " + str(requests)) + result = {'result': SUCCESS, + 'data': {}, + 'info': ''} + return result + + @RPC.export + def request_cancel_schedule(self, requester_id, task_id): + """RPC method + + Requests the cancelation of the specified task id. + In this agent, this does nothing! + + :param requester_id: Requester name. + :param task_id: Task name. + + :type requester_id: str + :type task_id: str + :returns: Request result + :rtype: dict + + """ + _log.debug(requester_id + " canceled " + task_id) + result = {'result': SUCCESS, + 'data': {}, + 'info': ''} + return result + + @RPC.export + def get_point(self, topic, **kwargs): + """RPC method + + Gets the value of a specific point on a device_name. + Does not require the device_name be scheduled. + + :param topic: The topic of the point to grab in the + format / + :param **kwargs: These get dropped on the floor + :type topic: str + :returns: point value + :rtype: any base python type + + """ + obj = self.find_best_match(topic) + if obj is not None: # we have an exact match to the /, so return the first value + value = obj.get('value', None) + if value is None: + value = obj.get('default', None) + return value + return None + + @RPC.export + def set_point(self, requester_id, topic, value, **kwargs): + """RPC method + + Sets the value of a specific point on a device. + Does not require the device be scheduled. + + :param requester_id: Identifier given when requesting schedule. + :param topic: The topic of the point to set in the + format / + :param value: Value to set point to. + :param **kwargs: These get dropped on the floor + :type topic: str + :type requester_id: str + :type value: any basic python type + :returns: value point was actually set to. + :rtype: any base python type + + """ + topic = topic.strip('/') + external = True + if value is None: + result = self.revert_point(requester_id, topic) + else: + result = self.update_topic_rpc(requester_id, topic, value, external) + _log.debug("Writing: {topic} : {value} {result}".format(topic=topic, value=value, result=result)) + if result == SUCCESS: + return value + else: + raise RuntimeError("Failed to set value: " + result) + + @RPC.export + def revert_point(self, requester_id, topic, **kwargs): + """RPC method + + Reverts the value of a specific point on a device to a default state. + Does not require the device be scheduled. + + :param requester_id: Identifier given when requesting schedule. + :param topic: The topic of the point to revert in the + format / + :param **kwargs: These get dropped on the floor + :type topic: str + :type requester_id: str + + """ + obj = self.find_best_match(topic) + if obj and 'default' in obj: + value = obj.get('default') + _log.debug("Reverting topic " + topic + " to " + str(value)) + external = False + result = self.update_topic_rpc(requester_id, topic, value, external) + else: + result = FAILURE + _log.warning("Unable to revert topic. No topic match or default defined!") + return result + + @RPC.export + def revert_device(self, requester_id, device_name, **kwargs): + """RPC method + + Reverts all points on a device to a default state. + Does not require the device be scheduled. + + :param requester_id: Identifier given when requesting schedule. + :param topic: The topic of the device to revert (without a point!) + :param **kwargs: These get dropped on the floor + :type topic: str + :type requester_id: str + + """ + device_name = device_name.strip('/') + # we will assume that the topic is only the and revert all matches at this level! + objs = self.get_inputs_from_topic(device_name) + if objs is not None: + for obj in objs: + point_name = obj.get('field', None) + topic = device_name + "/" + point_name if point_name else device_name + external = False + if 'default' in obj: + value = obj.get('default') + _log.debug("Reverting " + topic + " to " + str(value)) + self.update_topic_rpc(requester_id, topic, value, external) + else: + _log.warning("Unable to revert " + topic + ". No default defined!") + + def update_topic_rpc(self, requester_id, topic, value, external): + """ + Find the best match for the topic and update the objects with + received values + :param requester_id: + :param topic: + :param value: + :param external: + :return: + """ + obj = self.find_best_match(topic) + if obj is not None: + obj['value'] = value + obj['external'] = external + obj['last_update'] = datetime.utcnow().isoformat(' ') + 'Z' + if not self.EnergyPlus_sim.real_time_periodic: + self.on_update_topic_rpc(requester_id, topic, value) + return SUCCESS + return FAILURE + + def on_update_topic_rpc(self, requester_id, topic, value): + """ + Send to EnergyPlus + :param requester_id: + :param topic: + :param value: + :return: + """ + self.send_on_all_inputs_updated() + + +def main(): + """Main method called to start the agent.""" + utils.vip_main(energyplus_example, version=__version__) + + +if __name__ == '__main__': + # Entry point for script + try: + sys.exit(main()) + except KeyboardInterrupt: + pass diff --git a/examples/EnergyPlusAgent/ep_building1.config b/examples/EnergyPlusAgent/ep_building1.config new file mode 100644 index 0000000000..aeb4109e59 --- /dev/null +++ b/examples/EnergyPlusAgent/ep_building1.config @@ -0,0 +1,3041 @@ +{ + "properties": { + "identity": "platform.actuator", + "model": "~/git/volttron/eplus/BUILDING1.idf", + "weather": "~/git/volttron/eplus/USA_WA_Pasco-Tri.Cities.AP.727845_TMY3.epw", + "bcvtb_home": "~/git/volttron/bcvtb", + "size" :40960, + "startmonth":8, + "startday":1, + "endmonth":8, + "endday":31, + "timestep":60, + "time_scale": 6, + "cosimulation_sync": true, + "realtime": true, + "co_sim_timestep": 5, + "real_time_flag": false + }, + "inputs": { + "CLGTEMPSETPOINT Zone-VAV-102": { + "name": "CLGTEMPSETPOINT Zone-VAV-102", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV102", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-118": { + "name": "CLGTEMPSETPOINT Zone-VAV-118", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV118", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-119": { + "name": "CLGTEMPSETPOINT Zone-VAV-119", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV119", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-120": { + "name": "CLGTEMPSETPOINT Zone-VAV-120", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV120", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-123A": { + "name": "CLGTEMPSETPOINT Zone-VAV-123A", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV123A", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-123B": { + "name": "CLGTEMPSETPOINT Zone-VAV-123B", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV123B", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-127A": { + "name": "CLGTEMPSETPOINT Zone-VAV-127A", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV127A", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-127B": { + "name": "CLGTEMPSETPOINT Zone-VAV-127B", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV127B", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-129": { + "name": "CLGTEMPSETPOINT Zone-VAV-129", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV129", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-131": { + "name": "CLGTEMPSETPOINT Zone-VAV-131", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV131", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-133": { + "name": "CLGTEMPSETPOINT Zone-VAV-133", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV133", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-136": { + "name": "CLGTEMPSETPOINT Zone-VAV-136", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV136", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-142": { + "name": "CLGTEMPSETPOINT Zone-VAV-142", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV142", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-143": { + "name": "CLGTEMPSETPOINT Zone-VAV-143", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV143", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-150": { + "name": "CLGTEMPSETPOINT Zone-VAV-150", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV150", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-100": { + "name": "CLGTEMPSETPOINT Zone-VAV-CORRIDOR", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV100", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-121": { + "name": "CLGTEMPSETPOINT Zone-VAV-RESTROOM", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU1/VAV121", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-104": { + "name": "CLGTEMPSETPOINT Zone-VAV-104", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU3/VAV104", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-105": { + "name": "CLGTEMPSETPOINT Zone-VAV-105", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU3/VAV105", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-107": { + "name": "CLGTEMPSETPOINT Zone-VAV-107", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU3/VAV107", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-108": { + "name": "CLGTEMPSETPOINT Zone-VAV-108", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU3/VAV108", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-112": { + "name": "CLGTEMPSETPOINT Zone-VAV-112", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU3/VAV112", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-VAV-116": { + "name": "CLGTEMPSETPOINT Zone-VAV-116", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU3/VAV116", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-AHU-002": { + "name": "CLGTEMPSETPOINT Zone-AHU-002", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU2", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "CLGTEMPSETPOINT Zone-AHU-004": { + "name": "CLGTEMPSETPOINT Zone-AHU-004", + "type": "schedule", + "topic": "PNNL/BUILDING1/AHU4", + "field": "ZoneCoolingTemperatureSetPoint", + "default": 21.11, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-102": { + "name": "BLDG LIGHT SCH Zone-VAV-102", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE102", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-104": { + "name": "BLDG LIGHT SCH Zone-VAV-104", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE104", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-105": { + "name": "BLDG LIGHT SCH Zone-VAV-105", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE105", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-107": { + "name": "BLDG LIGHT SCH Zone-VAV-107", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE107", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-108": { + "name": "BLDG LIGHT SCH Zone-VAV-108", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE108", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-112": { + "name": "BLDG LIGHT SCH Zone-VAV-112", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE112", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-116": { + "name": "BLDG LIGHT SCH Zone-VAV-116", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE116", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-118": { + "name": "BLDG LIGHT SCH Zone-VAV-118", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE118", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-119": { + "name": "BLDG LIGHT SCH Zone-VAV-119", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE119", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-120": { + "name": "BLDG LIGHT SCH Zone-VAV-120", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE120", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-123A": { + "name": "BLDG LIGHT SCH Zone-VAV-123A", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE123A", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-123B": { + "name": "BLDG LIGHT SCH Zone-VAV-123B", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE123B", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-127A": { + "name": "BLDG LIGHT SCH Zone-VAV-127A", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE127A", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-127B": { + "name": "BLDG LIGHT SCH Zone-VAV-127B", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE127B", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-129": { + "name": "BLDG LIGHT SCH Zone-VAV-129", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE129", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-131": { + "name": "BLDG LIGHT SCH Zone-VAV-131", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE131", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-133": { + "name": "BLDG LIGHT SCH Zone-VAV-133", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE133", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-142": { + "name": "BLDG LIGHT SCH Zone-VAV-142", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE142", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-143": { + "name": "BLDG LIGHT SCH Zone-VAV-143", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE143", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-150": { + "name": "BLDG LIGHT SCH Zone-VAV-150", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE150", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + }, + "BLDG LIGHT SCH Zone-VAV-136": { + "name": "BLDG LIGHT SCH Zone-VAV-136", + "type": "schedule", + "topic": "PNNL/BUILDING1/LIGHTING/ZONE136", + "field": "DimmingLevelOutput", + "default": 1.0, + "dynamic_default": 1.0 + } + }, + "outputs": { + "ENVIRONMENT Site Outdoor Air Drybulb Temperature1": { + "name": "ENVIRONMENT", + "type": "Site Outdoor Air Drybulb Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "OutdoorAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "ENVIRONMENT Site Outdoor Air Drybulb Temperature2": { + "name": "ENVIRONMENT", + "type": "Site Outdoor Air Drybulb Temperature", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "OutdoorAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "ENVIRONMENT Site Outdoor Air Drybulb Temperature3": { + "name": "ENVIRONMENT", + "type": "Site Outdoor Air Drybulb Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "OutdoorAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "ENVIRONMENT Site Outdoor Air Drybulb Temperature4": { + "name": "ENVIRONMENT", + "type": "Site Outdoor Air Drybulb Temperature", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "OutdoorAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-102 Zone Mean Air Temperature": { + "name": "Zone-VAV-102", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV102/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-118 Zone Mean Air Temperature": { + "name": "Zone-VAV-118", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV118/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-119 Zone Mean Air Temperature": { + "name": "Zone-VAV-119", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV119/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-120 Zone Mean Air Temperature": { + "name": "Zone-VAV-120", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV120/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123A Zone Mean Air Temperature": { + "name": "Zone-VAV-123A", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123A/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123B Zone Mean Air Temperature": { + "name": "Zone-VAV-123B", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123B/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127A Zone Mean Air Temperature": { + "name": "Zone-VAV-127A", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127A/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127B Zone Mean Air Temperature": { + "name": "Zone-VAV-127B", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127B/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-129 Zone Mean Air Temperature": { + "name": "Zone-VAV-129", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV129/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-131 Zone Mean Air Temperature": { + "name": "Zone-VAV-131", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV131/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-133 Zone Mean Air Temperature": { + "name": "Zone-VAV-133", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV133/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-136 Zone Mean Air Temperature": { + "name": "Zone-VAV-136", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV136/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-142 Zone Mean Air Temperature": { + "name": "Zone-VAV-142", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV142/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-143 Zone Mean Air Temperature": { + "name": "Zone-VAV-143", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV143/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-150 Zone Mean Air Temperature": { + "name": "Zone-VAV-150", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV150/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-100 Zone Mean Air Temperature": { + "name": "Zone-VAV-CORRIDOR", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV100/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-121 Zone Mean Air Temperature": { + "name": "Zone-VAV-RESTROOM", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV121/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-104 Zone Mean Air Temperature": { + "name": "Zone-VAV-104", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV104/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-105 Zone Mean Air Temperature": { + "name": "Zone-VAV-105", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV105/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-107 Zone Mean Air Temperature": { + "name": "Zone-VAV-107", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV107/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-108 Zone Mean Air Temperature": { + "name": "Zone-VAV-108", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV108/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-112 Zone Mean Air Temperature": { + "name": "Zone-VAV-112", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV112/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-116 Zone Mean Air Temperature": { + "name": "Zone-VAV-116", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV116/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-AHU-002 Zone Mean Air Temperature": { + "name": "Zone-AHU-002", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-AHU-004 Zone Mean Air Temperature": { + "name": "Zone-AHU-004", + "type": "Zone Mean Air Temperature", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "ZoneTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-Corridor VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-Corridor VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV100/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-102 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-102 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV102/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-104 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-104 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV104/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-105 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-105 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV105/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-107 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-107 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV107/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-108 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-108 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV108/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-112 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-112 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV112/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-116 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-116 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV116/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-118 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-118 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV118/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-119 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-119 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV119/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-120 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-120 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV120/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123A VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-123A VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123A/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123B VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-123B VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123B/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127A VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-127A VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127A/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127B VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-127B VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127B/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-129 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-129 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV129/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-131 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-131 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV131/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-133 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-133 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV133/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-136 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-136 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV136/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-142 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-142 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV142/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-143 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-143 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV143/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-150 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-150 VAV BOX COMPONENT", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV150/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-Restroom VAV BOX COMPONENT Zone Air Terminal VAV Damper Position": { + "name": "Zone-VAV-Restroom VAV Box Component", + "type": "Zone Air Terminal VAV Damper Position", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV121/all", + "field": "TerminalBoxDamperPosition", + "meta": { + "units": "percentage", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-102 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-102 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV102/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-118 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-118 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV118/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-119 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-119 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV119/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-120 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-120 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV120/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123A VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-123A VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123A/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123B VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-123B VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123B/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127A VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-127A VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127A/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127B VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-127B VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127B/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-129 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-129 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV129/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-131 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-131 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV131/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-133 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-133 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV133/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-136 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-136 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV136/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-142 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-142 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV142/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-143 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-143 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV143/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-150 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-150 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV150/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-100 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-CORRIDOR VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV100/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-121 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-RESTROOM VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV121/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-104 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-104 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV104/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-105 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-105 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV105/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-107 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-107 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV107/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-108 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-108 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV108/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-112 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-112 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV112/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-116 VAV BOX OUTLET NODE System Node Mass Flow Rate": { + "name": "Zone-VAV-116 VAV BOX OUTLET NODE", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV116/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-AHU-002 Direct Air Inlet Node Name System Node Mass Flow Rate": { + "name": "Zone-AHU-002 Direct Air Inlet Node Name", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "SupplyAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-AHU-004 Direct Air Inlet Node Name System Node Mass Flow Rate": { + "name": "Zone-AHU-004 Direct Air Inlet Node Name", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-102 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-102 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV102/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-118 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-118 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV118/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-119 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-119 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV119/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-120 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-120 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV120/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123A VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-123A VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123A/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123B VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-123B VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123B/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127A VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-127A VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127A/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127B VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-127B VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127B/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-129 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-129 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV129/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-131 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-131 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV131/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-133 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-133 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV133/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-136 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-136 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV136/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-142 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-142 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV142/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-143 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-143 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV143/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-150 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-150 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV150/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-100 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-CORRIDOR VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV100/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-121 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-RESTROOM VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV121/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-104 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-104 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV104/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-105 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-105 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV105/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-107 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-107 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV107/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-108 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-108 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV108/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-112 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-112 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV112/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-116 VAV BOX OUTLET NODE System Node Temperature": { + "name": "Zone-VAV-116 VAV BOX OUTLET NODE", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV116/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-AHU-002 Direct Air Inlet Node Name System Node Temperature": { + "name": "Zone-AHU-002 Direct Air Inlet Node Name", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-AHU-004 Direct Air Inlet Node Name System Node Temperature": { + "name": "Zone-AHU-004 Direct Air Inlet Node Name", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "ZoneDischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-001 Supply Equipment Outlet Node System Node Temperature": { + "name": "AHU-001 Supply Equipment Outlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "DischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-001 Supply Equipment Outlet Node System Node Mass Flow Rate": { + "name": "AHU-001 Supply Equipment Outlet Node", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "SupplyAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-001 Supply Equipment Inlet Node System Node Temperature": { + "name": "AHU-001 Supply Equipment Inlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "ReturnAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-001 OA CoolCNode System Node Temperature": { + "name": "AHU-001 OA CoolCNode", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "MixedAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-002 Supply Equipment Outlet Node System Node Temperature": { + "name": "AHU-002 Supply Equipment Outlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "DischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-002 Supply Equipment Outlet Node System Node Mass Flow Rate": { + "name": "AHU-002 Supply Equipment Outlet Node", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "ZoneAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-002 Supply Equipment Inlet Node System Node Temperature": { + "name": "AHU-002 Supply Equipment Inlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "ReturnAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-002 OA CoolCNode System Node Temperature": { + "name": "AHU-002 OA CoolCNode", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "MixedAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-003 Supply Equipment Outlet Node System Node Temperature": { + "name": "AHU-003 Supply Equipment Outlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "DischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-003 Supply Equipment Outlet Node System Node Mass Flow Rate": { + "name": "AHU-003 Supply Equipment Outlet Node", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "SupplyAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-003 Supply Equipment Inlet Node System Node Temperature": { + "name": "AHU-003 Supply Equipment Inlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "ReturnAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-003 OA CoolCNode System Node Temperature": { + "name": "AHU-003 OA CoolCNode", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "MixedAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-004 Supply Equipment Outlet Node System Node Temperature": { + "name": "AHU-004 Supply Equipment Outlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "DischargeAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-004 Supply Equipment Outlet Node System Node Mass Flow Rate": { + "name": "AHU-004 Supply Equipment Outlet Node", + "type": "System Node Mass Flow Rate", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "SupplyAirFlow", + "meta": { + "units": "cubicMetersPerSecond", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-004 Supply Equipment Inlet Node System Node Temperature": { + "name": "AHU-004 Supply Equipment Inlet Node", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "ReturnAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU-004 OA CoolCNode System Node Temperature": { + "name": "AHU-004 OA CoolCNode", + "type": "System Node Temperature", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "MixedAirTemperature", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "HVACOperationSchd Schedule Value1": { + "name": "HVACOperationSchd", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "SupplyFanStatus", + "meta": { + "units": "Enum", + "tz": "US/Pacific", + "type": "float" + } + }, + "HVACOperationSchd Schedule Value2": { + "name": "HVACOperationSchd", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "SupplyFanStatus", + "meta": { + "units": "Enum", + "tz": "US/Pacific", + "type": "float" + } + }, + "HVACOperationSchd Schedule Value3": { + "name": "HVACOperationSchd", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "SupplyFanStatus", + "meta": { + "units": "Enum", + "tz": "US/Pacific", + "type": "float" + } + }, + "HVACOperationSchd Schedule Value4": { + "name": "HVACOperationSchd", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "SupplyFanStatus", + "meta": { + "units": "Enum", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAVCorridor": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV100/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV102": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV102/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV118": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV118/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV119": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV119/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV120": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV120/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV123A": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123A/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV123B": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123B/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV127A": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127A/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV127B": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127B/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV129": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV129/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV131": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV131/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV136": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV136/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV133": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV133/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV142": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV142/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV143": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV143/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV150": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV150/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV104": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV104/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV105": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV105/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV107": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV107/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV108": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV108/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV112": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV112/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAV116": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV116/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "float" + } + }, + "ALWAYS ON Schedule Value VAVRESTROOM": { + "name": "ALWAYS ON", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV121/all", + "field": "OccupancyMode", + "meta": { + "units": "None", + "tz": "US/Pacific", + "type": "boolean" + } + }, + "AHU 1 Power": { + "name": "AHU-001 Fan", + "type": "Fan Electric Power", + "topic": "devices/PNNL/BUILDING1/AHU1/all", + "field": "SupplyFanPower", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU 2 Power": { + "name": "AHU-002 Fan", + "type": "Fan Electric Power", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "SupplyFanPower", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU 3 Power": { + "name": "AHU-003 Fan", + "type": "Fan Electric Power", + "topic": "devices/PNNL/BUILDING1/AHU3/all", + "field": "SupplyFanPower", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "AHU 4 Power": { + "name": "AHU-004 Fan", + "type": "Fan Electric Power", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "SupplyFanPower", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "DISTRICT COOLING District Cooling Rate": { + "name": "DISTRICT COOLING", + "type": "District Cooling Rate", + "topic": "devices/PNNL/BUILDING1/Chiller/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-102 Lights Electric Power": { + "name": "Zone-VAV-102 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE102/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-104 Lights Electric Power": { + "name": "Zone-VAV-104 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE104/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-105 Lights Electric Power": { + "name": "Zone-VAV-105 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE105/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-107 Lights Electric Power": { + "name": "Zone-VAV-107 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE107/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-108 Lights Electric Power": { + "name": "Zone-VAV-108 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE108/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-112 Lights Electric Power": { + "name": "Zone-VAV-112 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE112/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-116 Lights Electric Power": { + "name": "Zone-VAV-116 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE116/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-118 Lights Electric Power": { + "name": "Zone-VAV-118 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE118/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-119 Lights Electric Power": { + "name": "Zone-VAV-119 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE119/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-120 Lights Electric Power": { + "name": "Zone-VAV-120 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE120/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123A Lights Electric Power": { + "name": "Zone-VAV-123A Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE123A/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-123B Lights Electric Power": { + "name": "Zone-VAV-123B Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE123B/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127A Lights Electric Power": { + "name": "Zone-VAV-127A Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE127A/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-127B Lights Electric Power": { + "name": "Zone-VAV-127B Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE127B/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-129 Lights Electric Power": { + "name": "Zone-VAV-129 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE129/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-131 Lights Electric Power": { + "name": "Zone-VAV-131 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE131/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-133 Lights Electric Power": { + "name": "Zone-VAV-133 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE133/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-136 Lights Electric Power": { + "name": "Zone-VAV-136 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE136/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-142 Lights Electric Power": { + "name": "Zone-VAV-142 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE142/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-143 Lights Electric Power": { + "name": "Zone-VAV-143 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE143/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "Zone-VAV-150 Lights Electric Power": { + "name": "Zone-VAV-150 Lights", + "type": "Lights Electric Power", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE150/all", + "field": "Power", + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-102": { + "name": "BLDG LIGHT SCH Zone-VAV-102", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE102/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-104": { + "name": "BLDG LIGHT SCH Zone-VAV-104", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE104/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-105": { + "name": "BLDG LIGHT SCH Zone-VAV-105", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE105/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-107": { + "name": "BLDG LIGHT SCH Zone-VAV-107", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE107/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-108": { + "name": "BLDG LIGHT SCH Zone-VAV-108", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE108/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-112": { + "name": "BLDG LIGHT SCH Zone-VAV-112", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE112/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-116": { + "name": "BLDG LIGHT SCH Zone-VAV-116", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE116/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-118": { + "name": "BLDG LIGHT SCH Zone-VAV-118", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE118/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-119": { + "name": "BLDG LIGHT SCH Zone-VAV-119", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE119/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-120": { + "name": "BLDG LIGHT SCH Zone-VAV-120", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE120/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-123A": { + "name": "BLDG LIGHT SCH Zone-VAV-123A", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE123A/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-123B": { + "name": "BLDG LIGHT SCH Zone-VAV-123B", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE123B/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-127A": { + "name": "BLDG LIGHT SCH Zone-VAV-127A", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE127A/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-127B": { + "name": "BLDG LIGHT SCH Zone-VAV-127B", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE127B/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-129": { + "name": "BLDG LIGHT SCH Zone-VAV-129", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE129/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-131": { + "name": "BLDG LIGHT SCH Zone-VAV-131", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE131/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-133": { + "name": "BLDG LIGHT SCH Zone-VAV-133", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE133/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-136": { + "name": "BLDG LIGHT SCH Zone-VAV-136", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE136/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-142": { + "name": "BLDG LIGHT SCH Zone-VAV-142", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE142/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-143": { + "name": "BLDG LIGHT SCH Zone-VAV-143", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE143/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "BLDG LIGHT SCH Zone-VAV-150": { + "name": "BLDG LIGHT SCH Zone-VAV-150", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/LIGHTING/ZONE150/all", + "field": "DimmingLevelOutput", + "meta": { + "units": "fraction", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-102": { + "name": "CLGTEMPSETPOINT Zone-VAV-102", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV102/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-118": { + "name": "CLGTEMPSETPOINT Zone-VAV-118", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV118/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-119": { + "name": "CLGTEMPSETPOINT Zone-VAV-119", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV119/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-120": { + "name": "CLGTEMPSETPOINT Zone-VAV-120", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV120/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-123A": { + "name": "CLGTEMPSETPOINT Zone-VAV-123A", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123A/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-123B": { + "name": "CLGTEMPSETPOINT Zone-VAV-123B", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV123B/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-127A": { + "name": "CLGTEMPSETPOINT Zone-VAV-127A", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127A/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-127B": { + "name": "CLGTEMPSETPOINT Zone-VAV-127B", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV127B/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-129": { + "name": "CLGTEMPSETPOINT Zone-VAV-129", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV129/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-131": { + "name": "CLGTEMPSETPOINT Zone-VAV-131", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV131/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-133": { + "name": "CLGTEMPSETPOINT Zone-VAV-133", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV133/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-136": { + "name": "CLGTEMPSETPOINT Zone-VAV-136", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV136/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-142": { + "name": "CLGTEMPSETPOINT Zone-VAV-142", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV142/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-143": { + "name": "CLGTEMPSETPOINT Zone-VAV-143", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV143/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-150": { + "name": "CLGTEMPSETPOINT Zone-VAV-150", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV150/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-100": { + "name": "CLGTEMPSETPOINT Zone-VAV-CORRIDOR", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV100/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-121": { + "name": "CLGTEMPSETPOINT Zone-VAV-RESTROOM", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU1/VAV121/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-104": { + "name": "CLGTEMPSETPOINT Zone-VAV-104", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV104/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-105": { + "name": "CLGTEMPSETPOINT Zone-VAV-105", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV105/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-107": { + "name": "CLGTEMPSETPOINT Zone-VAV-107", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV107/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-108": { + "name": "CLGTEMPSETPOINT Zone-VAV-108", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV108/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-112": { + "name": "CLGTEMPSETPOINT Zone-VAV-112", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV112/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-VAV-116": { + "name": "CLGTEMPSETPOINT Zone-VAV-116", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU3/VAV116/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-AHU-002": { + "name": "CLGTEMPSETPOINT Zone-AHU-002", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU2/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "CLGTEMPSETPOINT Zone-AHU-004": { + "name": "CLGTEMPSETPOINT Zone-AHU-004", + "type": "Schedule Value", + "topic": "devices/PNNL/BUILDING1/AHU4/all", + "field": "ZoneCoolingTemperatureSetPoint", + "meta": { + "units": "degreesCentigrade", + "tz": "US/Pacific", + "type": "float" + } + }, + "Whole Building Power": { + "name": "Whole Building", + "type": "Facility Total Electric Demand Power", + "topic": "devices/PNNL/BUILDING1/METERS/all", + "field": "WholeBuildingPower", + "publish_last": true, + "meta": { + "units": "WATTS", + "tz": "US/Pacific", + "type": "float" + } + }, + "currentMonthV" : { + "name" : "EMS", + "type" : "currentMonthV" + }, + "currentDayOfMonthV" : { + "name" : "EMS", + "type" : "currentDayOfMonthV" + }, + "currentHourV" : { + "name" : "EMS", + "type" : "currentHourV" + }, + "currentMinuteV" : { + "name" : "EMS", + "type" : "currentMinuteV" + }, + "Dynamic_default1" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-102" + }, + "Dynamic_default2" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-104" + }, + "Dynamic_default3" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-105" + }, + "Dynamic_default4" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-107" + }, + "Dynamic_default5" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-108" + }, + "Dynamic_default6" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-112" + }, + "Dynamic_default7" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-116" + }, + "Dynamic_default8" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-118" + }, + "Dynamic_default9" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-119" + }, + "Dynamic_default10" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-120" + }, + "Dynamic_default11" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-123A" + }, + "Dynamic_default12" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-123B" + }, + "Dynamic_default13" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-127A" + }, + "Dynamic_default14" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-127B" + }, + "Dynamic_default15" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-129" + }, + "Dynamic_default16" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-131" + }, + "Dynamic_default17" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-133" + }, + "Dynamic_default18" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-142" + }, + "Dynamic_default19" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-143" + }, + "Dynamic_default20" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-150" + }, + "Dynamic_default21" : { + "name" : "BLDG LIGHT SCH bak", + "type" : "Schedule Value", + "default" : "BLDG LIGHT SCH Zone-VAV-136" + }, + "Dynamic_VAVdefault1" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-102" + }, + "Dynamic_VAVdefault2" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-118" + }, + "Dynamic_VAVdefault3" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-119" + }, + "Dynamic_VAVdefault4" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-120" + }, + "Dynamic_VAVdefault5" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-123A" + }, + "Dynamic_VAVdefault6" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-123B" + }, + "Dynamic_VAVdefault7" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-127A" + }, + "Dynamic_VAVdefault8" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-127B" + }, + "Dynamic_VAVdefault9" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-129" + }, + "Dynamic_VAVdefault10" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-131" + }, + "Dynamic_VAVdefault11" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-133" + }, + "Dynamic_VAVdefault12" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-136" + }, + "Dynamic_VAVdefault13" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-142" + }, + "Dynamic_VAVdefault14" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-143" + }, + "Dynamic_VAVdefault15" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-150" + }, + "Dynamic_VAVdefault16" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-CORRIDOR" + }, + "Dynamic_VAVdefault17" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-RESTROOM" + }, + "Dynamic_VAVdefault18" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-104" + }, + "Dynamic_VAVdefault19" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-105" + }, + "Dynamic_VAVdefault20" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-107" + }, + "Dynamic_VAVdefault21" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-108" + }, + "Dynamic_VAVdefault22" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-112" + }, + "Dynamic_VAVdefault23" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-VAV-116" + }, + "Dynamic_VAVdefault24" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-AHU-002" + }, + "Dynamic_VAVdefault25" : { + "name" : "CLGTEMPSETPOINT", + "type" : "Schedule Value", + "default" : "CLGTEMPSETPOINT Zone-AHU-004" + }, + "operation": { + "name": "HVACOperationSchd", + "type": "Schedule Value", + "field": "operation" + } + } +} diff --git a/examples/EnergyPlusAgent/ep_building1.yml b/examples/EnergyPlusAgent/ep_building1.yml new file mode 100644 index 0000000000..2bcf71276c --- /dev/null +++ b/examples/EnergyPlusAgent/ep_building1.yml @@ -0,0 +1,3018 @@ +# Config parameters for setting up EnergyPlus agent +properties: + identity: platform.actuator + model: ~/git/sim_volttron/volttron/eplus/BUILDING1.idf + weather: ~/git/sim_volttron/volttron/eplus/USA_WA_Pasco-Tri.Cities.AP.727845_TMY3.epw + bcvtb_home: ~/git/sim_volttron/volttron/bcvtb + size: 40960 + startmonth: 8 + startday: 1 + endmonth: 8 + endday: 31 + timestep: 60 + time_scale: 6 + cosimulation_sync: true + real_time_periodic: true + co_sim_timestep: 5 + real_time_flag: false + +# configuration for subscribing to EnergyPlus simulation +outputs: + # List of subscription information, typically contains + # - energyplus point name + # - type + # - publication topic for VOLTTRON (optional) to republish on VOLTTRON message bus + # - energyplus 'field' name + # - metadata information about the output + - sim_topic: ENVIRONMENT Site Outdoor Air Drybulb Temperature1 + name: ENVIRONMENT + type: Site Outdoor Air Drybulb Temperature + topic: devices/PNNL/BUILDING1/AHU1/all + field: OutdoorAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: ENVIRONMENT Site Outdoor Air Drybulb Temperature2 + name: ENVIRONMENT + type: Site Outdoor Air Drybulb Temperature + topic: devices/PNNL/BUILDING1/AHU2/all + field: OutdoorAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: ENVIRONMENT Site Outdoor Air Drybulb Temperature3 + name: ENVIRONMENT + type: Site Outdoor Air Drybulb Temperature + topic: devices/PNNL/BUILDING1/AHU3/all + field: OutdoorAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: ENVIRONMENT Site Outdoor Air Drybulb Temperature4 + name: ENVIRONMENT + type: Site Outdoor Air Drybulb Temperature + topic: devices/PNNL/BUILDING1/AHU4/all + field: OutdoorAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-102 Zone Mean Air Temperature + name: Zone-VAV-102 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV102/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-118 Zone Mean Air Temperature + name: Zone-VAV-118 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV118/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-119 Zone Mean Air Temperature + name: Zone-VAV-119 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV119/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-120 Zone Mean Air Temperature + name: Zone-VAV-120 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV120/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123A Zone Mean Air Temperature + name: Zone-VAV-123A + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV123A/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123B Zone Mean Air Temperature + name: Zone-VAV-123B + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV123B/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127A Zone Mean Air Temperature + name: Zone-VAV-127A + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV127A/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127B Zone Mean Air Temperature + name: Zone-VAV-127B + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV127B/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-129 Zone Mean Air Temperature + name: Zone-VAV-129 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV129/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-131 Zone Mean Air Temperature + name: Zone-VAV-131 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV131/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-133 Zone Mean Air Temperature + name: Zone-VAV-133 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV133/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-136 Zone Mean Air Temperature + name: Zone-VAV-136 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV136/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-142 Zone Mean Air Temperature + name: Zone-VAV-142 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV142/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-143 Zone Mean Air Temperature + name: Zone-VAV-143 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV143/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-150 Zone Mean Air Temperature + name: Zone-VAV-150 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV150/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-100 Zone Mean Air Temperature + name: Zone-VAV-CORRIDOR + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV100/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-121 Zone Mean Air Temperature + name: Zone-VAV-RESTROOM + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV121/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-104 Zone Mean Air Temperature + name: Zone-VAV-104 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV104/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-105 Zone Mean Air Temperature + name: Zone-VAV-105 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV105/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-107 Zone Mean Air Temperature + name: Zone-VAV-107 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV107/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-108 Zone Mean Air Temperature + name: Zone-VAV-108 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV108/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-112 Zone Mean Air Temperature + name: Zone-VAV-112 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV112/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-116 Zone Mean Air Temperature + name: Zone-VAV-116 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV116/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-AHU-002 Zone Mean Air Temperature + name: Zone-AHU-002 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU2/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-AHU-004 Zone Mean Air Temperature + name: Zone-AHU-004 + type: Zone Mean Air Temperature + topic: devices/PNNL/BUILDING1/AHU4/all + field: ZoneTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-Corridor VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-Corridor VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV100/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-102 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-102 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV102/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-104 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-104 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU3/VAV104/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-105 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-105 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU3/VAV105/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-107 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-107 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU3/VAV107/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-108 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-108 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU3/VAV108/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-112 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-112 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU3/VAV112/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-116 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-116 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU3/VAV116/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-118 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-118 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV118/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-119 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-119 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV119/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-120 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-120 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV120/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123A VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-123A VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV123A/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123B VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-123B VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV123B/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127A VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-127A VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV127A/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127B VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-127B VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV127B/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-129 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-129 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV129/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-131 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-131 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV131/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-133 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-133 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV133/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-136 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-136 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV136/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-142 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-142 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV142/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-143 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-143 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV143/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-150 VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-150 VAV BOX COMPONENT + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV150/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + - sim_topic: Zone-Restroom VAV BOX COMPONENT Zone Air Terminal VAV Damper Position + name: Zone-VAV-Restroom VAV Box Component + type: Zone Air Terminal VAV Damper Position + topic: devices/PNNL/BUILDING1/AHU1/VAV121/all + field: TerminalBoxDamperPosition + meta: + units: percentage + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-102 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-102 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV102/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-118 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-118 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV118/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-119 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-119 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV119/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-120 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-120 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV120/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123A VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-123A VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV123A/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123B VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-123B VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV123B/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127A VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-127A VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV127A/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-127B VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-127B VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV127B/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-129 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-129 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV129/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-131 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-131 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV131/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-133 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-133 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV133/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-136 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-136 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV136/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-142 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-142 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV142/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-143 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-143 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV143/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-150 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-150 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV150/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-100 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-CORRIDOR VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV100/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-121 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-RESTROOM VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/VAV121/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-104 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-104 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/VAV104/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-105 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-105 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/VAV105/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-107 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-107 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/VAV107/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-108 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-108 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/VAV108/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-112 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-112 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/VAV112/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-116 VAV BOX OUTLET NODE System Node Mass Flow Rate + name: Zone-VAV-116 VAV BOX OUTLET NODE + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/VAV116/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-AHU-002 Direct Air Inlet Node Name System Node Mass Flow Rate + name: Zone-AHU-002 Direct Air Inlet Node Name + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU2/all + field: SupplyAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-AHU-004 Direct Air Inlet Node Name System Node Mass Flow Rate + name: Zone-AHU-004 Direct Air Inlet Node Name + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU4/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-102 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-102 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV102/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-118 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-118 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV118/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-119 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-119 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV119/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-120 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-120 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV120/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-123A VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-123A VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV123A/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123B VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-123B VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV123B/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127A VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-127A VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV127A/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127B VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-127B VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV127B/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-129 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-129 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV129/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-131 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-131 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV131/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-133 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-133 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV133/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-136 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-136 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV136/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-142 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-142 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV142/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-143 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-143 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV143/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-150 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-150 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV150/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-100 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-CORRIDOR VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV100/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-121 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-RESTROOM VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/VAV121/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-104 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-104 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV104/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-105 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-105 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV105/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-107 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-107 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV107/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-108 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-108 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV108/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-112 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-112 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV112/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-116 VAV BOX OUTLET NODE System Node Temperature + name: Zone-VAV-116 VAV BOX OUTLET NODE + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/VAV116/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-AHU-002 Direct Air Inlet Node Name System Node Temperature + name: Zone-AHU-002 Direct Air Inlet Node Name + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU2/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: Zone-AHU-004 Direct Air Inlet Node Name System Node Temperature + name: Zone-AHU-004 Direct Air Inlet Node Name + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU4/all + field: ZoneDischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-001 Supply Equipment Outlet Node System Node Temperature + name: AHU-001 Supply Equipment Outlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/all + field: DischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-001 Supply Equipment Outlet Node System Node Mass Flow Rate + name: AHU-001 Supply Equipment Outlet Node + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU1/all + field: SupplyAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: AHU-001 Supply Equipment Inlet Node System Node Temperature + name: AHU-001 Supply Equipment Inlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/all + field: ReturnAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-001 OA CoolCNode System Node Temperature + name: AHU-001 OA CoolCNode + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU1/all + field: MixedAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-002 Supply Equipment Outlet Node System Node Temperature + name: AHU-002 Supply Equipment Outlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU2/all + field: DischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: AHU-002 Supply Equipment Outlet Node System Node Mass Flow Rate + name: AHU-002 Supply Equipment Outlet Node + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU2/all + field: ZoneAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: AHU-002 Supply Equipment Inlet Node System Node Temperature + name: AHU-002 Supply Equipment Inlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU2/all + field: ReturnAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-002 OA CoolCNode System Node Temperature + name: AHU-002 OA CoolCNode + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU2/all + field: MixedAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-003 Supply Equipment Outlet Node System Node Temperature + name: AHU-003 Supply Equipment Outlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/all + field: DischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-003 Supply Equipment Outlet Node System Node Mass Flow Rate + name: AHU-003 Supply Equipment Outlet Node + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU3/all + field: SupplyAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: AHU-003 Supply Equipment Inlet Node System Node Temperature + name: AHU-003 Supply Equipment Inlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/all + field: ReturnAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-003 OA CoolCNode System Node Temperature + name: AHU-003 OA CoolCNode + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU3/all + field: MixedAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-004 Supply Equipment Outlet Node System Node Temperature + name: AHU-004 Supply Equipment Outlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU4/all + field: DischargeAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: AHU-004 Supply Equipment Outlet Node System Node Mass Flow Rate + name: AHU-004 Supply Equipment Outlet Node + type: System Node Mass Flow Rate + topic: devices/PNNL/BUILDING1/AHU4/all + field: SupplyAirFlow + meta: + units: cubicMetersPerSecond + tz: US/Pacific + type: float + + + - sim_topic: AHU-004 Supply Equipment Inlet Node System Node Temperature + name: AHU-004 Supply Equipment Inlet Node + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU4/all + field: ReturnAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: AHU-004 OA CoolCNode System Node Temperature + name: AHU-004 OA CoolCNode + type: System Node Temperature + topic: devices/PNNL/BUILDING1/AHU4/all + field: MixedAirTemperature + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: HVACOperationSchd Schedule Value1 + name: HVACOperationSchd + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/all + field: SupplyFanStatus + meta: + units: Enum + tz: US/Pacific + type: float + + + - sim_topic: HVACOperationSchd Schedule Value2 + name: HVACOperationSchd + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU2/all + field: SupplyFanStatus + meta: + units: Enum + tz: US/Pacific + type: float + + + - sim_topic: HVACOperationSchd Schedule Value3 + name: HVACOperationSchd + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/all + field: SupplyFanStatus + meta: + units: Enum + tz: US/Pacific + type: float + + + - sim_topic: HVACOperationSchd Schedule Value4 + name: HVACOperationSchd + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU4/all + field: SupplyFanStatus + meta: + units: Enum + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAVCorridor + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV100/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV102 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV102/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV118 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV118/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV119 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV119/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV120 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV120/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV123A + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV123A/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV123B + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV123B/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV127A + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV127A/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV127B + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV127B/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV129 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV129/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV131 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV131/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV136 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV136/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV133 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV133/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV142 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV142/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV143 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV143/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV150 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV150/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV104 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV104/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV105 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV105/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV107 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV107/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV108 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV108/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAV112 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV112/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + + - sim_topic: ALWAYS ON Schedule Value VAV116 + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV116/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: float + + - sim_topic: ALWAYS ON Schedule Value VAVRESTROOM + name: ALWAYS ON + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV121/all + field: OccupancyMode + meta: + units: None + tz: US/Pacific + type: boolean + + - sim_topic: AHU 1 Power + name: AHU-001 Fan + type: Fan Electric Power + topic: devices/PNNL/BUILDING1/AHU1/all + field: SupplyFanPower + meta: + units: WATTS + tz: US/Pacific + type: float + + - sim_topic: AHU 2 Power + name: AHU-002 Fan + type: Fan Electric Power + topic: devices/PNNL/BUILDING1/AHU2/all + field: SupplyFanPower + meta: + units: WATTS + tz: US/Pacific + type: float + + - sim_topic: AHU 3 Power + name: AHU-003 Fan + type: Fan Electric Power + topic: devices/PNNL/BUILDING1/AHU3/all + field: SupplyFanPower + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: AHU 4 Power + name: AHU-004 Fan + type: Fan Electric Power + topic: devices/PNNL/BUILDING1/AHU4/all + field: SupplyFanPower + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: DISTRICT COOLING District Cooling Rate + name: DISTRICT COOLING + type: District Cooling Rate + topic: devices/PNNL/BUILDING1/Chiller/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-102 Lights Electric Power + name: Zone-VAV-102 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE102/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-104 Lights Electric Power + name: Zone-VAV-104 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE104/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-105 Lights Electric Power + name: Zone-VAV-105 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE105/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-107 Lights Electric Power + name: Zone-VAV-107 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE107/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-108 Lights Electric Power + name: Zone-VAV-108 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE108/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-112 Lights Electric Power + name: Zone-VAV-112 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE112/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-116 Lights Electric Power + name: Zone-VAV-116 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE116/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-118 Lights Electric Power + name: Zone-VAV-118 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE118/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-119 Lights Electric Power + name: Zone-VAV-119 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE119/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-120 Lights Electric Power + name: Zone-VAV-120 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE120/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + - sim_topic: Zone-VAV-123A Lights Electric Power + name: Zone-VAV-123A Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE123A/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-123B Lights Electric Power + name: Zone-VAV-123B Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE123B/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127A Lights Electric Power + name: Zone-VAV-127A Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE127A/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-127B Lights Electric Power + name: Zone-VAV-127B Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE127B/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-129 Lights Electric Power + name: Zone-VAV-129 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE129/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-131 Lights Electric Power + name: Zone-VAV-131 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE131/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-133 Lights Electric Power + name: Zone-VAV-133 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE133/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-136 Lights Electric Power + name: Zone-VAV-136 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE136/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-142 Lights Electric Power + name: Zone-VAV-142 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE142/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-143 Lights Electric Power + name: Zone-VAV-143 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE143/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: Zone-VAV-150 Lights Electric Power + name: Zone-VAV-150 Lights + type: Lights Electric Power + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE150/all + field: Power + meta: + units: WATTS + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-102 + name: BLDG LIGHT SCH Zone-VAV-102 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE102/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-104 + name: BLDG LIGHT SCH Zone-VAV-104 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE104/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + - sim_topic: BLDG LIGHT SCH Zone-VAV-105 + name: BLDG LIGHT SCH Zone-VAV-105 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE105/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-107 + name: BLDG LIGHT SCH Zone-VAV-107 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE107/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-108 + name: BLDG LIGHT SCH Zone-VAV-108 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE108/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-112 + name: BLDG LIGHT SCH Zone-VAV-112 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE112/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-116 + name: BLDG LIGHT SCH Zone-VAV-116 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE116/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-118 + name: BLDG LIGHT SCH Zone-VAV-118 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE118/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-119 + name: BLDG LIGHT SCH Zone-VAV-119 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE119/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-120 + name: BLDG LIGHT SCH Zone-VAV-120 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE120/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-123A + name: BLDG LIGHT SCH Zone-VAV-123A + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE123A/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-123B + name: BLDG LIGHT SCH Zone-VAV-123B + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE123B/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-127A + name: BLDG LIGHT SCH Zone-VAV-127A + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE127A/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-127B + name: BLDG LIGHT SCH Zone-VAV-127B + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE127B/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-129 + name: BLDG LIGHT SCH Zone-VAV-129 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE129/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-131 + name: BLDG LIGHT SCH Zone-VAV-131 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE131/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-133 + name: BLDG LIGHT SCH Zone-VAV-133 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE133/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-136 + name: BLDG LIGHT SCH Zone-VAV-136 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE136/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-142 + name: BLDG LIGHT SCH Zone-VAV-142 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE142/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-143 + name: BLDG LIGHT SCH Zone-VAV-143 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE143/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: BLDG LIGHT SCH Zone-VAV-150 + name: BLDG LIGHT SCH Zone-VAV-150 + type: Schedule Value + topic: devices/PNNL/BUILDING1/LIGHTING/ZONE150/all + field: DimmingLevelOutput + meta: + units: fraction + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-102 + name: CLGTEMPSETPOINT Zone-VAV-102 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV102/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-118 + name: CLGTEMPSETPOINT Zone-VAV-118 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV118/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-119 + name: CLGTEMPSETPOINT Zone-VAV-119 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV119/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-120 + name: CLGTEMPSETPOINT Zone-VAV-120 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV120/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-123A + name: CLGTEMPSETPOINT Zone-VAV-123A + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV123A/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-123B + name: CLGTEMPSETPOINT Zone-VAV-123B + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV123B/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-127A + name: CLGTEMPSETPOINT Zone-VAV-127A + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV127A/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-127B + name: CLGTEMPSETPOINT Zone-VAV-127B + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV127B/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-129 + name: CLGTEMPSETPOINT Zone-VAV-129 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV129/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-131 + name: CLGTEMPSETPOINT Zone-VAV-131 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV131/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-133 + name: CLGTEMPSETPOINT Zone-VAV-133 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV133/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-136 + name: CLGTEMPSETPOINT Zone-VAV-136 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV136/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-142 + name: CLGTEMPSETPOINT Zone-VAV-142 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV142/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-143 + name: CLGTEMPSETPOINT Zone-VAV-143 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV143/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-150 + name: CLGTEMPSETPOINT Zone-VAV-150 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV150/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-100 + name: CLGTEMPSETPOINT Zone-VAV-CORRIDOR + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV100/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-121 + name: CLGTEMPSETPOINT Zone-VAV-RESTROOM + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU1/VAV121/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-104 + name: CLGTEMPSETPOINT Zone-VAV-104 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV104/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-105 + name: CLGTEMPSETPOINT Zone-VAV-105 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV105/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-107 + name: CLGTEMPSETPOINT Zone-VAV-107 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV107/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-108 + name: CLGTEMPSETPOINT Zone-VAV-108 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV108/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-112 + name: CLGTEMPSETPOINT Zone-VAV-112 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV112/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-116 + name: CLGTEMPSETPOINT Zone-VAV-116 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU3/VAV116/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-AHU-002 + name: CLGTEMPSETPOINT Zone-AHU-002 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU2/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + + - sim_topic: CLGTEMPSETPOINT Zone-AHU-004 + name: CLGTEMPSETPOINT Zone-AHU-004 + type: Schedule Value + topic: devices/PNNL/BUILDING1/AHU4/all + field: ZoneCoolingTemperatureSetPoint + meta: + units: degreesCentigrade + tz: US/Pacific + type: float + + - sim_topic: currentMonthV + name: EMS + type: currentMonthV + + - sim_topic: currentDayOfMonthV + name: EMS + type: currentDayOfMonthV + + - sim_topic: currentHourV + name: EMS + type: currentHourV + + - sim_topic: currentMinuteV + name: EMS + type: currentMinuteV + + - sim_topic: Dynamic_default1 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-102 + + - sim_topic: Dynamic_default2 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-104 + + - sim_topic: Dynamic_default3 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-105 + + - sim_topic: Dynamic_default4 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-107 + + - sim_topic: Dynamic_default5 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-108 + + - sim_topic: Dynamic_default6 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-112 + + - sim_topic: Dynamic_default7 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-116 + + - sim_topic: Dynamic_default8 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-118 + + - sim_topic: Dynamic_default9 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-119 + + - sim_topic: Dynamic_default10 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-120 + + - sim_topic: Dynamic_default11 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-123A + + - sim_topic: Dynamic_default12 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-123B + + - sim_topic: Dynamic_default13 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-127A + + - sim_topic: Dynamic_default14 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-127B + + - sim_topic: Dynamic_default15 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-129 + + - sim_topic: Dynamic_default16 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-131 + + - sim_topic: Dynamic_default17 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-133 + + - sim_topic: Dynamic_default18 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-142 + + - sim_topic: Dynamic_default19 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-143 + + - sim_topic: Dynamic_default20 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-150 + + - sim_topic: Dynamic_default21 + name: BLDG LIGHT SCH bak + type: Schedule Value + default: BLDG LIGHT SCH Zone-VAV-136 + + - sim_topic: Dynamic_VAVdefault1 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-102 + + - sim_topic: Dynamic_VAVdefault2 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-118 + + - sim_topic: Dynamic_VAVdefault3 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-119 + + - sim_topic: Dynamic_VAVdefault4 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-120 + + - sim_topic: Dynamic_VAVdefault5 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-123A + + - sim_topic: Dynamic_VAVdefault6 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-123B + + - sim_topic: Dynamic_VAVdefault7 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-127A + + - sim_topic: Dynamic_VAVdefault8 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-127B + + - sim_topic: Dynamic_VAVdefault9 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-129 + + - sim_topic: Dynamic_VAVdefault10 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-131 + + - sim_topic: Dynamic_VAVdefault11 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-133 + + - sim_topic: Dynamic_VAVdefault12 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-136 + + - sim_topic: Dynamic_VAVdefault13 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-142 + + - sim_topic: Dynamic_VAVdefault14 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-143 + + - sim_topic: Dynamic_VAVdefault15 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-150 + + - sim_topic: Dynamic_VAVdefault16 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-CORRIDOR + + - sim_topic: Dynamic_VAVdefault17 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-RESTROOM + + - sim_topic: Dynamic_VAVdefault18 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-104 + + - sim_topic: Dynamic_VAVdefault19 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-105 + + - sim_topic: Dynamic_VAVdefault20 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-107 + + - sim_topic: Dynamic_VAVdefault21 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-108 + + - sim_topic: Dynamic_VAVdefault22 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-112 + + - sim_topic: Dynamic_VAVdefault23 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-VAV-116 + + - sim_topic: Dynamic_VAVdefault24 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-AHU-002 + + - sim_topic: Dynamic_VAVdefault25 + name: CLGTEMPSETPOINT + type: Schedule Value + default: CLGTEMPSETPOINT Zone-AHU-004 + + - sim_topic: operation + name: HVACOperationSchd + type: Schedule Value + field: operation + + - sim_topic: Whole Building Power + name: Whole Building + type: Facility Total Electric Demand Power + topic: devices/PNNL/BUILDING1/METERS/all + field: WholeBuildingPower + publish_last: true + meta: + units: WATTS + tz: US/Pacific + type: float + +# configuration for sending to EnergyPlus simulation +inputs: + # List of subscription information typically contains + # - EnergyPlus input name + # - type + # - subscription topic for VOLTTRON to receive message from VOLTTRON (agents) + # - EnergyPlus field + # - default value (if any) + # - dynamic default (receive dynamic default from + - sim_topic: CLGTEMPSETPOINT Zone-VAV-102 + name: CLGTEMPSETPOINT Zone-VAV-102 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV102 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-118 + name: CLGTEMPSETPOINT Zone-VAV-118 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV118 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-119 + name: CLGTEMPSETPOINT Zone-VAV-119 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV119 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-120 + name: CLGTEMPSETPOINT Zone-VAV-120 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV120 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-123A + name: CLGTEMPSETPOINT Zone-VAV-123A + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV123A + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-123B + name: CLGTEMPSETPOINT Zone-VAV-123B + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV123B + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-127A + name: CLGTEMPSETPOINT Zone-VAV-127A + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV127A + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-127B + name: CLGTEMPSETPOINT Zone-VAV-127B + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV127B + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-129 + name: CLGTEMPSETPOINT Zone-VAV-129 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV129 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-131 + name: CLGTEMPSETPOINT Zone-VAV-131 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV131 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-133 + name: CLGTEMPSETPOINT Zone-VAV-133 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV133 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-136 + name: CLGTEMPSETPOINT Zone-VAV-136 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV136 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-142 + name: CLGTEMPSETPOINT Zone-VAV-142 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV142 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-143 + name: CLGTEMPSETPOINT Zone-VAV-143 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV143 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-150 + name: CLGTEMPSETPOINT Zone-VAV-150 + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV150 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-100 + name: CLGTEMPSETPOINT Zone-VAV-CORRIDOR + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV100 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-121 + name: CLGTEMPSETPOINT Zone-VAV-RESTROOM + type: schedule + topic: PNNL/BUILDING1/AHU1/VAV121 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-104 + name: CLGTEMPSETPOINT Zone-VAV-104 + type: schedule + topic: PNNL/BUILDING1/AHU3/VAV104 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-105 + name: CLGTEMPSETPOINT Zone-VAV-105 + type: schedule + topic: PNNL/BUILDING1/AHU3/VAV105 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-107 + name: CLGTEMPSETPOINT Zone-VAV-107 + type: schedule + topic: PNNL/BUILDING1/AHU3/VAV107 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-108 + name: CLGTEMPSETPOINT Zone-VAV-108 + type: schedule + topic: PNNL/BUILDING1/AHU3/VAV108 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-112 + name: CLGTEMPSETPOINT Zone-VAV-112 + type: schedule + topic: PNNL/BUILDING1/AHU3/VAV112 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-VAV-116 + name: CLGTEMPSETPOINT Zone-VAV-116 + type: schedule + topic: PNNL/BUILDING1/AHU3/VAV116 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-AHU-002 + name: CLGTEMPSETPOINT Zone-AHU-002 + type: schedule + topic: PNNL/BUILDING1/AHU2 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: CLGTEMPSETPOINT Zone-AHU-004 + name: CLGTEMPSETPOINT Zone-AHU-004 + type: schedule + topic: PNNL/BUILDING1/AHU4 + field: ZoneCoolingTemperatureSetPoint + default: 21.11 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-102 + name: BLDG LIGHT SCH Zone-VAV-102 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE102 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-104 + name: BLDG LIGHT SCH Zone-VAV-104 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE104 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-105 + name: BLDG LIGHT SCH Zone-VAV-105 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE105 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-107 + name: BLDG LIGHT SCH Zone-VAV-107 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE107 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-108 + name: BLDG LIGHT SCH Zone-VAV-108 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE108 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-112 + name: BLDG LIGHT SCH Zone-VAV-112 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE112 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-116 + name: BLDG LIGHT SCH Zone-VAV-116 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE116 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-118 + name: BLDG LIGHT SCH Zone-VAV-118 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE118 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-119 + name: BLDG LIGHT SCH Zone-VAV-119 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE119 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-120 + name: BLDG LIGHT SCH Zone-VAV-120 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE120 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-123A + name: BLDG LIGHT SCH Zone-VAV-123A + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE123A + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-123B + name: BLDG LIGHT SCH Zone-VAV-123B + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE123B + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-127A + name: BLDG LIGHT SCH Zone-VAV-127A + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE127A + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-127B + name: BLDG LIGHT SCH Zone-VAV-127B + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE127B + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-129 + name: BLDG LIGHT SCH Zone-VAV-129 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE129 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-131 + name: BLDG LIGHT SCH Zone-VAV-131 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE131 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-133 + name: BLDG LIGHT SCH Zone-VAV-133 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE133 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-142 + name: BLDG LIGHT SCH Zone-VAV-142 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE142 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-143 + name: BLDG LIGHT SCH Zone-VAV-143 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE143 + field: DimmingLevelOutput + default: 1.0 + + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-150 + name: BLDG LIGHT SCH Zone-VAV-150 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE150 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + + - sim_topic: BLDG LIGHT SCH Zone-VAV-136 + name: BLDG LIGHT SCH Zone-VAV-136 + type: schedule + topic: PNNL/BUILDING1/LIGHTING/ZONE136 + field: DimmingLevelOutput + default: 1.0 + dynamic_default: 1.0 + diff --git a/examples/EnergyPlusAgent/setup.py b/examples/EnergyPlusAgent/setup.py new file mode 100644 index 0000000000..a0134583c3 --- /dev/null +++ b/examples/EnergyPlusAgent/setup.py @@ -0,0 +1,26 @@ +from setuptools import setup, find_packages + +MAIN_MODULE = 'agent' + +# Find the agent package that contains the main module +packages = find_packages('.') +agent_package = 'energyplus' + +# Find the version number from the main module +agent_module = agent_package + '.' + MAIN_MODULE +_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0) +__version__ = _temp.__version__ + +# Setup +setup( + name=agent_package + 'agent', + version=__version__, + + install_requires=['volttron'], + packages=packages, + entry_points={ + 'setuptools.installation': [ + 'eggsecutable = ' + agent_module + ':main', + ] + } +) diff --git a/examples/ExampleSubscriber/setup.py b/examples/ExampleSubscriber/setup.py index a0680ad2be..d831b7e574 100644 --- a/examples/ExampleSubscriber/setup.py +++ b/examples/ExampleSubscriber/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/ExampleSubscriber/subscriber/subscriber_agent.py b/examples/ExampleSubscriber/subscriber/subscriber_agent.py index d55955bf22..2ea9b899d0 100644 --- a/examples/ExampleSubscriber/subscriber/subscriber_agent.py +++ b/examples/ExampleSubscriber/subscriber/subscriber_agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/GridAPPS-D/README.md b/examples/GridAPPS-D/README.md new file mode 100644 index 0000000000..14a45a3b2a --- /dev/null +++ b/examples/GridAPPS-D/README.md @@ -0,0 +1,108 @@ +# GridAPPS-D Simulation Example Agent + +This is an example agent that demonstrates how to integrate with GridAPPS-D platform, +run power system simulations and send/receive messages back and forth between VOLTTRON and +GridAPPS-D environment. Technical documentation about the simulation framework can be found at +https://volttron.readthedocs.io/en/develop/developing-volttron/integrating-simulations/index.html + +## GridAPPS-D installation +For installing setup in Ubuntu based systems, follow the steps described in +https://GridAPPS-D.readthedocs.io/en/master/installing_GridAPPS-D/index.html + +## GridAPPS-D Agent Configuration + +In activated VOLTTRON environment, install all the GridAPPS-D dependent python packages + +``` +cd examples/GridAPPS-D/ +pip install -r requirements.txt +``` + +You can specify the configuration in either json or yaml format. The json format is specified +below. + +```` json +{ + "power_system_config": { + "GeographicalRegion_name": "_73C512BD-7249-4F50-50DA-D93849B89C43", + "SubGeographicalRegion_name": "_ABEB635F-729D-24BF-B8A4-E2EF268D8B9E", + "Line_name": "_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62" + }, + "application_config": { + "applications": [] + }, + "simulation_config": { + "start_time": "1595361226", + "duration": "120", + "simulator": "GridLAB-D", + "timestep_frequency": "1000", + "timestep_increment": "1000", + "run_realtime": true, + "simulation_name": "ieee13nodeckt", + "power_flow_solver_method": "NR", + "model_creation_config": { + "load_scaling_factor": "1", + "schedule_name": "ieeezipload", + "z_fraction": "0", + "i_fraction": "1", + "p_fraction": "0", + "randomize_zipload_fractions": false, + "use_houses": false + } + }, + "test_config": { + "events": [], + "appId": "" + }, + "service_configs": [] +} +```` + +## Running GridAPPS-D Simulation Example agent + +1. In a new terminal, navigate to 'GridAPPS-D-docker' directory. Start container services needed by GridAPPS-D. + ```` + ./run.sh + ```` + +2. Start GridAPPS-D within the docker environment + ```` + ./run-docker.sh + ```` + +3. In another terminal, start VOLTTRON and run a listener agent + ```` + ./start-volttron + ```` + +4. Start GridAPPS-D simulation example agent + ```` + source env/bin/activate + python scripts/install-agent.py -s examples/GridAPPS-D/ -c examples/GridAPPS-D/test_GridAPPS-D.json -i gappsd --start --force + ```` + +5. You will see that GridAPPS-D simulation starts and sends measurement data to VOLTTRON which is then republished + on VOLTTRON message bus + + ```` + 04 17:51:14,642 (listeneragent-3.3 27855) __main__ INFO: Peer: pubsub, Sender: gappsd:, Bus: , Topic: GridAPPS-D/measurement, Headers: {'Date': '2020-08-04T21:51:14.596162+00:00', 'Content-Type': 'application/json', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: + {'data': {'_00ff72f5-628c-462b-bdd1-2dcc1bd519b5': {'measurement_mrid': '_00ff72f5-628c-462b-bdd1-2dcc1bd519b5', + 'value': 1}, + '_017f359e-77e5-48ca-9a02-eaa59d14a941': {'angle': 86.21660957775951, + 'magnitude': 2560.0286365239986, + 'measurement_mrid': '_017f359e-77e5-48ca-9a02-eaa59d14a941'}, + '_04d9f780-ad0c-4205-b94d-531e66087f2d': {'measurement_mrid': '_04d9f780-ad0c-4205-b94d-531e66087f2d', + 'value': 1}, + '_0769c269-2a4f-4e30-a5ae-fa30f7dc271b': {'angle': 82.74673304218659, + 'magnitude': 2519.580420609152, + 'measurement_mrid': '_0769c269-2a4f-4e30-a5ae-fa30f7dc271b'}, + '_0793bcc6-eab5-45d1-891a-973379c5cdec': {'angle': 82.74673304218659, + 'magnitude': 2519.580420609152, + 'measurement_mrid': '_0793bcc6-eab5-45d1-891a-973379c5cdec'}, + '_0c2e8ddb-6043-4721-a276-27fc19e86c04': {'angle': -15.330435576009576, + 'magnitude': 569290.5754298957, + 'measurement_mrid': '_0c2e8ddb-6043-4721-a276-27fc19e86c04'}, + } + } + ```` + diff --git a/examples/GridAPPS-D/gridappsd_example.json b/examples/GridAPPS-D/gridappsd_example.json new file mode 100644 index 0000000000..6740054a9b --- /dev/null +++ b/examples/GridAPPS-D/gridappsd_example.json @@ -0,0 +1,46 @@ +{ + "power_system_config": { + "GeographicalRegion_name": "_73C512BD-7249-4F50-50DA-D93849B89C43", + "SubGeographicalRegion_name": "_ABEB635F-729D-24BF-B8A4-E2EF268D8B9E", + "Line_name": "_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62" + }, + "application_config": { + "applications": [] + }, + "simulation_config": { + "start_time": "1373814060", + "duration": "600", + "simulator": "GridLAB-D", + "timestep_frequency": "1000", + "timestep_increment": "1000", + "run_realtime": false, + "simulation_name": "ieee13nodeckt", + "power_flow_solver_method": "NR", + "model_creation_config": { + "load_scaling_factor": "1", + "schedule_name": "ieeezipload", + "z_fraction": "0", + "i_fraction": "1", + "p_fraction": "0", + "randomize_zipload_fractions": false, + "use_houses": false + } + }, + "test_config": { + "events": [{"message":{"forward_differences":[{"object":"_302E3119-B3ED-46A1-87D5-EBC8496357DF","attribute":"Switch.open","value":1}],"reverse_differences":[{"object":"_302E3119-B3ED-46A1-87D5-EBC8496357DF","attribute":"Switch.open","value":0}]},"event_type":"ScheduledCommandEvent","occuredDateTime":1373814120,"stopDateTime":1373817600}], + "appId": "" + }, + "service_configs": [{ + "id": "gridappsd-sensor-simulator", + "user_options": { + "default-perunit-confidence-band": 0.02, + "simulate-all": false, + "sensors-config": {}, + "default-normal-value": 100, + "random-seed": 0, + "default-aggregation-interval": 30, + "passthrough-if-not-specified": false, + "default-perunit-drop-rate": 0.05 + } + }] +} diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/__init__.py b/examples/GridAPPS-D/gridappsd_example/__init__.py similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/__init__.py rename to examples/GridAPPS-D/gridappsd_example/__init__.py diff --git a/examples/GridAPPS-D/gridappsd_example/agent.py b/examples/GridAPPS-D/gridappsd_example/agent.py new file mode 100644 index 0000000000..18c53ac9aa --- /dev/null +++ b/examples/GridAPPS-D/gridappsd_example/agent.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + + +__docformat__ = 'reStructuredText' + +from gevent.monkey import patch_all +patch_all() + +import gevent +import stomp +import logging +import sys +from gridappsd import topics as t +import yaml +from volttron.platform.messaging import headers as headers_mod +from volttron.platform.agent import utils +from volttron.platform.vip.agent import Agent, Core, RPC +from integrations.gridappsd_integration import GridAPPSDSimIntegration + +_log = logging.getLogger(__name__) +utils.setup_logging() +__version__ = "0.1" + + +def gridappsd_example(config_path, **kwargs): + """Parses the Agent configuration and returns an instance of + the agent created using that configuration. + + :param config_path: Path to a configuration file. + + :type config_path: str + :returns: GridappsdExample + :rtype: GridappsdExample + """ + try: + config = utils.load_config(config_path) + except Exception: + config = {} + _log.debug("CONFIG: {}".format(config)) + if not config: + _log.info("Using Agent defaults for starting configuration.") + + return GridappsdExample(config, **kwargs) + + +class GridappsdExample(Agent): + """ + GridappsdExampleAgent demonstrates how VOLTTRON agent can interact with + Gridappsd simulation environment + """ + + def __init__(self, config, **kwargs): + super(GridappsdExample, self).__init__(enable_store=False, **kwargs) + _log.debug("vip_identity: " + self.core.identity) + self.config = config + self.Gridappsd_sim = GridAPPSDSimIntegration(config, self.vip.pubsub) + self.volttron_subscriptions = None + self.sim_complete = False + self.rcvd_measurement = False + self.rcvd_first_measurement = 0 + self.are_we_paused = False + self.sim_complete = True + self.sim_running = False + + @Core.receiver("onstart") + def onstart(self, sender, **kwargs): + """ + Subscribe to VOLTTRON topics on VOLTTRON message bus. + Register config parameters with Gridappsd. + Start Gridappsd simulation. + """ + # subscribe to the VOLTTRON topics if given. + if self.volttron_subscriptions is not None: + for sub in self.volttron_subscriptions: + _log.info('Subscribing to {}'.format(sub)) + self.vip.pubsub.subscribe(peer='pubsub', + prefix=sub, + callback=self.on_receive_publisher_message) + + # Exit if GridAPPSD isn't installed in the current environment. + if not self.Gridappsd_sim.is_sim_installed(): + _log.error("GridAPPSD module is unavailable please add it to the python environment.") + self.core.stop() + return + + try: + # Register events with GridAPPSD + # There are 4 event callbacks that GridAPPSD provides to monitor the status + # - onstart, ontimestep, onmesurement, oncomplete + # This example shows how to register with GridAPPSD simulation to get + # event notifications + event_callbacks = {'MEASUREMENT': self.onmeasurement, + "TIMESTEP": self.ontimestep, + "FINISH": self.onfinishsimulation} + self.Gridappsd_sim.register_event_callbacks(event_callbacks) + + # Register the config file with GridAPPS-D + self.Gridappsd_sim.register_inputs(self.config, self.do_work) + # Start the simulation + self.Gridappsd_sim.start_simulation() + + # Waiting for simulation to start + i = 1 + while not self.Gridappsd_sim.sim_id and i < 20: + _log.debug(f"waiting for simulation to start {i}") + gevent.sleep(1) + i += 1 + + # Subscribe to GridAPPSD log messages + if self.Gridappsd_sim.sim_id: + self.Gridappsd_sim.gridappsd.subscribe( + t.simulation_log_topic(self.Gridappsd_sim.sim_id), + self.on_message) + self.sim_running = True + else: + self.sim_running = False + _log.debug("Simulation did not start") + except stomp.exception.NotConnectedException as ex: + _log.error("Unable to connect to GridAPPSD: {}".format(ex)) + _log.error("Exiting!!") + self.core.stop() + except ValueError as ex: + _log.error("Unable to register inputs with GridAPPSD: {}".format(ex)) + self.core.stop() + return + + def do_work(self): + """ + Dummy callback for GridAPPS-D sim + Unused + :return: + """ + pass + + def on_receive_publisher_message(self, peer, sender, bus, topic, headers, message): + """ + Subscribe to publisher publications and change the data accordingly + """ + # Update controller data + val = message[0] + # Do something with message + + @Core.receiver("onstop") + def onstop(self, sender, **kwargs): + """ + This method is called when the Agent is about to shutdown. + Disconnect from GridAPPSD simulation + """ + if self.sim_running: + self.Gridappsd_sim.stop_simulation() + + def onmeasurement(self, sim, timestep, measurements): + """ + Callback method to receive measurements + :param sim: simulation object + :param timestep: time step + :param measurements: measurement value + :return: + """ + _log.info('Measurement received at %s', timestep) + + if not self.are_we_paused and not self.rcvd_first_measurement: + _log.debug("Pausing sim now") + self.Gridappsd_sim.pause_simulation() + self.are_we_paused = True + _log.debug(f"ARWEPAUSED {self.are_we_paused}") + # Setting up so if we get another measurement while we + # are paused we know it + self.rcvd_measurement = False + # Resume simulation after 30 sec + self.core.spawn_later(30, self.resume_gridappsd_simulation) + + if not self.rcvd_measurement: + print(f"A measurement {measurements} happened at {timestep}") + data = {"data": measurements} + + headers = { + headers_mod.DATE: utils.format_timestamp(utils.get_aware_utc_now()), + headers_mod.CONTENT_TYPE: headers_mod.CONTENT_TYPE.JSON + } + # Publishing measurement on VOLTTRON message bus + self.vip.pubsub.publish(peer='pubsub', + topic='gridappsd/measurement', + message=data, + headers=headers) + self.rcvd_measurement = True + else: + self.rcvd_measurement = True + self.rcvd_first_measurement = True + + def ontimestep(self, sim, timestep): + """ + Event callback for timestep change + :param sim: + :param timestep: + :return: + """ + _log.debug("Timestamp: {}".format(timestep)) + + def onfinishsimulation(self, sim): + """ + Event callback to get notified when simulation has completed + :param sim: + :return: + """ + self.sim_complete = True + _log.info('Simulation Complete') + + def resume_gridappsd_simulation(self): + """ + Resume simulation if paused + :return: + """ + if self.are_we_paused: + _log.debug('Resuming simulation') + self.Gridappsd_sim.resume_simulation() + _log.debug('Resumed simulation') + self.are_we_paused = False + + def on_message(self, headers, message): + """ + Callback method to receive GridAPPSD log messages + :param headers: headers + :param message: log message + :return: + """ + try: + _log.debug("Received GridAPPSD log message: {}".format(message)) + json_msg = yaml.safe_load(str(message)) + + if "PAUSED" in json_msg["processStatus"]: + _log.debug("GridAPPSD simulation has paused!!") + + if "resume" in json_msg["logMessage"]: + _log.debug("GridAPPSD simulation has resumed!!") + + except Exception as e: + message_str = "An error occurred while trying to translate the message received" + str(e) + + +def main(): + """Main method called to start the agent.""" + utils.vip_main(gridappsd_example, version=__version__) + + +if __name__ == '__main__': + # Entry point for script + try: + sys.exit(main()) + except KeyboardInterrupt: + pass diff --git a/examples/GridAPPS-D/requirements.txt b/examples/GridAPPS-D/requirements.txt new file mode 100644 index 0000000000..503df4b6b5 --- /dev/null +++ b/examples/GridAPPS-D/requirements.txt @@ -0,0 +1,7 @@ +git+https://github.com/GRIDAPPSD/gridappsd-python.git@develop +stomp.py==6.0.0 +PyYAML==5.3 +pytz==2019.3 +# psutil==5.4.7 +dateutils==0.6.7 +docker diff --git a/examples/GridAPPS-D/setup.py b/examples/GridAPPS-D/setup.py new file mode 100644 index 0000000000..3314272884 --- /dev/null +++ b/examples/GridAPPS-D/setup.py @@ -0,0 +1,26 @@ +from setuptools import setup, find_packages + +MAIN_MODULE = 'agent' + +# Find the agent package that contains the main module +packages = find_packages('.') +agent_package = 'gridappsd_example' + +# Find the version number from the main module +agent_module = agent_package + '.' + MAIN_MODULE +_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0) +__version__ = _temp.__version__ + +# Setup +setup( + name=agent_package + 'agent', + version=__version__, + + install_requires=['volttron'], + packages=packages, + entry_points={ + 'setuptools.installation': [ + 'eggsecutable = ' + agent_module + ':main', + ] + } +) diff --git a/examples/GridAPPS-D/test_gridappsd.json b/examples/GridAPPS-D/test_gridappsd.json new file mode 100644 index 0000000000..b95a117afd --- /dev/null +++ b/examples/GridAPPS-D/test_gridappsd.json @@ -0,0 +1,34 @@ +{ + "power_system_config": { + "GeographicalRegion_name": "_73C512BD-7249-4F50-50DA-D93849B89C43", + "SubGeographicalRegion_name": "_ABEB635F-729D-24BF-B8A4-E2EF268D8B9E", + "Line_name": "_49AD8E07-3BF9-A4E2-CB8F-C3722F837B62" + }, + "application_config": { + "applications": [] + }, + "simulation_config": { + "start_time": "1595361226", + "duration": "120", + "simulator": "GridLAB-D", + "timestep_frequency": "1000", + "timestep_increment": "1000", + "run_realtime": true, + "simulation_name": "ieee13nodeckt", + "power_flow_solver_method": "NR", + "model_creation_config": { + "load_scaling_factor": "1", + "schedule_name": "ieeezipload", + "z_fraction": "0", + "i_fraction": "1", + "p_fraction": "0", + "randomize_zipload_fractions": false, + "use_houses": false + } + }, + "test_config": { + "events": [], + "appId": "" + }, + "service_configs": [] +} diff --git a/examples/GridAPPS-DAgent/requirements.txt b/examples/GridAPPS-DAgent/requirements.txt index 063708d1ba..d5af7b8a5f 100644 --- a/examples/GridAPPS-DAgent/requirements.txt +++ b/examples/GridAPPS-DAgent/requirements.txt @@ -1,5 +1,5 @@ -git+https://github.com/craig8/gridappsd-python.git@simulator_testing -stomp.py==5.0.1 +git+https://github.com/GRIDAPPSD/gridappsd-python.git@develop +stomp.py==6.0.0 PyYAML==5.3 pytz==2019.3 # psutil==5.4.7 diff --git a/examples/HELICS/README.md b/examples/HELICS/README.md new file mode 100644 index 0000000000..d0f3f7e0a8 --- /dev/null +++ b/examples/HELICS/README.md @@ -0,0 +1,110 @@ +# HELICS Example Agent + +This is an example agent that demonstrates how to integrate with HELICS co-simulation platform. Technical documentation +about the simulation framework can be found at https://volttron.readthedocs.io/en/develop/developing-volttron/integrating-simulations/index.html +## HELICS installation +For installing setup in Ubuntu based systems, follow the steps described in +https://helics.readthedocs.io/en/latest/installation/linux.html + +## Install python bindings of HELICS + +We need to also install python bindings of HELICS inside VOLTTRON environment. +This can be done by the following steps: + +1. Activate a VOLTTRON environment shell + ```` + source env/bin/activate + ```` +2. VOLTTRON uses older version of pip3. Upgrade to latest pip version since HELICS needs it. + ```` + pip install -U pip + ```` +3. Install python support for HELICS + ```` + pip install helics + ```` + +## HELICS Agent Configuration + +You can specify the configuration in either json or yaml format. The yaml format is specified +below. + +```` yml +# Config parameters for setting up HELICS federate +properties: + name: federate1 # unique name for the federate + loglevel: 5 # log level + coreType: zmq # core type + timeDelta: 1.0 # time delta (defaults to 1s) + uninterruptible: true + simulation_length: 360 # simulation length in seconds (defaults to 360s) + +# configuration for subscribing to HELICS simulation +outputs: + # List of subscription information, typically contains + # - HELICS subscription topic, + # - datatype + # - publication topic for VOLTTRON (optional) to republish the + # message on VOLTTRON message bus + # - additional/optional HELICS specific configuration + - sim_topic: federate2/totalLoad + volttron_topic: helics/abc + type: complex + required: true + +# configuration for publishing to HELICS simulation +inputs: + # List of publication information, containing + # - HELICS publication topic, + # - datatype + # - metadata associated with the topic (for example unit) + # - subscription topic for VOLTTRON message bus (optional) which can then be + # republished on HELICS with HELICS publication topic + # - additional/optional publication specific configuration + - sim_topic: pub1 # HELICS publication key + type: double # datatype + unit: m # unit + info: this is an information string for use by the application #additional info + volttron_topic: pub1/all # topic to subscribe on VOLTTRON bus + global: true + - sim_topic: pub2 + type: double + volttron_topic: pub2/all + +# Send/Receive messages directly to endpoints +endpoints: + # List of endpoint configuration + - name: federate1/EV6 # your endpoint (base prefix needs to be federate name, in our case it's "federate1") + destination: federate2/EV6 # destination endpoint + type: genmessage #message type + global: true # global endpoint: true/false + - name: federate1/EV5 + destination: federate2/EV5 + type: genmessage + global: true + +volttron_subscriptions: + - feeder0_output/all + +```` + +## Running HELICS Example agent + +1. Start HELICS broker in new terminal. We will specify two federates - one for HELICS example agent and another for +separate python federate script. + ```` + helics_broker -f 2 + ```` +2. Start HELICS example agent in new terminal at the root of VOLTTRON source directory + ```` + source env/bin/activate + python scripts/install-agent.py -s examples/HELICS/ -c examples/HELICS/helics_federate1.yml -i hexample --start --force + ```` +3. In another terminal, start another python federate. At the root of VOLTTRON source directory. + ```` + source env/bin/activate + python examples/HELICS/helics_federate.py examples/HELICS/helics_federate2.json + ```` + +You will see that messages are being sent and received between the two federates + diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/__init__.py b/examples/HELICS/helics_example/__init__.py similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/__init__.py rename to examples/HELICS/helics_example/__init__.py diff --git a/examples/HELICS/helics_example/agent.py b/examples/HELICS/helics_example/agent.py new file mode 100644 index 0000000000..3bd3ca56d7 --- /dev/null +++ b/examples/HELICS/helics_example/agent.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + + +__docformat__ = 'reStructuredText' + +import gevent +import logging +import random +import sys +from volttron.platform.agent import utils +from volttron.platform.vip.agent import Agent, Core, RPC +from integrations.helics_integration import HELICSSimIntegration + + +_log = logging.getLogger(__name__) +utils.setup_logging() +__version__ = "0.1" + + +def helics_example(config_path, **kwargs): + """Parses the Agent configuration and returns an instance of + the agent created using that configuration. + + :param config_path: Path to a configuration file. + + :type config_path: str + :returns: HelicsExample + :rtype: HelicsExample + """ + try: + config = utils.load_config(config_path) + except Exception: + config = {} + _log.debug("CONFIG: {}".format(config)) + if not config: + _log.info("Using Agent defaults for starting configuration.") + + return HelicsExample(config, **kwargs) + + +class HelicsExample(Agent): + """ + HelicsExampleAgent demonstrates how VOLTTRON agent can interact with HELICS simulation environment + """ + + def __init__(self, config, **kwargs): + super(HelicsExample, self).__init__(enable_store=False, **kwargs) + _log.debug("vip_identity: " + self.core.identity) + self.config = config + self.helics_sim = HELICSSimIntegration(config, self.vip.pubsub) + try: + self._federate_name = config['properties']['name'] + except KeyError: + self._federate_name = self.core.identity + self.volttron_subscriptions = config.get('volttron_subscriptions', None) + _log.debug("volttron subscriptions: {}".format(self.volttron_subscriptions)) + self.volttron_messages = None + self.endpoints = config.get('endpoints', None) + self.publications = config.get('inputs', None) + + @Core.receiver("onstart") + def onstart(self, sender, **kwargs): + """ + Subscribe to VOLTTRON topics on VOLTTRON message bus. + Register config parameters with HELICS. + Start HELICS simulation. + """ + # subscribe to the VOLTTRON topics if given. + if self.volttron_subscriptions is not None: + for sub in self.volttron_subscriptions: + _log.info('Subscribing to {}'.format(sub)) + self.vip.pubsub.subscribe(peer='pubsub', + prefix=sub, + callback=self.on_receive_publisher_message) + + # Subscribe to VOLTTRON topic to be republished on HELICS bus if needed + if self.publications is not None: + for pub in self.publications: + volttron_topic = pub.get('volttron_topic', None) + if volttron_topic is not None: + if self.volttron_messages is None: + self.volttron_messages = dict() + _log.info('Subscribing to {}'.format(volttron_topic)) + self.vip.pubsub.subscribe(peer='pubsub', + prefix=volttron_topic, + callback=self.on_receive_volttron_message) + self.volttron_messages[volttron_topic] = dict(pub_key=pub['sim_topic'], + value=None, + global_flag=pub.get('global', False), + received=False) + + # Exit if HELICS isn't installed in the current environment. + if not self.helics_sim.is_sim_installed(): + _log.error("HELICS module is unavailable please add it to the python environment.") + self.core.stop() + return + + # Register inputs with HELICS and start simulation + try: + self.helics_sim.register_inputs(self.config, self.do_work) + self.helics_sim.start_simulation() + except ValueError as ex: + _log.error("Unable to register inputs with HELICS: {}".format(ex)) + self.core.stop() + return + + def do_work(self): + """ + Perform application specific work here using HELICS messages + :return: + """ + current_values = self.helics_sim.current_values + _log.debug("Doing work: {}".format(self.core.identity)) + _log.debug("Current set of values from HELICS: {}".format(current_values)) + # Do something with HELICS messages + # agent specific work!!! + + # Send messages to endpoints as well + for endpoint in self.endpoints: + val = '200000 + 0 j' + status = self.helics_sim.send_to_endpoint(endpoint['name'], endpoint['destination'], val) + + for pub in self.publications: + key = pub['sim_topic'] + # Check if VOLTTRON topic has been configured. If no, publish dummy value for the HELICS + # publication key + volttron_topic = pub.get('volttron_topic', None) + if volttron_topic is None: + value = 90.5 + global_flag = pub.get('global', False) + # If global flag is False, prepend federate name to the key + if not global_flag: + key = "{fed}/{key}".format(fed=self._federate_name, key=key) + value = 67.90 + self.helics_sim.publish_to_simulation(key, value) + + value = {} + # Check if the VOLTTRON agents update the information + # if self.volttron_messages is not None: + # topics_ready = all([v['received'] for k, v in self.volttron_messages.items()]) + # while not topics_ready: + # gevent.sleep(0.2) + # topics_ready = all([v['received'] for k, v in self.volttron_messages.items()]) + # for k, v in self.received_volttron.items(): + # self.received_volttron[k] = False + # + # for topic, msg in self.volttron_messages: + # key = msg['pub_key'] + # if not msg['global_flag']: + # key = "{fed}/{key}".format(fed=self._federate_name, key=key) + # value = msg['value'] + # self.helics_sim.publish_to_simulation(key, value) + # _log.debug("Published New value : {} to HELICS key: {}".format(value)) + + # Request HELICS to advance timestep + self.helics_sim.make_time_request() + + def on_receive_publisher_message(self, peer, sender, bus, topic, headers, message): + """ + Subscribe to publisher publications and change the data accordingly + """ + # Update controller data + val = message[0] + # Do something with message + + def on_receive_volttron_message(self, peer, sender, bus, topic, headers, message): + """ + Subscribe to publisher publications and change the data accordingly + """ + _log.debug("Received volttron topic: {}, value: {}".format(topic, message)) + # Update controller data + val = message + self.volttron_messages[topic]['value'] = val + self.volttron_messages[topic]['received'] = True + + @Core.receiver("onstop") + def onstop(self, sender, **kwargs): + """ + This method is called when the Agent is about to shutdown, but before it + disconnects from the message bus. + """ + pass + + +def main(): + """Main method called to start the agent.""" + utils.vip_main(helics_example, version=__version__) + + +if __name__ == '__main__': + # Entry point for script + try: + sys.exit(main()) + except KeyboardInterrupt: + pass diff --git a/examples/HELICS/helics_federate.py b/examples/HELICS/helics_federate.py new file mode 100644 index 0000000000..5674aca904 --- /dev/null +++ b/examples/HELICS/helics_federate.py @@ -0,0 +1,156 @@ +# -*- coding: utf-8 -*- +# {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import time +import helics as h +import logging +import argparse +import random + +logger = logging.getLogger(__name__) + + +def destroy_federate(fed): + """ + fed: federate to destroy + """ + h.helicsFederateFinalize(fed) + h.helicsFederateFree(fed) + h.helicsCloseLibrary() + + +def federate_example(config_path): + # Registering federate from json + + try: + fed = h.helicsCreateCombinationFederateFromConfig(config_path) + except h._helics.HelicsException as exc: + print("Exception occured".format(exc)) + exit(-1) + federate_name = h.helicsFederateGetName(fed) + print(federate_name) + endpoint_count = h.helicsFederateGetEndpointCount(fed) + subkeys_count = h.helicsFederateGetInputCount(fed) + pubkeys_count = h.helicsFederateGetPublicationCount(fed) + + # Reference to Publications and Subscription form index + endid = {} + subid = {} + pubid = {} + for i in range(0,endpoint_count): + endid["m{}".format(i)] = h.helicsFederateGetEndpointByIndex(fed, i) + end_name = h.helicsEndpointGetName(endid["m{}".format(i)]) + logger.info( 'Registered Endpoint ---> {}'.format(end_name)) + + for i in range(0, subkeys_count): + idx = h.helicsFederateGetInputByIndex(fed, i) + subid["m{}".format(i)] = idx + status = h.helicsInputSetDefaultComplex(subid["m{}".format(i)], 0, 0) + sub_key = h.helicsSubscriptionGetKey(idx) + logger.info( 'Registered Subscription ---> {}'.format(sub_key)) + + for i in range(0, pubkeys_count): + idx = h.helicsFederateGetPublicationByIndex(fed, i) + pubid["m{}".format(i)] = idx + pub_key = h.helicsPublicationGetKey(idx) + logger.info( 'Registered Publications ---> {}'.format(pub_key)) + + print('###############################################################################################') + print('######################## Entering Execution Mode ##########################################') + # Entering Execution Mode + h.helicsFederateEnterExecutingMode(fed) + print('###############################################################################################') + + hours = 0.1 + total_inteval = int(60 * 60 * hours) + grantedtime = -1 + update_interval = 1 # 5*60 + k = 0 + data ={} + time.sleep(5) + time_sim = [] + real = 0 + for t in range(0, total_inteval, update_interval): + while grantedtime < t: + grantedtime = h.helicsFederateRequestTime (fed, t) + time.sleep(0.1) + print('######################## Time interval {} ##########################################'.format(t)) + print('######################## Publishing to topics ######################################') + real = real + 1 + for i in range(0, pubkeys_count): + idx = pubid["m{}".format(i)] + h.helicsPublicationPublishComplex(idx, real*i, 78) + + print( '######################## Get input from subscribed topics #########################') + for i in range(0, subkeys_count): + idx = subid["m{}".format(i)] + value = h.helicsInputGetDouble(idx) + key = h.helicsSubscriptionGetKey(idx) + print("Value for key: {} is {}".format(key, value)) + + print('######################## Get from Endpoint #########################################') + idx = endid["m{}".format(0)] + while h.helicsEndpointHasMessage(idx): + msg = h.helicsEndpointGetMessage(idx) + end_name = h.helicsEndpointGetName(idx) + print("Value from endpoint name: {} is {}".format(end_name, msg.data)) + + print('######################## Send to VOLTTRON Endpoint #################################') + for i in range(0, endpoint_count): + idx = endid["m{}".format(i)] + value = i + random.randint(1, 101) + 89.7 + end_name = h.helicsEndpointGetName(idx) + print("Sending Value:{0} for endpoint: {1}".format(value, end_name)) + h.helicsEndpointSendEventRaw(idx, '', str(value), t) + end_name = h.helicsEndpointGetName(idx) + + logger.info("Destroying federate") + destroy_federate(fed) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument('config_path', + help='path of the config file') + args = parser.parse_args() + try: + federate_example(args.config_path) + except KeyboardInterrupt: + logger.info("Exiting due to keyboard interrupt") diff --git a/examples/HELICS/helics_federate1.yml b/examples/HELICS/helics_federate1.yml new file mode 100644 index 0000000000..82c22682f7 --- /dev/null +++ b/examples/HELICS/helics_federate1.yml @@ -0,0 +1,90 @@ +# Config parameters for setting up HELICS federate +properties: + name: federate1 # unique name for the federate + loglevel: 5 # log level + coreType: zmq # core type + timeDelta: 1.0 # time delta (defaults to 1s) + uninterruptible: true + simulation_length: 360 # simulation length in seconds (defaults to 360s) + +# configuration for subscribing to HELICS simulation +outputs: + # List of subscription information, typically contains + # - HELICS subscription topic, + # - datatype + # - publication topic for VOLTTRON (optional) to republish the + # message on VOLTTRON message bus + # - additional/optional simulation specific configuration + - sim_topic: federate2/totalLoad + volttron_topic: helics/abc + type: complex + required: true + - sim_topic: federate2/charge_EV6 + volttron_topic: helics/ev6 + type: complex + required: true + - sim_topic: federate2/charge_EV5 + type: complex + required: true + - sim_topic: federate2/charge_EV4 + type: complex + required: true + - sim_topic: federate2/charge_EV3 + type: complex + required: true + - sim_topic: federate2/charge_EV2 + type: complex + required: true + - sim_topic: federate2/charge_EV1 + type: complex + required: true + +# configuration for publishing to HELICS simulation +inputs: + # List of publication information, containing + # - HELICS publication topic, + # - datatype + # - metadata associated with the topic (for example unit) + # - subscription topic for VOLTTRON message bus (optional) which can then be + # republished on HELICS with HELICS publication topic + # - additional/optional publication specific configuration + - sim_topic: pub1 # HELICS publication key + type: double # datatype + unit: m # unit + info: this is an information string for use by the application #additional info + volttron_topic: pub1/all # topic to subscribe on VOLTTRON bus + global: true + - sim_topic: pub2 + type: double + volttron_topic: pub2/all + +# Send/Receive messages directly to endpoints +endpoints: + # List of endpoint configuration + - name: federate1/EV6 # your endpoint (base prefix needs to be federate name, in our case it's "federate1") + destination: federate2/EV6 # destination endpoint + type: genmessage #message type + global: true # global endpoint: true/false + - name: federate1/EV5 + destination: federate2/EV5 + type: genmessage + global: true + - name: federate1/EV4 + destination: federate2/EV4 + type: genmessage + global: true + - name: federate1/EV3 + destination: federate2/EV3 + type: genmessage + global: true + - name: federate1/EV2 + destination: federate2/EV2 + type: genmessage + global: true + - name: federate1/EV1 + destination: federate2/EV1 + type: genmessage + global: true + +volttron_subscriptions: + - feeder0_output/all diff --git a/examples/HELICS/helics_federate2.json b/examples/HELICS/helics_federate2.json new file mode 100644 index 0000000000..d6fce37a7d --- /dev/null +++ b/examples/HELICS/helics_federate2.json @@ -0,0 +1,87 @@ +{ + "name": "federate2", + "loglevel": 5, + "coreType": "zmq", + "timeDelta": 1.0, + "uninterruptible": true, + + "publications":[ + { + "key": "totalLoad", + "type": "complex" + }, { + "key": "federate2/charge_EV6", + "type": "complex", + "global": true + }, { + "key": "federate2/charge_EV5", + "type": "complex", + "global": true + }, { + "key": "federate2/charge_EV4", + "type": "complex", + "global": true + }, { + "key": "federate2/charge_EV3", + "type": "complex", + "global": true + }, { + "key": "federate2/charge_EV2", + "type": "complex", + "global": true + }, { + "key": "federate2/charge_EV1", + "type": "complex", + "global": true + }], + "subscriptions":[ + { + "key":"pub1", + "type":"double", + "required": true + }, + { + "key": "federate1/pub2", + "type": "double", + "required": true + } + ], + "endpoints":[ + { + "name": "federate2/EV6", + "destination": "federate1/EV1", + "type": "genmessage", + "global": true + }, + { + "name": "federate2/EV5", + "destination": "federate1/EV2", + "type": "genmessage", + "global": true + }, + { + "name": "federate2/EV4", + "destination": "federate1/EV3", + "type": "genmessage", + "global": true + }, + { + "name": "federate2/EV3", + "destination": "federate1/EV4", + "type": "genmessage", + "global": true + }, + { + "name": "federate2/EV2", + "destination": "federate1/EV5", + "type": "genmessage", + "global": true + }, + { + "name": "federate2/EV1", + "destination": "federate1/EV6", + "type": "genmessage", + "global": true + } +] +} diff --git a/examples/HELICS/setup.py b/examples/HELICS/setup.py new file mode 100644 index 0000000000..d67ee5c237 --- /dev/null +++ b/examples/HELICS/setup.py @@ -0,0 +1,26 @@ +from setuptools import setup, find_packages + +MAIN_MODULE = 'agent' + +# Find the agent package that contains the main module +packages = find_packages('.') +agent_package = 'helics_example' + +# Find the version number from the main module +agent_module = agent_package + '.' + MAIN_MODULE +_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0) +__version__ = _temp.__version__ + +# Setup +setup( + name=agent_package + 'agent', + version=__version__, + + install_requires=['volttron'], + packages=packages, + entry_points={ + 'setuptools.installation': [ + 'eggsecutable = ' + agent_module + ':main', + ] + } +) diff --git a/examples/JupyterNotebooks/BacnetCollector.ipynb b/examples/JupyterNotebooks/BacnetCollector.ipynb index 09bd68def6..32f69bbde1 100644 --- a/examples/JupyterNotebooks/BacnetCollector.ipynb +++ b/examples/JupyterNotebooks/BacnetCollector.ipynb @@ -459,7 +459,7 @@ " file.write(config)\n", "print('BACnet configuration written to {}'.format(config_path))\n", "\n", - "# Store the configurations in the master driver.\n", + "# Store the configurations in the platform driver.\n", "print('\\nWait for the platform driver config to display, then confirm that this config appears in it...')\n", "install_driver_csv(name='my_bacnet.csv', csv=csv_path)\n", "install_driver_config(name='devices/my_bacnet', config=config_path)\n", @@ -479,7 +479,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Install each agent employed by the Collector: a MasterDriver, a ForwardHistorian, a BACnetProxy, and 2 Volttron Central agents." + "Install each agent employed by the Collector: a PlatformDriver, a ForwardHistorian, a BACnetProxy, and 2 Volttron Central agents." ] }, { @@ -497,10 +497,10 @@ " _sh(script_install_command.format(dir, id, config, tag))\n", " print('Installed {}'.format(tag))\n", "\n", - "# Install the MasterDriver agent which runs the Bacnet driver\n", - "install_agent(dir=vroot+'/services/core/MasterDriverAgent/',\n", + "# Install the PlatformDriver agent which runs the Bacnet driver\n", + "install_agent(dir=vroot+'/services/core/PlatformDriverAgent/',\n", " id='platform.driver',\n", - " config=vroot+'/services/core/MasterDriverAgent/master-driver.agent',\n", + " config=vroot+'/services/core/PlatformDriverAgent/platform-driver.agent',\n", " tag='platform.driver')\n", "\n", "# Install a ForwardHistorian agent that forwards metrics to another VOLTTRON instance\n", diff --git a/examples/JupyterNotebooks/ChargePointCollector.ipynb b/examples/JupyterNotebooks/ChargePointCollector.ipynb index a25da1678c..5b6cce2d21 100644 --- a/examples/JupyterNotebooks/ChargePointCollector.ipynb +++ b/examples/JupyterNotebooks/ChargePointCollector.ipynb @@ -414,7 +414,7 @@ " file.write(config)\n", "print('ChargePoint configuration written to {}'.format(config_path))\n", " \n", - "# Store the configurations in the master driver.\n", + "# Store the configurations in the platform driver.\n", "print('\\nWait for the platform driver config to display, then confirm that this config appears in it...')\n", "install_driver_csv(name='my_chargepoint.csv', csv=csv_path)\n", "install_driver_config(name='devices/my_chargepoint', config=config_path)\n", @@ -434,7 +434,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Install each agent employed by the Collector: a master driver, a ForwardHistorian, and 2 Volttron Central agents." + "Install each agent employed by the Collector: a platform driver, a ForwardHistorian, and 2 Volttron Central agents." ] }, { @@ -452,10 +452,10 @@ " _sh(script_install_command.format(dir, id, config, tag))\n", " print('Installed {}'.format(tag))\n", "\n", - "# Install the MasterDriver agent which runs the Bacnet driver\n", - "install_agent(dir=vroot+'/services/core/MasterDriverAgent/',\n", + "# Install the PlatformDriver agent which runs the Bacnet driver\n", + "install_agent(dir=vroot+'/services/core/PlatformDriverAgent/',\n", " id='platform.driver',\n", - " config=vroot+'/services/core/MasterDriverAgent/master-driver.agent',\n", + " config=vroot+'/services/core/PlatformDriverAgent/platform-driver.agent',\n", " tag='platform.driver')\n", "\n", "# Install a ForwardHistorian agent that forwards metrics to another VOLTTRON instance\n", diff --git a/examples/JupyterNotebooks/IEEE2030_5Collector.ipynb b/examples/JupyterNotebooks/IEEE2030_5Collector.ipynb index 61c1986171..387bb9126d 100644 --- a/examples/JupyterNotebooks/IEEE2030_5Collector.ipynb +++ b/examples/JupyterNotebooks/IEEE2030_5Collector.ipynb @@ -327,7 +327,7 @@ " file.write(config)\n", "print('IEEE 2030.5 configuration written to {}\\n'.format(config_path))\n", "\n", - "# Store the configurations in the master driver.\n", + "# Store the configurations in the platform driver.\n", "print('\\nWait for the platform driver config to display, then confirm that this config appears in it...')\n", "install_driver_csv(name='my_ieee2030_5.csv', csv=csv_path)\n", "install_driver_config(name='devices/my_ieee2030_5', config=config_path)\n", @@ -348,7 +348,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Install each agent employed by the Collector: a master driver, a IEEE 2030.5 Agent, a ForwardHistorian, and 2 Volttron Central agents." + "Install each agent employed by the Collector: a platform driver, a IEEE 2030.5 Agent, a ForwardHistorian, and 2 Volttron Central agents." ] }, { @@ -364,10 +364,10 @@ " _sh(script_install_command.format(dir, id, config, tag))\n", " print('Installed {}'.format(tag))\n", "\n", - "# Install the MasterDriver agent which runs the Bacnet driver\n", - "install_agent(dir=vroot+'/services/core/MasterDriverAgent/',\n", + "# Install the PlatformDriver agent which runs the Bacnet driver\n", + "install_agent(dir=vroot+'/services/core/PlatformDriverAgent/',\n", " id='platform.driver',\n", - " config=vroot+'/services/core/MasterDriverAgent/master-driver.agent',\n", + " config=vroot+'/services/core/PlatformDriverAgent/platform-driver.agent',\n", " tag='platform.driver')\n", "\n", "# Install a ForwardHistorian agent that forwards metrics to another VOLTTRON instance\n", @@ -379,7 +379,7 @@ "# Install a IEEE 2030.5 agent.\n", "install_agent(dir=vroot + '/services/core/IEEE2030_5Agent',\n", " id='ieee2030_5agent',\n", - " config=vroot + '/services/core/IEEE2030_5Agent/ieee2030_5.config',\n", + " config=vroot + '/services/core/IEEE2030_5Agent/config',\n", " tag='ieee2030_5agent')\n", "\n", "# Install a Platform Agent\n", diff --git a/examples/JupyterNotebooks/ReferenceAppAgent.ipynb b/examples/JupyterNotebooks/ReferenceAppAgent.ipynb index cdb678b640..ea1fff9f66 100644 --- a/examples/JupyterNotebooks/ReferenceAppAgent.ipynb +++ b/examples/JupyterNotebooks/ReferenceAppAgent.ipynb @@ -242,7 +242,7 @@ "agent_root = vroot + '/services/core/OpenADRVenAgent'\n", "install_agent(dir=agent_root,\n", " id='venagent',\n", - " config=agent_root+'/openadrven.config',\n", + " config=agent_root+'/config',\n", " tag='venagent')\n", "\n", "# Install a Volttron Central Platform Agent\n", diff --git a/examples/ListenerAgent/config b/examples/ListenerAgent/config index b0be919f76..a9cbe428a4 100644 --- a/examples/ListenerAgent/config +++ b/examples/ListenerAgent/config @@ -2,7 +2,7 @@ "agentid": "listener1", "message": "hello", # stop time in seconds - "runtime_limit":30, + #"runtime_limit":30, # log-level can be DEBUG, INFO, WARN or ERROR # verbosity is decreased from left to right above # default: INFO diff --git a/examples/ListenerAgent/listener/agent.py b/examples/ListenerAgent/listener/agent.py index 8ef516f6ce..a1991724c1 100644 --- a/examples/ListenerAgent/listener/agent.py +++ b/examples/ListenerAgent/listener/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -107,7 +107,7 @@ def onstart(self, sender, **kwargs): query = Query(self.core) _log.info('query: %r', query.query('serverkey').get()) - @PubSub.subscribe('pubsub', '') + @PubSub.subscribe('pubsub', '', all_platforms=True) def on_match(self, peer, sender, bus, topic, headers, message): """Use match_all to receive all messages and print them out.""" self._logfn( diff --git a/examples/ListenerAgent/setup.py b/examples/ListenerAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/ListenerAgent/setup.py +++ b/examples/ListenerAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/AHUAgent/ahu/agent.py b/examples/MarketAgents/AHUAgent/ahu/agent.py index a8fa89ba7b..7b956fb156 100644 --- a/examples/MarketAgents/AHUAgent/ahu/agent.py +++ b/examples/MarketAgents/AHUAgent/ahu/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/AHUAgent/setup.py b/examples/MarketAgents/AHUAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/MarketAgents/AHUAgent/setup.py +++ b/examples/MarketAgents/AHUAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/ElectricBuyerAgent/electric_buyer/agent.py b/examples/MarketAgents/ElectricBuyerAgent/electric_buyer/agent.py index c985ecd158..2b76920610 100644 --- a/examples/MarketAgents/ElectricBuyerAgent/electric_buyer/agent.py +++ b/examples/MarketAgents/ElectricBuyerAgent/electric_buyer/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/ElectricBuyerAgent/setup.py b/examples/MarketAgents/ElectricBuyerAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/MarketAgents/ElectricBuyerAgent/setup.py +++ b/examples/MarketAgents/ElectricBuyerAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/ElectricMeterAgent/electric_meter/agent.py b/examples/MarketAgents/ElectricMeterAgent/electric_meter/agent.py index 1fc9e24705..f967cdc534 100644 --- a/examples/MarketAgents/ElectricMeterAgent/electric_meter/agent.py +++ b/examples/MarketAgents/ElectricMeterAgent/electric_meter/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/ElectricMeterAgent/setup.py b/examples/MarketAgents/ElectricMeterAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/MarketAgents/ElectricMeterAgent/setup.py +++ b/examples/MarketAgents/ElectricMeterAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/LightingAgent/light/agent.py b/examples/MarketAgents/LightingAgent/light/agent.py index 26a3049db0..aae5fa11da 100644 --- a/examples/MarketAgents/LightingAgent/light/agent.py +++ b/examples/MarketAgents/LightingAgent/light/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/LightingAgent/setup.py b/examples/MarketAgents/LightingAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/MarketAgents/LightingAgent/setup.py +++ b/examples/MarketAgents/LightingAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/MeterAgent/meter/agent.py b/examples/MarketAgents/MeterAgent/meter/agent.py index 20fbb99778..14460bedf5 100644 --- a/examples/MarketAgents/MeterAgent/meter/agent.py +++ b/examples/MarketAgents/MeterAgent/meter/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/MeterAgent/setup.py b/examples/MarketAgents/MeterAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/MarketAgents/MeterAgent/setup.py +++ b/examples/MarketAgents/MeterAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/VAVAgent/setup.py b/examples/MarketAgents/VAVAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/MarketAgents/VAVAgent/setup.py +++ b/examples/MarketAgents/VAVAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/MarketAgents/VAVAgent/vav/agent.py b/examples/MarketAgents/VAVAgent/vav/agent.py index 951914bf66..366ce61bda 100644 --- a/examples/MarketAgents/VAVAgent/vav/agent.py +++ b/examples/MarketAgents/VAVAgent/vav/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SCPAgent/scp/agent.py b/examples/SCPAgent/scp/agent.py index a9fbf46beb..a589757e28 100644 --- a/examples/SCPAgent/scp/agent.py +++ b/examples/SCPAgent/scp/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SCPAgent/trigger_scp.py b/examples/SCPAgent/trigger_scp.py index 3782ed8b68..31a40adf8b 100644 --- a/examples/SCPAgent/trigger_scp.py +++ b/examples/SCPAgent/trigger_scp.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SchedulerExample/schedule_example/agent.py b/examples/SchedulerExample/schedule_example/agent.py index 8b85bc55e1..6ae69a9250 100644 --- a/examples/SchedulerExample/schedule_example/agent.py +++ b/examples/SchedulerExample/schedule_example/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -48,7 +48,6 @@ from volttron.platform.messaging import topics, headers as headers_mod -from . import settings utils.setup_logging() diff --git a/examples/SchedulerExample/schedule_example/settings.py b/examples/SchedulerExample/schedule_example/settings.py deleted file mode 100644 index 7c9d04d4ed..0000000000 --- a/examples/SchedulerExample/schedule_example/settings.py +++ /dev/null @@ -1 +0,0 @@ -SCHEDULE_PERIOD = 180 diff --git a/examples/SchedulerExample/setup.py b/examples/SchedulerExample/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/SchedulerExample/setup.py +++ b/examples/SchedulerExample/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SimpleForwarder/setup.py b/examples/SimpleForwarder/setup.py index 02075a9053..ae953a32cf 100644 --- a/examples/SimpleForwarder/setup.py +++ b/examples/SimpleForwarder/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SimpleForwarder/simpleforwarder/simpleforwarder.py b/examples/SimpleForwarder/simpleforwarder/simpleforwarder.py index 386092b2e9..552291e46f 100644 --- a/examples/SimpleForwarder/simpleforwarder/simpleforwarder.py +++ b/examples/SimpleForwarder/simpleforwarder/simpleforwarder.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SimpleWebAgent/setup.py b/examples/SimpleWebAgent/setup.py index a19d2d2879..1b78e26043 100644 --- a/examples/SimpleWebAgent/setup.py +++ b/examples/SimpleWebAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/SimpleWebAgent/simpleweb/agent.py b/examples/SimpleWebAgent/simpleweb/agent.py index 7348b8beda..f4e823994f 100644 --- a/examples/SimpleWebAgent/simpleweb/agent.py +++ b/examples/SimpleWebAgent/simpleweb/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WeatherUndergroundAgent/setup.py b/examples/WeatherUndergroundAgent/setup.py index 45a535cd17..0df83e9979 100644 --- a/examples/WeatherUndergroundAgent/setup.py +++ b/examples/WeatherUndergroundAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WeatherUndergroundAgent/tests/test_weather_agent.py b/examples/WeatherUndergroundAgent/tests/test_weather_agent.py index 00b78ed05d..52be042f91 100644 --- a/examples/WeatherUndergroundAgent/tests/test_weather_agent.py +++ b/examples/WeatherUndergroundAgent/tests/test_weather_agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WeatherUndergroundAgent/weather/weather.py b/examples/WeatherUndergroundAgent/weather/weather.py index b0e4bc245a..98c0c1b09d 100644 --- a/examples/WeatherUndergroundAgent/weather/weather.py +++ b/examples/WeatherUndergroundAgent/weather/weather.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WebRPC/example.py b/examples/WebRPC/example.py index 24513fc836..38853e6f10 100644 --- a/examples/WebRPC/example.py +++ b/examples/WebRPC/example.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WebRPC/setup.py b/examples/WebRPC/setup.py index f94369fcdb..1c53f2fbec 100644 --- a/examples/WebRPC/setup.py +++ b/examples/WebRPC/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WebRPC/volttronwebrpc/__init__.py b/examples/WebRPC/volttronwebrpc/__init__.py index f3611033c1..392cbeb4df 100644 --- a/examples/WebRPC/volttronwebrpc/__init__.py +++ b/examples/WebRPC/volttronwebrpc/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WebRPC/volttronwebrpc/volttronwebrpc.py b/examples/WebRPC/volttronwebrpc/volttronwebrpc.py index 69ffd6da23..5a048f9a5f 100644 --- a/examples/WebRPC/volttronwebrpc/volttronwebrpc.py +++ b/examples/WebRPC/volttronwebrpc/volttronwebrpc.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WebSocketAgent/setup.py b/examples/WebSocketAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/examples/WebSocketAgent/setup.py +++ b/examples/WebSocketAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/examples/WebSocketAgent/websocketagent/agent.py b/examples/WebSocketAgent/websocketagent/agent.py index 504f47ffca..0e44e125ab 100644 --- a/examples/WebSocketAgent/websocketagent/agent.py +++ b/examples/WebSocketAgent/websocketagent/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -92,4 +92,4 @@ def main(argv=sys.argv): try: sys.exit(main()) except KeyboardInterrupt: - pass \ No newline at end of file + pass diff --git a/examples/configurations/drivers/README.rst b/examples/configurations/drivers/README.rst index 3462a6b4e5..6c47084cc5 100644 --- a/examples/configurations/drivers/README.rst +++ b/examples/configurations/drivers/README.rst @@ -1,12 +1,12 @@ -Master Driver Configuration Examples +Platform Driver Configuration Examples ==================================== -This directory contains a complete set of example configurations for the master driver agent. +This directory contains a complete set of example configurations for the platform driver agent. All configurations have been used with real devices on a test network. Most files will have to be adapted to your network and devices. -The included ``master-driver.agent`` loads all the drivers in this folder. +The included ``platform-driver.agent`` loads all the drivers in this folder. BACnet ------ diff --git a/examples/configurations/drivers/obsolete/bacnet1.config b/examples/configurations/drivers/obsolete/bacnet1.config index 191240821b..b5ff99a9b6 100644 --- a/examples/configurations/drivers/obsolete/bacnet1.config +++ b/examples/configurations/drivers/obsolete/bacnet1.config @@ -2,7 +2,7 @@ #This configuration is now obsolete. #It is included for historical purposes. #This file (after paths are fixed for your particular environment) can be automatically - #converted and uploaded to the config store with scripts/update_master_driver_config.py + #converted and uploaded to the config store with scripts/update_platform_driver_config.py "driver_config": {"device_address": "10.0.0.1", "device_id": 500}, diff --git a/examples/configurations/drivers/obsolete/bacnet2.config b/examples/configurations/drivers/obsolete/bacnet2.config index 6224cf00fb..6673a57b06 100644 --- a/examples/configurations/drivers/obsolete/bacnet2.config +++ b/examples/configurations/drivers/obsolete/bacnet2.config @@ -2,7 +2,7 @@ #This configuration is now obsolete. #It is included for historical purposes. #This file (after paths are fixed for your particular environment) can be automatically - #converted and uploaded to the config store with scripts/update_master_driver_config.py + #converted and uploaded to the config store with scripts/update_platform_driver_config.py "driver_config": {"device_address": "10.0.0.2", "device_id": 501}, diff --git a/examples/configurations/drivers/obsolete/fake.config b/examples/configurations/drivers/obsolete/fake.config index 3ceaefd579..76fe363e2e 100644 --- a/examples/configurations/drivers/obsolete/fake.config +++ b/examples/configurations/drivers/obsolete/fake.config @@ -2,7 +2,7 @@ #This configuration is now obsolete. #It is included for historical purposes. #This file (after paths are fixed for your particular environment) can be automatically - #converted and uploaded to the config store with scripts/update_master_driver_config.py + #converted and uploaded to the config store with scripts/update_platform_driver_config.py "driver_config": {}, "campus": "MyFakeCampus", diff --git a/examples/configurations/drivers/obsolete/master-driver.agent b/examples/configurations/drivers/obsolete/master-driver.agent index ff88a07517..a976446e24 100644 --- a/examples/configurations/drivers/obsolete/master-driver.agent +++ b/examples/configurations/drivers/obsolete/master-driver.agent @@ -1,9 +1,9 @@ { - #This configuration for the master driver is now obsolete. + #This configuration for the platform driver is now obsolete. #It is included for historical purposes. #This file (after paths are fixed for your particular environment) can be automatically - #converted and uploaded with scripts/update_master_driver_config.py - "agentid": "master_driver", + #converted and uploaded with scripts/update_platform_driver_config.py + "agentid": "platform_driver", "driver_config_list": [ "/home/volttron/volttron/examples/configurations/drivers/obsolete/bacnet1.config", "/home/volttron/volttron/examples/configurations/drivers/obsolete/bacnet2.config", diff --git a/examples/configurations/drivers/obsolete/modbus1.config b/examples/configurations/drivers/obsolete/modbus1.config index c0cc1c8190..39881cc9df 100644 --- a/examples/configurations/drivers/obsolete/modbus1.config +++ b/examples/configurations/drivers/obsolete/modbus1.config @@ -2,7 +2,7 @@ #This configuration is now obsolete. #It is included for historical purposes. #This file (after paths are fixed for your particular environment) can be automatically - #converted and uploaded to the config store with scripts/update_master_driver_config.py + #converted and uploaded to the config store with scripts/update_platform_driver_config.py "driver_config": {"device_address": "10.0.0.3"}, "campus": "campus", diff --git a/examples/configurations/drivers/test_hvac_1.config b/examples/configurations/drivers/test_hvac_1.config index 2f73fac37a..8d04e23a82 100644 --- a/examples/configurations/drivers/test_hvac_1.config +++ b/examples/configurations/drivers/test_hvac_1.config @@ -4,7 +4,7 @@ "building": "spl", "unit": "THERMOSTAT_1", "driver_type": "radiothermostat", - "registry_config": "/home/parallels/Desktop/dvaidhyn_pnnl/volttron/services/core/MasterDriverAgent/master_driver/thermostat_points.csv", + "registry_config": "/home/parallels/Desktop/dvaidhyn_pnnl/volttron/services/core/PlatformDriverAgent/platform_driver/thermostat_points.csv", "interval": 20, "timezone": "UTC" } diff --git a/examples/configurations/rabbitmq/rabbitmq_config.yml b/examples/configurations/rabbitmq/rabbitmq_config.yml index d597929880..991031511c 100644 --- a/examples/configurations/rabbitmq/rabbitmq_config.yml +++ b/examples/configurations/rabbitmq/rabbitmq_config.yml @@ -54,3 +54,6 @@ rmq-home: ~/rabbitmq_server/rabbitmq_server-3.7.7 # RabbitMQ reconnect retry delay (in seconds) reconnect-delay: 30 +# Flag to indicate if RabbitMQ server is configured to run as a systemd service. +rabbbitmq-service: false + diff --git a/examples/configurations/rabbitmq/rabbitmq_federation_config.yml b/examples/configurations/rabbitmq/rabbitmq_federation_config.yml index f4da1e828d..15233acd64 100644 --- a/examples/configurations/rabbitmq/rabbitmq_federation_config.yml +++ b/examples/configurations/rabbitmq/rabbitmq_federation_config.yml @@ -1,10 +1,21 @@ # Mandatory parameters for federation setup federation-upstream: - rabbit-4: + volttron4: # hostname of upstream server port: '5671' virtual-host: volttron4 - rabbit-5: + certificates: + csr: true + private_key: "path to private cert" # For example, /home/volttron/vhome/test_fed/certificates/private/volttron1.federation.pem + public_cert: "path to public cert" # For example, /home/volttron/vhome/test_fed/certificates/federation/volttron2.volttron1.federation.crt + remote_ca: "path to CA cert" # For example, /home/volttron/vhome/test_fed/certificates/federation/volttron2_ca.crt + federation-user: volttron4.federation #.federation + volttron5: # hostname of upstream server port: '5671' virtual-host: volttron5 - + certificates: + csr: true + private_key: "path to private cert" + public_cert: "path to public cert" + remote_ca: "path to CA cert" + federation-user: volttron5.federation #.federation diff --git a/examples/configurations/rabbitmq/rabbitmq_shovel_config.yml b/examples/configurations/rabbitmq/rabbitmq_shovel_config.yml index 5a2c29766e..552b6a58a8 100644 --- a/examples/configurations/rabbitmq/rabbitmq_shovel_config.yml +++ b/examples/configurations/rabbitmq/rabbitmq_shovel_config.yml @@ -3,6 +3,11 @@ shovel: rabbit-2: port: '5671' virtual-host: volttron + certificates: + csr: true + private_cert: "path to private cert" # For example, /home/volttron/vhome/test_shovel/certificates/private/volttron1.shovelvolttron2.pem + public_cert: "path to public cert" # For example, /home/volttron/vhome/test_shovel/certificates/shovels/volttron2.volttron1.shovelvolttron2.crt + remote_ca: "path to CA cert" # For example, /home/volttron/vhome/test_shovel/certificates/shovels/volttron2_ca.crt # Configuration to forward pubsub topics pubsub: # Identity of agent that is publishing the topic @@ -14,4 +19,4 @@ shovel: # Remote instance name volttron2: # List of pair of agent identities (local caller, remote callee) - - [scheduler, platform.actuator] \ No newline at end of file + - [scheduler, platform.actuator] diff --git a/examples/default_install_scripts/install-actuator-agent.sh b/examples/default_install_scripts/install-actuator-agent.sh index 705a115e93..847ed3c49d 100755 --- a/examples/default_install_scripts/install-actuator-agent.sh +++ b/examples/default_install_scripts/install-actuator-agent.sh @@ -2,5 +2,5 @@ echo "installing actuator agent" python scripts/install-agent.py --force -s services/core/ActuatorAgent \ - --config services/core/ActuatorAgent/actuator-deploy.service \ + --config services/core/ActuatorAgent/config \ --vip-identity platform.actuator --start diff --git a/examples/default_install_scripts/install-master-driver.sh b/examples/default_install_scripts/install-platform-driver.sh similarity index 64% rename from examples/default_install_scripts/install-master-driver.sh rename to examples/default_install_scripts/install-platform-driver.sh index 6ca6139ee2..311006f35f 100755 --- a/examples/default_install_scripts/install-master-driver.sh +++ b/examples/default_install_scripts/install-platform-driver.sh @@ -4,5 +4,5 @@ echo "storing fake.csv" vctl config store platform.driver fake.csv examples/configurations/drivers/fake.csv --csv echo "storing devices/foo/bar" vctl config store platform.driver devices/foo/bar examples/configurations/drivers/fake.config --json -echo "installing master driver" -python scripts/install-agent.py --force -s services/core/MasterDriverAgent --vip-identity platform.driver --start +echo "installing platform driver" +python scripts/install-agent.py --force -s services/core/PlatformDriverAgent --vip-identity platform.driver --start diff --git a/integrations/__init__.py b/integrations/__init__.py new file mode 100644 index 0000000000..8751420803 --- /dev/null +++ b/integrations/__init__.py @@ -0,0 +1,4 @@ +from .gridappsd_integration import GridAPPSDSimIntegration +from .helics_integration import HELICSSimIntegration +from.energyplus_integration import EnergyPlusSimIntegration +__all__ = ['HELICSSimIntegration', 'GridAPPSDSimIntegration', 'EnergyPlusSimIntegration'] diff --git a/integrations/energyplus_integration.py b/integrations/energyplus_integration.py new file mode 100644 index 0000000000..6ac820bb26 --- /dev/null +++ b/integrations/energyplus_integration.py @@ -0,0 +1,461 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import logging +from gevent import monkey, sleep +import weakref +import socket +import subprocess +from datetime import datetime +from calendar import monthrange +from volttron.platform.agent.base_simulation_integration.base_sim_integration import BaseSimIntegration + +monkey.patch_socket() +_log = logging.getLogger(__name__) +__version__ = '1.0' + +HAS_ENERGYPLUS = True + + +class EnergyPlusSimIntegration(BaseSimIntegration): + """ + The class is responsible for integration with EnergyPlus simulation + """ + + def __init__(self, config, pubsub, core): + super(EnergyPlusSimIntegration, self).__init__(config) + self.pubsub = weakref.ref(pubsub) + self.core = weakref.ref(core) + self.current_time = 0 + self.inputs = [] + self.outputs = [] + self.current_values = {} + self.version = 8.4 + self.bcvtb_home = '.' + self.model = None + self.customizedOutT = 0 + self.weather = None + self.socketFile = None + self.variableFile = None + self.time = 0 + self.vers = 2 + self.flag = 0 + self.sent = None + self.rcvd = None + self.socket_server = None + self.simulation = None + self.step = None + self.eplus_inputs = 0 + self.eplus_outputs = 0 + self.cosim_sync_counter = 0 + self.time_scale = 1.0 + self.passtime = False + self.size = None + self.real_time_flag = False + self.currenthour = datetime.now().hour + self.currentday = datetime.now().day + self.currentmonth = datetime.now().month + self.length = 1 + self.maxday = monthrange(2012, self.currentmonth)[1] + self.callback = None + self.month = None + self.year = None + self.day = None + self.minute = None + self.operation = None + self.timestep = None + self.cosimulation_sync = None + self.real_time_periodic = None + self.co_sim_timestep = None + self.startmonth = None + self.startday = None + self.endmonth = None + self.endday = None + self.sim_flag = 0 + self.cwd = os.getcwd() + + def register_inputs(self, config=None, callback=None, **kwargs): + """ + Store input and output configurations + Save the user agent callback + :return: + """ + self.inputs = self.config.get('inputs', []) + self.outputs = self.config.get('outputs', []) + if 'properties' in self.config and isinstance(self.config['properties'], dict): + self.__dict__.update(self.config['properties']) + self.callback = callback + + def start_socket_server(self): + """ + Connect to EnergyPlus socket server and + register a receiver callback + """ + self.socket_server = SocketServer() + self.socket_server.size = self.size + self.socket_server.on_recv = self.recv_eplus_msg + self.socket_server.connect() + self.core().spawn(self.socket_server.start) + + def start_simulation(self, *args, **kwargs): + """ + Start EnergyPlus simulation + :return: + """ + self.start_socket_server() + self._start_eplus_simulation() + + def _start_eplus_simulation(self): + """ + Check the model path and start EnergyPlus + """ + if not self.model: + self.exit('No model specified.') + if not self.weather: + self.exit('No weather specified.') + model_path = self.model + if model_path[0] == '~': + model_path = os.path.expanduser(model_path) + if model_path[0] != '/': + model_path = os.path.join(self.cwd, model_path) + weather_path = self.weather + if weather_path[0] == '~': + weather_path = os.path.expanduser(weather_path) + if weather_path[0] != '/': + weather_path = os.path.join(self.cwd, weather_path) + model_dir = os.path.dirname(model_path) + bcvtb_dir = self.bcvtb_home + if bcvtb_dir[0] == '~': + bcvtb_dir = os.path.expanduser(bcvtb_dir) + if bcvtb_dir[0] != '/': + bcvtb_dir = os.path.join(self.cwd, bcvtb_dir) + _log.debug('Working in %r', model_dir) + + self._write_port_file(os.path.join(model_dir, 'socket.cfg')) + self._write_variable_file(os.path.join(model_dir, 'variables.cfg')) + + if self.version >= 8.4: + cmd_str = "cd %s; export BCVTB_HOME=%s; energyplus -w %s -r %s" % ( + model_dir, bcvtb_dir, weather_path, model_path) + else: + cmd_str = "export BCVTB_HOME=%s; runenergyplus %s %s" % (bcvtb_dir, model_path, weather_path) + _log.debug('Running: %s', cmd_str) + f = open(model_path, 'r') + lines = f.readlines() + f.close() + endmonth = 0 + if self.currentday + self.length > self.maxday: + endday = self.currentday + self.length - self.maxday + endmonth = self.currentmonth + 1 + else: + endday = self.currentday + self.length + endmonth = self.currentmonth + for i in range(len(lines)): + if lines[i].lower().find('runperiod,') != -1: + if not self.real_time_flag: + lines[i + 2] = ' ' + str(self.startmonth) + ', !- Begin Month' + '\n' + lines[i + 3] = ' ' + str(self.startday) + ', !- Begin Day of Month' + '\n' + lines[i + 4] = ' ' + str(self.endmonth) + ', !- End Month' + '\n' + lines[i + 5] = ' ' + str(self.endday) + ', !- End Day of Month' + '\n' + else: + lines[i + 2] = ' ' + str(self.currentmonth) + ', !- Begin Month' + '\n' + lines[i + 3] = ' ' + str( + self.currentday) + ', !- Begin Day of Month' + '\n' + lines[i + 4] = ' ' + str(endmonth) + ', !- End Month' + '\n' + lines[i + 5] = ' ' + str(endday) + ', !- End Day of Month' + '\n' + for i in range(len(lines)): + if lines[i].lower().find('timestep,') != -1 and lines[i].lower().find('update frequency') == -1: + if lines[i].lower().find(';') != -1: + lines[i] = ' Timestep,' + str(self.timestep) + ';' + '\n' + else: + lines[i + 1] = ' ' + str(self.timestep) + ';' + '\n' + if self.customizedOutT > 0: + lines.append('ExternalInterface:Actuator,') + '\n' + lines.append(' outT, !- Name') + '\n' + lines.append(' Environment, !- Actuated Component Unique Name') + '\n' + lines.append(' Weather Data, !- Actuated Component Type') + '\n' + lines.append(' Outdoor Dry Bulb; !- Actuated Component Control Type') + '\n' + f = open(model_path, 'w') + + for i in range(len(lines)): + f.writelines(lines[i]) + f.close() + self.simulation = subprocess.Popen(cmd_str, shell=True) + + def publish_all_to_simulation(self, inputs): + self.inputs = inputs + self.send_eplus_msg() + + def send_eplus_msg(self): + """ + Send inputs to EnergyPlus + """ + _log.debug("send_eplus_msg ") + if self.socket_server: + args = self.input() + msg = '%r %r %r 0 0 %r' % (self.vers, self.flag, self.eplus_inputs, self.time) + for obj in args: + if obj.get('name', None) and obj.get('type', None): + msg = msg + ' ' + str(obj.get('value')) + self.sent = msg + '\n' + _log.info('Sending message to EnergyPlus: ' + msg) + self.sent = self.sent.encode() + self.socket_server.send(self.sent) + + def recv_eplus_msg(self, msg): + """ + Receive outputs from EnergyPlus, parse the messages and hand it over + to user callback + """ + self.rcvd = msg + self.parse_eplus_msg(msg) + # Call Agent callback to do whatever with the message + if self.callback is not None: + self.callback() + + def parse_eplus_msg(self, msg): + """ + Parse EnergyPlus message to update output values and + simulation datetime + """ + msg = msg.decode("utf-8") + msg = msg.rstrip() + _log.info(f"Received message from EnergyPlus: {msg}") + arry = msg.split() + arry = [float(item) for item in arry] + _log.info(f"Received message from EnergyPlus: {arry}") + slot = 6 + self.sim_flag = arry[1] + + if self.sim_flag != 0.0: + # Exit based on error status + _log.debug("FLAG: {} - {}".format(self.sim_flag, type(self.sim_flag))) + self._check_sim_flag() + elif arry[2] < self.eplus_outputs and len(arry) < self.eplus_outputs + 6: + self.exit('Got message with ' + arry[2] + ' inputs. Expecting ' + str(self.eplus_outputs) + '.') + else: + if float(arry[5]): + self.time = float(arry[5]) + for input in self.inputs: + name_value = input.get('name', None) + dynamic_default_value = input.get('dynamic_default', None) + if name_value is not None and dynamic_default_value is not None: + slot = 6 + for output in self.outputs: + _log.debug("Output: {}".format(output)) + default_value = output.get('default', None) + if default_value is not None: + if default_value.lower().find(name_value.lower()) != -1: + input['default'] = float(arry[slot]) + slot += 1 + slot = 6 + for output in self.outputs: + name_value = output.get('name', None) + type_value = output.get('type', None) + field_value = output.get('field', None) + if name_value is not None and type_value is not None: + try: + output['value'] = float(arry[slot]) + except: + _log.debug(slot) + self.exit('Unable to convert received value to double.') + if "currentmonthv" in type_value.lower(): + self.month = float(arry[slot]) + _log.debug(f"month {self.month}") + elif "currentdayofmonthv" in type_value.lower(): + self.day = float(arry[slot]) + _log.debug(f"day {self.day}") + elif "currenthourv" in type_value.lower(): + self.hour = float(arry[slot]) + _log.debug(f"hour {self.hour}") + elif "currentminutev" in type_value.lower(): + self.minute = float(arry[slot]) + _log.debug(f"minute: {self.minute}") + elif field_value is not None and 'operation' in field_value.lower(): + self.operation = float(arry[slot]) + _log.debug(f"operation (1:on, 0: off) {self.operation}") + slot += 1 + + def _check_sim_flag(self): + """ + Exit the process based on simulation status + """ + if self.sim_flag == '1': + self.exit('Simulation reached end: ' + self.sim_flag) + elif self.sim_flag == '-1': + self.exit('Simulation stopped with unspecified error: ' + self.sim_flag) + elif self.sim_flag == '-10': + self.exit('Simulation stopped with error during initialization: ' + self.sim_flag) + elif self.sim_flag == '-20': + self.exit('Simulation stopped with error during time integration: ' + self.sim_flag) + + def publish_to_simulation(self, topic, message, **kwargs): + """ + Publish message on EnergyPlus simulation + :param topic: EnergyPlus input field + :param message: message + :return: + """ + pass + + def make_time_request(self, time_request=None, **kwargs): + """ + Cannot request time with energyplus + :param time_request: + :return: + """ + pass + + def pause_simulation(self, timeout=None, **kwargs): + pass + + def resume_simulation(self, *args, **kwargs): + pass + + def is_sim_installed(self, **kwargs): + return HAS_ENERGYPLUS + + def stop_simulation(self, *args, **kwargs): + """ + Stop EnergyPlus simulation + :return: + """ + if self.socket_server: + # Close connection to EnergyPlus server + self.socket_server.stop() + self.socket_server = None + + def _write_port_file(self, path): + fh = open(path, "w+") + fh.write('\n') + fh.write('\n') + fh.write(' \n') + fh.write(' \n' % (self.socket_server.port, self.socket_server.host)) + fh.write(' \n') + fh.write('') + fh.close() + + def _write_variable_file(self, path): + fh = open(path, "w+") + fh.write('\n') + fh.write('\n') + fh.write('\n') + for obj in self.outputs: + if 'name' in obj and 'type' in obj: + self.eplus_outputs = self.eplus_outputs + 1 + fh.write(' \n') + fh.write(' \n' % (obj.get('name'), obj.get('type'))) + fh.write(' \n') + for obj in self.inputs: + if 'name' in obj and 'type' in obj: + self.eplus_inputs = self.eplus_inputs + 1 + fh.write(' \n') + fh.write(' \n' % (obj.get('type'), obj.get('name'))) + fh.write(' \n') + fh.write('\n') + fh.close() + + def input(self): + return self.inputs + + +class SocketServer(object): + """ + Socket Server class for connecting to EnergyPlus + """ + def __init__(self, **kwargs): + self.sock = None + self.size = 4096 + self.client = None + self.sent = None + self.rcvd = None + self.host = "127.0.0.1" + self.port = None + + def on_recv(self, msg): + _log.debug('Received %s' % msg) + + def run(self): + self.listen() + + def connect(self): + if self.host is None: + self.host = socket.gethostname() + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if self.port is None: + self.sock.bind((self.host, 0)) + self.port = self.sock.getsockname()[1] + else: + self.sock.bind((self.host, self.port)) + _log.debug('Bound to %r on %r' % (self.port, self.host)) + + def send(self, msg): + self.sent = msg + if self.client is not None and self.sock is not None: + try: + self.client.send(self.sent) + except Exception: + _log.error('We got an error trying to send a message.') + + def recv(self): + if self.client is not None and self.sock is not None: + try: + msg = self.client.recv(self.size) + except Exception: + _log.error('We got an error trying to read a message') + return msg + + def start(self): + _log.debug('Starting socket server') + self.run() + + def stop(self): + if self.sock != None: + self.sock.close() + + def listen(self): + self.sock.listen(10) + _log.debug('server now listening') + self.client, addr = self.sock.accept() + _log.debug('Connected with ' + addr[0] + ':' + str(addr[1])) + while True: + msg = self.recv() + if msg: + self.rcvd = msg + self.on_recv(msg) diff --git a/integrations/gridappsd_integration.py b/integrations/gridappsd_integration.py new file mode 100644 index 0000000000..1a021c28c8 --- /dev/null +++ b/integrations/gridappsd_integration.py @@ -0,0 +1,214 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +try: + from gridappsd import GridAPPSD + from gridappsd.simulation import Simulation + from gridappsd import topics as t + import stomp + HAS_GAPPSD = True +except ImportError: + HAS_GAPPSD = False + RuntimeError('GridAPPSD must be installed before running this script ') + +import os +import logging +import gevent +import weakref + +from volttron.platform.agent.base_simulation_integration.base_sim_integration import BaseSimIntegration + +_log = logging.getLogger(__name__) +__version__ = '1.0' + + +class GridAPPSDSimIntegration(BaseSimIntegration): + """ + The class is responsible for integration with GridAPPSD co-simulation platform. + It provides integration support to register configuration, start, stop, publish, + receive messages, pause and resume simulation + """ + def __init__(self, config, pubsub): + super(GridAPPSDSimIntegration, self).__init__(config) + self._work_callback = None + self.config = config + self.gridappsd = None + self.sim = None + self.event_callbacks = {} + self.topic_callbacks = {} + self.sim_id = None + + def register_inputs(self, config=None, callback=None, **kwargs): + """ + Register configuration parameters with GridAppsD. + The config parameters may include but not limited to: + - power_system_config + - application_config + - simulation_config + - test_config + - service_configs + : Register agent callback method + :return: + """ + self.config = config + self._work_callback = callback + + def register_event_callbacks(self, callbacks={}): + """ + Register for event callbacks for event notifications such as + - on measurement change + - on timestep change + - on finish + """ + _log.debug("Registering for event callbacks") + self.event_callbacks = callbacks + + def register_topic_callbacks(self, callbacks={}): + """ + Register for any simulation topic callbacks + """ + _log.debug("Registering for topic callbacks") + self.topic_callbacks = callbacks + + def start_simulation(self, *args, **kwargs): + """ + Simulation start activities involve: + - Creating GridAppsD connection gevent thread + - Registering for event callbacks (if specified) + - Registering for topic callbacks if specified + - Starting simulation based on the input config + :return: + """ + try: + self.gridappsd = GridAPPSD(override_threading=self.receiver_thread) + + _log.debug('Gridappsd connected') + + _log.debug(f"connection config is: {self.config}") + self.sim = Simulation(self.gridappsd, self.config) + + _log.debug('Gridappsd adding onstart callback') + # Register for onstart callback to know if simulation has started + self.sim.add_onstart_callback(self.sim_on_start) + # Register event callbacks - on measurement, on timestep, on finish + for name, cb in self.event_callbacks.items(): + if name == 'MEASUREMENT': + _log.debug('Gridappsd adding measurement callback') + self.sim.add_onmesurement_callback(cb) + elif name == 'TIMESTEP': + _log.debug('Gridappsd adding timestep callback') + self.sim.add_ontimestep_callback(cb) + elif name == 'FINISH': + _log.debug('Gridappsd adding finish callback') + self.sim.add_oncomplete_callback(cb) + + # Register/Subscribe for simulation topics + for topic, cb in self.topic_callbacks: + _log.debug('Gridappsd subscribing to topics callback') + self.gridappsd.subscribe(topic, cb) + + # Starting GridAppsD simulation + self.sim.start_simulation() + _log.debug(f"Gridappsd simulation id: {self.sim.simulation_id}") + except stomp.exception.NotConnectedException as ex: + _log.error("Unable to connect to GridAPPSD: {}".format(ex)) + raise ex + + def sim_on_start(self, sim): + """ + Simulation on start callback to get notified when simulation starts + """ + _log.debug(f"GridAppsD simulation id inside sim_on_start(): {sim.simulation_id}") + self.sim_id = sim.simulation_id + + def receiver_thread(self, arg): + """ + GridAPPSD connection thread + """ + self._receiver_thread = gevent.threading.Thread(group=None, target=arg) + self._receiver_thread.daemon = True # Don't let thread prevent termination + self._receiver_thread.start() + _log.debug('Gridappsd receiver_thread started!') + return self._receiver_thread + + def publish_to_simulation(self, topic, message, **kwargs): + """ + Publish message to GridAppsD + :param topic: GridAppsD publication topic + :param message: message + :return: + """ + self.gridappsd.send(topic, message) + + def pause_simulation(self, timeout=None, **kwargs): + """ + Pause the GridAppsD simulation + """ + if timeout is None: + self.sim.pause() + else: + self.sim.pause(timeout) + + def resume_simulation(self, *args, **kwargs): + """ + Resume the GridAppsD simulation + """ + self.sim.resume() + + def is_sim_installed(self, **kwargs): + """ + Flag to indicate if GridAppsD is installed + """ + return HAS_GAPPSD + + def stop_simulation(self, *args, **kwargs): + """ + Stop the simulation if running and disconnect from GridAppsD server + :return: + """ + _log.debug('Stopping the simulation') + try: + if self.sim_id is not None: + self.sim.stop() + _log.debug('Disconnect GridAppsd') + if self.gridappsd is not None: + self.gridappsd.disconnect() + except Exception: + _log.error("Error stop GridAPPSD simulation") + + diff --git a/integrations/helics_integration.py b/integrations/helics_integration.py new file mode 100644 index 0000000000..2a660f4cac --- /dev/null +++ b/integrations/helics_integration.py @@ -0,0 +1,355 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +try: + import helics as h + HAS_HELICS = True +except ImportError: + HAS_HELICS = False + RuntimeError('HELICS must be installed before running this script ') + +import os +import logging +import gevent +import weakref +from volttron.platform.agent.base_simulation_integration.base_sim_integration import BaseSimIntegration +from volttron.platform import jsonapi +from copy import deepcopy + +_log = logging.getLogger(__name__) +__version__ = '1.0' + + +class HELICSSimIntegration(BaseSimIntegration): + """ + The class is responsible for integration with HELICS co-simulation platform + """ + def __init__(self, config, pubsub): + super(HELICSSimIntegration, self).__init__(config) + self.pubsub = weakref.ref(pubsub) + self.fed = None + self._work_callback = None + self._simulation_started = False + self._simulation_complete = False + self._simulation_delta = None + self._simulation_length = None + self.current_time = 0 + self.inputs = [] + self.outputs = {} + self.endpoints = {} + self.current_values = {} + self.helics_to_volttron_publish = {} + + def register_inputs(self, config=None, callback=None, **kwargs): + """ + Register configuration parameters with HELICS. The config parameters may include + but not limited to: + 1. Name of the federate + 2. simulation length + 2. Type of core to use (zmq/tcp/udp etc) + 3. list (and type) of subscriptions + 4. list (and type) of publications + 5. broker address (if not default) + :param config: config parameters + :param callback: Register agent callback method + :return: + """ + self._work_callback = callback + # Build HELICS config from agent config + helics_config = deepcopy(config) + + properties = helics_config.pop('properties', {}) + if not properties: + raise RuntimeError("Invalid configuration. Missing properties dictionary") + self._simulation_delta = properties.pop('timeDelta', 1.0) # seconds + self._simulation_length = properties.pop('simulation_length', 3600) # seconds + + for key, value in properties.items(): + helics_config[key] = value + subscriptions = helics_config.pop('outputs', []) + for sub in subscriptions: + volttron_topic = sub.pop('volttron_topic', None) + if volttron_topic is not None: + self.helics_to_volttron_publish[sub.get('key')] = volttron_topic + sub['key'] = sub.pop('sim_topic') + # Replace 'outputs' key with 'subscriptions' key + if subscriptions: + helics_config['subscriptions'] = subscriptions + + publications = helics_config.pop('inputs', []) + for pub in publications: + volttron_topic = pub.pop('volttron_topic', None) + pub['key'] = pub.pop('sim_topic') + # Replace 'inputs' key with 'publications' key + if publications: + helics_config['publications'] = publications + _log.debug("new config: {}".format(helics_config)) + + # Create a temporary json file + tmp_file = os.path.join(os.getcwd(), 'fed_cfg.json') + _log.debug("tmp file: {}".format(tmp_file)) + with open(tmp_file, 'w') as fout: + fout.write(jsonapi.dumps(helics_config)) + + _log.debug("Create Combination Federate") + # Create federate from provided config parameters + try: + self.fed = h.helicsCreateCombinationFederateFromConfig(tmp_file) + except h._helics.HelicsException as e: + _log.exception("Error parsing HELICS config {}".format(e)) + + # Check if HELICS broker correctly registered inputs + federate_name = h.helicsFederateGetName(self.fed) + _log.debug("Federate name: {}".format(federate_name)) + endpoint_count = h.helicsFederateGetEndpointCount(self.fed) + _log.debug("Endpoint count: {}".format(endpoint_count)) + subkeys_count = h.helicsFederateGetInputCount(self.fed) + _log.debug("Subscription key count: {}".format(subkeys_count)) + pubkeys_count = h.helicsFederateGetPublicationCount(self.fed) + _log.debug("Publication key count: {}".format(endpoint_count)) + + for i in range(0, endpoint_count): + try: + endpt_idx = h.helicsFederateGetEndpointByIndex(self.fed, i) + endpt_name = h.helicsEndpointGetName(endpt_idx) + self.endpoints[endpt_name] = endpt_idx + except h._helics.HelicsException as e: + _log.exception("Error getting helics endpoint index: {}".format(e)) + + for i in range(0, subkeys_count): + inputs = dict() + try: + idx = h.helicsFederateGetInputByIndex(self.fed, i) + inputs['sub_id'] = idx + inputs['type'] = h.helicsInputGetType(idx) + inputs['key'] = h.helicsSubscriptionGetKey(idx) + self.inputs.append(inputs) + data = dict(type=inputs['type'], value=None) + except h._helics.HelicsException as e: + _log.exception("Error getting helics input index: {}".format(e)) + + for i in range(0, pubkeys_count): + outputs = dict() + try: + idx = h.helicsFederateGetPublicationByIndex(self.fed, i) + outputs['pub_id'] = idx + outputs['type'] = h.helicsPublicationGetType(idx) + pub_key = h.helicsPublicationGetKey(idx) + _log.debug("Publication: {}".format(pub_key)) + self.outputs[pub_key] = outputs + data = dict(type=outputs['type'], value=None) + except h._helics.HelicsException as e: + _log.exception("Error getting helics publication index: {}".format(e)) + + def start_simulation(self, *args, **kwargs): + """ + This is a blocking call until the all the federates get connected to HELICS broker + :return: + """ + _log.debug("############ Entering Execution Mode ##############") + h.helicsFederateEnterExecutingMode(self.fed) + _log.debug("Spawning simulation loop to HELICS events") + gevent.spawn(self._sim_loop) + # Allow the spawned greenlet to run. + gevent.sleep(0.1) + + def _sim_loop(self): + """ + Continuous loop to get registered input values from HELICS and feed it to user callback + :return: + """ + _log.info("Starting simulation loop") + self._simulation_started = True + while self.current_time < self._simulation_length: + for in_put in self.inputs: + sub_key = in_put['key'] + # NOTE: Values are persisted in HELICS. Old values are returned if they dont + # get updated in current time step + self.current_values[sub_key] = self._get_input_based_on_type(in_put) + try: + # Get VOLTTRON topic for the input key + volttron_topic = self.helics_to_volttron_publish[sub_key] + self.pubsub().publish('pubsub', topic=volttron_topic, message=self.current_values[sub_key]) + except KeyError: + # No VOLTTRON topic for input key + pass + + # Collect any messages from endpoints (messages are not persistent) + for name, idx in self.endpoints.items(): + try: + if h.helicsEndpointHasMessage(idx): + msg = h.helicsEndpointGetMessage(idx) + self.current_values[name] = msg.data + except h._helics.HelicsException as e: + _log.exception("Error getting endpoint message from HELICS {}".format(e)) + + # Call user provided callback to perform work on HELICS inputs + self._work_callback() + # This allows other event loops to run + gevent.sleep(0.000000001) + _log.debug("Simulation completed. Closing connection to HELICS") + # Check if anything to publish + self._simulation_complete = True + # Closing connection to HELICS + self.stop_simulation() + + def publish_to_simulation(self, topic, message, **kwargs): + """ + Publish message on HELICS bus + :param topic: HELICS publication key + :param message: message + :return: + """ + try: + info = self.outputs[topic] + info['value'] = message + _log.debug("Publishing Pub key: {}, info: {}".format(topic, info)) + self._publish_based_on_type(info) + except KeyError as e: + _log.error("Unknown publication key {}".format(topic)) + + def _publish_based_on_type(self, output): + """ + Publish message based on type + :param output: + :return: + """ + try: + if output['type'] == 'integer': + h.helicsPublicationPublishInteger(output['pub_id'], output['value']) + elif output['type'] == 'double': + h.helicsPublicationPublishDouble(output['pub_id'], output['value']) + elif output['type'] == 'string': + h.helicsPublicationPublishString(output['pub_id'], output['value']) + elif output['type'] == 'complex': + h.helicsPublicationPublishComplex(output['pub_id'], output['value']) + elif output['type'] == 'vector': + h.helicsPublicationPublishVector(output['pub_id'], output['value']) + elif output['type'] == 'boolean': + h.helicsPublicationPublishBoolean(output['pub_id'], output['value']) + else: + _log.error("Unknown datatype: {}".format(output['type'])) + except h._helics.HelicsException as e: + _log.exception("Error sending publication to HELICS {}".format(e)) + + def _get_input_based_on_type(self, in_put): + """ + Get input based on type + :param in_put: + :return: + """ + val = None + sub_id = in_put['sub_id'] + try: + if in_put['type'] == 'integer': + val = h.helicsInputGetInteger(sub_id) + elif in_put['type'] == 'double': + val = h.helicsInputGetDouble(sub_id) + elif in_put['type'] == 'string': + val = h.helicsInputGetString(sub_id) + elif in_put['type'] == 'complex': + real, imag = h.helicsInputGetComplex(sub_id) + val = [real, imag] + elif in_put['type'] == 'vector': + val = h.helicsInputGetVector(sub_id) + elif in_put['type'] == 'boolean': + val = h.helicsInputGetBoolean(sub_id) + else: + _log.error("Unknown datatype: {}".format(in_put['type'])) + except h._helics.HelicsException as e: + _log.exception("Error getting input from HELICS {}".format(e)) + return val + + def make_blocking_time_request(self, time_request=None): + """ + This is a blocking call till the next time request is granted + :param time_request: + :return: + """ + if time_request is None: + time_request = self.current_time + self._simulation_delta + granted_time = -1 + while granted_time < time_request: + granted_time = h.helicsFederateRequestTime(self.fed, time_request) + gevent.sleep(0.000000001) + _log.debug("GRANTED TIME: {}".format(granted_time)) + self.current_time = granted_time + + def make_time_request(self, time_request=None, **kwargs): + """ + Request for next time step. Granted time maybe lower than the requested time + :param time_request: + :return: + """ + if time_request is None: + time_request = self.current_time + self._simulation_delta + _log.debug("MAKING NEXT TIMEREQUEST: {}".format(time_request)) + granted_time = h.helicsFederateRequestTime(self.fed, time_request) + _log.debug("GRANTED TIME maybe lower than time requested: {}".format(granted_time)) + self.current_time = granted_time + + def is_sim_installed(self, **kwargs): + return HAS_HELICS + + def send_to_endpoint(self, endpoint_name, destination='', value=0): + """ + Send the message to specific endpoint + :param endpoint_name: endpoint name + :param destination: destination name if any + :param value: message + :return: + """ + endpoint_idx = self.endpoints[endpoint_name] + try: + h.helicsEndpointSendEventRaw(endpoint_idx, destination, str(56.7), self.current_time) + except h._helics.HelicsException as e: + _log.exception("Error sending endpoint message to HELICS {}".format(e)) + + def stop_simulation(self, *args, **kwargs): + """ + Disconnect the federate from helics core and close the library + :return: + """ + try: + h.helicsFederateFinalize(self.fed) + h.helicsFederateFree(self.fed) + h.helicsCloseLibrary() + except h._helics.HelicsException as e: + _log.exception("Error stopping HELICS federate {}".format(e)) + + diff --git a/pytest.ini b/pytest.ini index d500024eb5..6313fcb010 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,11 +1,11 @@ [pytest] timeout = 240 -addopts = --strict -rsxX -v --showlocals -# --full-trace +addopts = --strict -rsxX -v --continue-on-collection-errors +#--full-trace -norecursedirs = - services/deprecated-remove-5.0/MultiBuilding \ - .git env scripts volttron.egg-info config +norecursedirs = \ + .git env scripts volttron.egg-info config docs \ + examples volttron_data markers = actuator: Tests for actuator agent @@ -26,7 +26,8 @@ markers = keystore: Test the keystore and known-hosts store. mongodb: Tests for mongodb related test code. pa: Tests for the platform agent. - driver: Tests for master driver functionality. + driver: Tests for platform driver functionality. + driver_unit: Unit tests for platform driver functionality. slow: Mark tests that run slowly. sqlhistorian: Mark for only sql historian tests. subsystems: Testing subsystems. @@ -54,5 +55,10 @@ markers = secure: Test platform and agents with secure platform options rpc: Tests for RPC mysqlfuncts: level one integration tests for mysqlfuncts + postgresqlfuncts_timescaledb: level one integreation tests for timescaledb postgresqlfuncts: level one integration tests for postgresqlfuncts dbutils: test all the level one integrations tests for dbfuncts classes + mongoutils: level one integration tests for mongoutils + sqlitefuncts: level one integration tests for sqlitefuncts + unit: Run all unit/level one integration tests + influxdbutils: level one integration tests for influxdb diff --git a/requirements.py b/requirements.py index 0a8e9d260b..7aa3072b17 100644 --- a/requirements.py +++ b/requirements.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,101 +36,63 @@ # under Contract DE-AC05-76RL01830 # }}} -# These need to be importable by bootstrap.py. If we put them in -# setup.py the import may fail if setuptools in not installed -# in the global python3. -option_requirements = [ - ('pyzmq', ['--zmq=bundled']), -] +extras_require = { 'crate': ['crate==0.26.0'], + 'databases': [ 'mysql-connector-python-rf==2.2.2', + 'bson==0.5.7', + 'pymongo==3.7.2', + 'crate==0.26.0', + 'influxdb==5.3.1', + 'psycopg2-binary==2.8.6'], + 'documentation': [ 'mock==4.0.3', + 'Sphinx==3.5.1', + 'sphinx-rtd-theme==0.5.1', + 'sphinx==3.3.0', + 'm2r2==0.2.7'], + 'drivers': [ 'pymodbus==2.5.0', + 'bacpypes==0.16.7', + 'modbus-tk==1.1.2', + 'pyserial==3.5'], + 'influxdb': ['influxdb==5.3.1'], + 'market': ['numpy==1.19.5', 'transitions==0.8.7'], + 'mongo': ['bson==0.5.7pymongo==3.7.2'], + 'mysql': ['mysql-connector-python-rf==2.2.2'], + 'pandas': ['numpy==1.19.5', 'pandas==1.1.5'], + 'postgres': ['psycopg2-binary==2.8.6'], + 'testing': [ 'mock==4.0.3', + 'pytest==6.2.2', + 'pytest-timeout==1.4.2', + 'websocket-client==0.58.0', + 'deepdiff==5.2.3', + 'docker==4.4.4'], + 'weather': ['Pint==0.16.1'], + 'web': [ 'ws4py==0.5.1', + 'PyJWT==1.7.1', + 'Jinja2==2.11.3', + 'passlib==1.7.4', + 'argon2-cffi==20.1.0', + 'Werkzeug==1.0.1']} + install_requires = [ 'gevent==20.6.1', 'greenlet==0.4.16', - 'grequests', + 'grequests==0.6.0', + 'idna<3,>=2.5', 'requests==2.23.0', - 'ply', - 'psutil', - 'python-dateutil', - 'pytz', - 'PyYAML', - 'pyzmq', - 'setuptools', - 'tzlocal', + 'ply==3.11', + 'psutil==5.8.0', + 'python-dateutil==2.8.1', + 'pytz==2021.1', + 'PyYAML==5.4.1', + 'pyzmq==22.0.3', + 'setuptools==39.0.1', + 'tzlocal==2.1', 'pyOpenSSL==19.0.0', 'cryptography==2.3', - # Cross platform way of handling changes in file/directories. - # https://github.com/Bogdanp/watchdog_gevent - 'watchdog-gevent', - 'wheel==0.30' -] + 'watchdog-gevent==0.1.1', + 'wheel==0.30'] + +option_requirements = [('pyzmq==22.0.3', ['--zmq=bundled'])] + -extras_require = { - 'crate': [ # crate databases - 'crate' - ], - 'databases': [ # Support for all known databases - 'mysql-connector-python-rf', - 'pymongo', - 'crate', - 'influxdb', - 'psycopg2-binary' - ], - 'dnp3': [ # dnp3 agent requirements. - 'pydnp3' - ], - 'documentation': [ # Requirements for building the documentation - 'mock', - 'mysql-connector-python-rf', - 'psutil', - 'pymongo', - 'Sphinx', - 'recommonmark', - 'sphinx-rtd-theme' - ], - 'drivers': [ - 'pymodbus', - 'bacpypes==0.16.7', - 'modbus-tk', - 'pyserial' - ], - 'influxdb': [ # influxdb historian requirements. - 'influxdb' - ], - 'market': [ # Requirements for the market service - 'numpy', - 'transitions', - ], - 'mongo': [ # mongo databases - 'pymongo', - ], - 'mysql': [ # mysql databases - 'mysql-connector-python-rf', - ], - 'pandas': [ # numpy and pandas for applications - 'numpy', - 'pandas', - ], - 'postgres': [ # numpy and pandas for applications - 'psycopg2-binary' - ], - 'testing': [ # Testing infrastructure dependencies - 'mock', - 'pytest', - 'pytest-timeout', - 'websocket-client', - # Allows us to compare nested dictionaries easily. - 'deepdiff' - ], - 'web': [ # Web support for launching web based agents including ssl and json web tokens. - 'ws4py', - 'PyJWT', - 'Jinja2', - 'passlib', - 'argon2-cffi', - 'Werkzeug' - ], - 'weather': [ - 'Pint' - ], -} diff --git a/scripts/bacnet/bacnet_scan.py b/scripts/bacnet/bacnet_scan.py index c3d918e597..a19060f234 100644 --- a/scripts/bacnet/bacnet_scan.py +++ b/scripts/bacnet/bacnet_scan.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -55,8 +55,8 @@ from bacpypes.basetypes import ServicesSupported from bacpypes.errors import DecodingError -import threading, time, sys - +import threading +import time import csv # some debugging @@ -193,7 +193,8 @@ def do_whois(self, args): f = None if args.csv_out is not None: - f = open(args.csv_out, "wb") + mode = 'wb' if sys.version_info.major == 2 else 'w' + f = open(args.csv_out, mode) field_names = ["address", "device_id", "max_apdu_length", diff --git a/scripts/bacnet/grab_bacnet_config.py b/scripts/bacnet/grab_bacnet_config.py index 3d00ad40dc..4e4f8710a7 100644 --- a/scripts/bacnet/grab_bacnet_config.py +++ b/scripts/bacnet/grab_bacnet_config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/bacnet/grab_multiple_configs.py b/scripts/bacnet/grab_multiple_configs.py index dd3c5619b3..df47033f5e 100644 --- a/scripts/bacnet/grab_multiple_configs.py +++ b/scripts/bacnet/grab_multiple_configs.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -54,12 +54,14 @@ def makedirs(path): raise arg_parser = argparse.ArgumentParser() -arg_parser.add_argument("csv_file", type=argparse.FileType('rb'), +arg_parser.add_argument("csv_file", type=argparse.FileType('r'), help="Input CSV file") arg_parser.add_argument("--use-proxy", action="store_true", help="Use proxy_grab_bacnet_config.py to grab configurations.") +arg_parser.add_argument("--proxy-id", help="Use this VIP identity for the BACnet Proxy instance") arg_parser.add_argument("--out-directory", help="Output directory.", default=".") +arg_parser.add_argument("--ini", help="BACPypes.ini config file to use") args = arg_parser.parse_args() @@ -79,20 +81,21 @@ def makedirs(path): address = device["address"] device_id = device["device_id"] - prog_args = ["python", program_path] + prog_args = ["python3", program_path] prog_args.append(device_id) - if not args.use_proxy: + if not args.use_proxy and address: prog_args.append("--address") prog_args.append(address) + if args.use_proxy and args.proxy_id: + prog_args += ['--proxy-id', args.proxy_id] prog_args.append("--registry-out-file") prog_args.append(join(registers_dir, str(device_id)+".csv")) prog_args.append("--driver-out-file") prog_args.append(join(devices_dir, str(device_id))) + if args.ini is not None: + prog_args.append("--ini") + prog_args.append(args.ini) print("executing command:", " ".join(prog_args)) subprocess.call(prog_args) - - - - diff --git a/scripts/bacnet/proxy_bacnet_scan.py b/scripts/bacnet/proxy_bacnet_scan.py index 082b5158ef..f3d887f16d 100644 --- a/scripts/bacnet/proxy_bacnet_scan.py +++ b/scripts/bacnet/proxy_bacnet_scan.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -117,15 +117,16 @@ def main(): _log.debug(" - args: %r", args) csv_writer = None + csv_file = None if args.csv_out is not None: - f = open(args.csv_out, "wb") + csv_file = open(args.csv_out, "w") field_names = ["address", "device_id", "max_apdu_length", "segmentation_supported", "vendor_id"] - csv_writer = csv.DictWriter(f, field_names) + csv_writer = csv.DictWriter(csv_file, field_names) csv_writer.writeheader() keystore = KeyStore() @@ -154,13 +155,12 @@ def main(): args.proxy_id)) else: gevent.sleep(args.timeout) + finally: + if csv_file is not None: + csv_file.close() try: main() except Exception as e: _log.exception("an error has occurred: %s".format(repr(e))) - - - - diff --git a/scripts/bacnet/proxy_grab_bacnet_config.py b/scripts/bacnet/proxy_grab_bacnet_config.py index 1fe36a661a..7f0955f4b9 100644 --- a/scripts/bacnet/proxy_grab_bacnet_config.py +++ b/scripts/bacnet/proxy_grab_bacnet_config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -38,13 +38,13 @@ import sys from csv import DictWriter - +from os.path import basename import logging import argparse import gevent from gevent.event import AsyncResult -from volttron.platform import get_address, get_home +from volttron.platform import get_address, get_home, jsonapi from volttron.platform.agent import utils from volttron.platform.agent.bacnet_proxy_reader import BACnetReader from volttron.platform.keystore import KeyStore @@ -112,6 +112,17 @@ def main(): _log.debug("initialization") _log.debug(" - args: %r", args) + config_file_name = basename(args.registry_out_file.name) + + config = { + "driver_config": {"device_address": str(args.address), + "device_id": args.device_id}, + "driver_type": "bacnet", + "registry_config": "config://registry_configs/{}".format(config_file_name) + } + + jsonapi.dump(config, args.driver_out_file, indent=4) + key_store = KeyStore() config_writer = DictWriter(args.registry_out_file, ('Reference Point Name', diff --git a/scripts/extract_config_store.py b/scripts/extract_config_store.py index 188535a31f..5e56afa817 100644 --- a/scripts/extract_config_store.py +++ b/scripts/extract_config_store.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/get_versions.py b/scripts/get_versions.py index d8350a1167..dc45ec4a49 100644 --- a/scripts/get_versions.py +++ b/scripts/get_versions.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/historian-scripts/update_sqlite_historian_database.py b/scripts/historian-scripts/update_sqlite_historian_database.py index bc8b89a9aa..a682c9dad0 100644 --- a/scripts/historian-scripts/update_sqlite_historian_database.py +++ b/scripts/historian-scripts/update_sqlite_historian_database.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -77,4 +77,4 @@ def main(database_name): help='The path to the database file.') args = parser.parse_args() - main(args.database) \ No newline at end of file + main(args.database) diff --git a/scripts/install_master_driver_configs.py b/scripts/install_platform_driver_configs.py similarity index 90% rename from scripts/install_master_driver_configs.py rename to scripts/install_platform_driver_configs.py index 6dd20d981e..bfb1c03574 100644 --- a/scripts/install_master_driver_configs.py +++ b/scripts/install_platform_driver_configs.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,9 +46,9 @@ import glob description = """ -Updates the contents of the Master Driver configuration store with a set of +Updates the contents of the Platform Driver configuration store with a set of configurations in a directory. This is designed to work with the output of -the update_master_driver_config.py script. +the update_platform_driver_config.py script. The script expects the target directory to have the following files and directories: @@ -63,7 +63,7 @@ For example: A device configuration with the path "devices/campus/building/my_device" will -be named "devices/campus/building/my_device" when added to the Master Driver +be named "devices/campus/building/my_device" when added to the Platform Driver configuration store. All other files and directories are ignored. @@ -72,7 +72,7 @@ Any errors in the configurations will cause the process to stop with an error. -By default this will delete the old master driver configuration store before +By default this will delete the old platform driver configuration store before adding new configurations. """ @@ -80,7 +80,7 @@ def install_configs(input_directory, keep=False): os.chdir(input_directory) keystore = KeyStore() - agent = Agent(address=get_address(), identity="master_driver_update_agent", + agent = Agent(address=get_address(), identity="platform_driver_update_agent", publickey=keystore.public, secretkey=keystore.secret, enable_store=False) @@ -89,7 +89,7 @@ def install_configs(input_directory, keep=False): event.wait() if not keep: - print("Deleting old Master Driver store") + print("Deleting old Platform Driver store") agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_delete_store', PLATFORM_DRIVER).get(timeout=10) @@ -134,7 +134,7 @@ def install_configs(input_directory, keep=False): help='The input directory.') parser.add_argument('--keep-old', action="store_true", - help="Do not remove existing device driver and registry files from the Master Driver configuration store.") + help="Do not remove existing device driver and registry files from the Platform Driver configuration store.") args = parser.parse_args() diff --git a/scripts/launch_actuator.sh b/scripts/launch_actuator.sh index b5134169bc..b7b14a7c5e 100755 --- a/scripts/launch_actuator.sh +++ b/scripts/launch_actuator.sh @@ -4,7 +4,7 @@ pushd ../services/core/ActuatorAgent if [ -z "$VOLTTRON_HOME" ]; then export VOLTTRON_HOME=~/.volttron fi -export AGENT_CONFIG=actuator-deploy.service +export AGENT_CONFIG=config python -m actuator.agent popd diff --git a/scripts/launch_drivers.sh b/scripts/launch_drivers.sh index 28d1385946..35aa1cb3de 100755 --- a/scripts/launch_drivers.sh +++ b/scripts/launch_drivers.sh @@ -1,10 +1,10 @@ #!/usr/bin/env bash -# Manually launch the master driver agent. Useful for debugging as running this way will dump driver logging data directly to the console. -pushd ../services/core/MasterDriverAgent +# Manually launch the platform driver agent. Useful for debugging as running this way will dump driver logging data directly to the console. +pushd ../services/core/PlatformDriverAgent if [ -z "$VOLTTRON_HOME" ]; then export VOLTTRON_HOME=~/.volttron fi -export AGENT_CONFIG=fake-master-driver.agent -python -m master_driver.agent +export AGENT_CONFIG=fake-platform-driver.agent +python -m platform_driver.agent popd diff --git a/scripts/launch_proxy.sh b/scripts/launch_proxy.sh index f0950dfc77..30112f7ca2 100755 --- a/scripts/launch_proxy.sh +++ b/scripts/launch_proxy.sh @@ -4,7 +4,7 @@ pushd ../services/core/BACnetProxy if [ -z "$VOLTTRON_HOME" ]; then export VOLTTRON_HOME=~/.volttron fi -export AGENT_CONFIG=bacnet-proxy.agent +export AGENT_CONFIG=config python -m bacnet_proxy.agent popd diff --git a/scripts/make_release_requirements.py b/scripts/make_release_requirements.py index bcadf21b04..fca7a91868 100644 --- a/scripts/make_release_requirements.py +++ b/scripts/make_release_requirements.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/obix/get_obix_driver_config.py b/scripts/obix/get_obix_driver_config.py index 0824c348e3..ac2afffc91 100644 --- a/scripts/obix/get_obix_driver_config.py +++ b/scripts/obix/get_obix_driver_config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/obix/get_obix_history_config.py b/scripts/obix/get_obix_history_config.py index 1d048225f2..2a25d13b1e 100644 --- a/scripts/obix/get_obix_history_config.py +++ b/scripts/obix/get_obix_history_config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/pycharm-launch.py b/scripts/pycharm-launch.py index 8652db9a17..a2a5694d5c 100644 --- a/scripts/pycharm-launch.py +++ b/scripts/pycharm-launch.py @@ -73,11 +73,17 @@ def write_required_statement(out=sys.stderr): sys.path.insert(0, abspath) if not parsed.no_config: if not os.environ.get('AGENT_CONFIG'): - if not os.path.exists(os.path.join(abspath, 'config')): + path_found = None + # Order of search is as follows config, config.yml, config.json + for cfg in ('config', 'config.yml', 'config.json'): + if os.path.exists(os.path.join(abspath, cfg)): + path_found = os.path.join(abspath, cfg) + break + if not path_found: sys.stderr.write('AGENT_CONFIG variable not set. Either set it or ' 'put a config file in the root of the agent dir.') sys.exit() - os.environ['AGENT_CONFIG'] = os.path.join(abspath, 'config') + os.environ['AGENT_CONFIG'] = path_found volttron_home = os.environ.get('VOLTTRON_HOME') diff --git a/scripts/rabbit_dependencies.sh b/scripts/rabbit_dependencies.sh index 0c08b9389b..f337dceb79 100755 --- a/scripts/rabbit_dependencies.sh +++ b/scripts/rabbit_dependencies.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash -list=( bionic artful stretch buster trusty xenial ) +set -e + +list=( bionic buster ) function exit_on_error { rc=$? @@ -14,8 +16,8 @@ function exit_on_error { function print_usage { echo " Command Usage: -/rabbit_dependencies.sh -Valid Debian distributions: ${list[@]} +/rabbit_dependencies.sh +Valid Raspbian/Debian distributions: ${list[@]} Valid centos versions: 6, 7, 8 " exit 0 @@ -70,8 +72,11 @@ function install_on_debian { fi echo "installing ERLANG" - ${prefix} apt-get install apt-transport-https libwxbase3.0-0v5 libwxgtk3.0-0v5 libsctp1 build-essential python-dev openssl libssl-dev libevent-dev git + ${prefix} apt-get update + ${prefix} apt-get install -y apt-transport-https libwxbase3.0-0v5 libwxgtk3.0-0v5 libsctp1 build-essential python-dev openssl libssl-dev libevent-dev git + set +e ${prefix} apt-get purge -yf erlang* + set -e # Add the signing key wget -O- https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc | ${prefix} apt-key add - @@ -80,33 +85,44 @@ function install_on_debian { fi version=${erlang_package_version} + common_deb_pkgs="\ + erlang-asn1=$version \ + erlang-base=$version \ + erlang-crypto=$version \ + erlang-diameter=$version \ + erlang-edoc=$version \ + erlang-eldap=$version \ + erlang-erl-docgen=$version \ + erlang-eunit=$version \ + erlang-inets=$version \ + erlang-mnesia=$version \ + erlang-odbc=$version \ + erlang-os-mon=$version \ + erlang-parsetools=$version \ + erlang-public-key=$version \ + erlang-runtime-tools=$version \ + erlang-snmp=$version \ + erlang-ssh=$version \ + erlang-ssl=$version \ + erlang-syntax-tools=$version \ + erlang-tools=$version \ + erlang-xmerl=$version \ + " + x86_pkgs="\ + erlang-ic=$version \ + erlang-inviso=$version \ + erlang-percept=$version \ + " + to_install="" + if [[ $is_arm == "FALSE" ]]; then + to_install="${common_deb_pkgs} ${x86_pkgs}" + else + to_install="${common_deb_pkgs}" + fi + ${prefix} apt-get update ${prefix} apt-get install -yf - ${prefix} apt-get install -y "erlang-asn1=$version" \ - "erlang-base=$version" \ - "erlang-crypto=$version" \ - "erlang-diameter=$version" \ - "erlang-edoc=$version" \ - "erlang-eldap=$version" \ - "erlang-erl-docgen=$version" \ - "erlang-eunit=$version" \ - "erlang-ic=$version" \ - "erlang-inets=$version" \ - "erlang-inviso=$version" \ - "erlang-mnesia=$version" \ - "erlang-odbc=$version" \ - "erlang-os-mon=$version" \ - "erlang-parsetools=$version" \ - "erlang-percept=$version" \ - "erlang-public-key=$version" \ - "erlang-runtime-tools=$version" \ - "erlang-snmp=$version" \ - "erlang-ssh=$version" \ - "erlang-ssl=$version" \ - "erlang-syntax-tools=$version" \ - "erlang-tools=$version" \ - "erlang-xmerl=$version" - + ${prefix} apt-get install -y ${to_install} ${prefix} apt-get install -y "erlang-nox=$version" } @@ -118,14 +134,17 @@ if [[ ${user} == 'root' ]]; then else prefix="sudo" fi +is_arm="FALSE" ${prefix} pwd > /dev/null if [[ "$os_name" == "debian" ]]; then erlang_package_version="1:22.1.8.1-1" + is_arm="FALSE" install_on_debian elif [[ "$os_name" == "raspbian" ]]; then erlang_package_version="1:21.2.6+dfsg-1" + is_arm="TRUE" install_on_debian elif [[ "$os_name" == "centos" ]]; then install_on_centos diff --git a/scripts/scalability-testing/README.md b/scripts/scalability-testing/README.md index 5c5ea0b308..e6641740d3 100644 --- a/scripts/scalability-testing/README.md +++ b/scripts/scalability-testing/README.md @@ -9,17 +9,17 @@ This will create configuration files in configs/ directory to produce fake data To use put the configurations into the configuration store run the following command. **NOTE: the volttron platform must be running at this point** ```` -python ../install_master_driver_configs.py configs +python ../install_platform_driver_configs.py configs ```` -To start the test launch the Master Driver Agent. A shortcut is to launch the Master Driver is found in the scripts directory +To start the test launch the Platform Driver Agent. A shortcut is to launch the Platform Driver is found in the scripts directory ```` cd .. ./launch_drivers.sh ```` -This will launch the master driver using the configurations created earlier. The MasterDriver will publish 5 sets of 1500 device "all" publishes and time the results. After 5 publishes have finished the master driver will print the average time and quit. +This will launch the platform driver using the configurations created earlier. The PlatformDriver will publish 5 sets of 1500 device "all" publishes and time the results. After 5 publishes have finished the platform driver will print the average time and quit. To change the number of points on each device to 6 rerun config_builder.py and change "fake18.csv" to "fake6.csv". To change the number of devices change the value passed to the --count argument. @@ -27,7 +27,7 @@ To test generally how well a Historian will perform on the platform start the fa ./launch_fake_historian.sh -Start the scalability drivers again and note the change in results. It should also be noted that fake historian does not have a good way to measure it's performance yet. By watching the historian log one should note approximately how long it takes to "catch up" after the master driver has finished each publish. +Start the scalability drivers again and note the change in results. It should also be noted that fake historian does not have a good way to measure it's performance yet. By watching the historian log one should note approximately how long it takes to "catch up" after the platform driver has finished each publish. To have the drivers publish all points individually as well the breadth first remove "--publish-only-depth-all" when you run config_builder.py. diff --git a/scripts/scalability-testing/agents/NullHistorian/null_historian/agent.py b/scripts/scalability-testing/agents/NullHistorian/null_historian/agent.py index 7bc1de1024..9ce7908b23 100644 --- a/scripts/scalability-testing/agents/NullHistorian/null_historian/agent.py +++ b/scripts/scalability-testing/agents/NullHistorian/null_historian/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -113,4 +113,4 @@ def main(argv=sys.argv): try: sys.exit(main()) except KeyboardInterrupt: - pass \ No newline at end of file + pass diff --git a/scripts/scalability-testing/agents/NullHistorian/setup.py b/scripts/scalability-testing/agents/NullHistorian/setup.py index 3c9972e392..3a654ad0db 100644 --- a/scripts/scalability-testing/agents/NullHistorian/setup.py +++ b/scripts/scalability-testing/agents/NullHistorian/setup.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/scalability-testing/config_builder.py b/scripts/scalability-testing/config_builder.py index 36f1d5117f..4063c5c353 100644 --- a/scripts/scalability-testing/config_builder.py +++ b/scripts/scalability-testing/config_builder.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,7 +43,7 @@ import argparse from shutil import copy, rmtree from test_settings import (virtual_device_host, device_types, config_dir, - volttron_install, master_driver_file, + volttron_install, platform_driver_file, host_config_location) from volttron.platform import jsonapi @@ -180,12 +180,12 @@ def build_all_configs(device_type, host_address, count, reg_config, config_dir, command_lines = build_device_configs(device_type, host_address, count, reg_config_ref, config_dir, interval, devices_dir) - build_master_config(config_dir, + build_platform_config(config_dir, scalability_test, scalability_test_iterations, driver_scrape_interval, publish_only_depth_all) -def build_master_config(config_dir, +def build_platform_config(config_dir, scalability_test, scalability_test_iterations, driver_scrape_interval, publish_only_depth_all): """Takes the input from multiple called to build_device_configs and create the master config.""" @@ -213,7 +213,7 @@ def build_master_config(config_dir, help='number of devices to configure') parser.add_argument('--scalability-test', action='store_true', - help='Configure master driver for a scalability test') + help='Configure platform driver for a scalability test') parser.add_argument('--publish-only-depth-all', action='store_true', help='Configure drivers to only publish depth first all.') diff --git a/scripts/scalability-testing/fabfile.py b/scripts/scalability-testing/fabfile.py index 196f9d52d8..53604f101e 100644 --- a/scripts/scalability-testing/fabfile.py +++ b/scripts/scalability-testing/fabfile.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -74,8 +74,8 @@ def build_configs(): config_paths.extend(configs) command_lines.extend(commands) - #config_builder.build_master_config(test_settings.master_driver_file, config_dir, config_paths) - config_builder.build_master_config(test_settings.master_driver_file, + #config_builder.build_platform_config(test_settings.platform_driver_file, config_dir, config_paths) + config_builder.build_platform_config(test_settings.platform_driver_file, config_full_path, config_paths, True, diff --git a/scripts/scalability-testing/launch_scalability_drivers.sh b/scripts/scalability-testing/launch_scalability_drivers.sh index f28bc078a0..ae48dbfc77 100755 --- a/scripts/scalability-testing/launch_scalability_drivers.sh +++ b/scripts/scalability-testing/launch_scalability_drivers.sh @@ -1,11 +1,11 @@ #!/usr/bin/env bash -# Manually launch the master driver agent using the configuration created by the config builder. +# Manually launch the platform driver agent using the configuration created by the config builder. # Useful for debugging as running this way will dump driver logging data directly to the console. -pushd ../../services/core/MasterDriverAgent +pushd ../../services/core/PlatformDriverAgent if [ -z "$VOLTTRON_HOME" ]; then export VOLTTRON_HOME=~/.volttron fi -export AGENT_CONFIG=../../../scripts/scalability-testing/configs/master-driver.agent -python -m master_driver.agent +export AGENT_CONFIG=../../../scripts/scalability-testing/configs/platform-driver.agent +python -m platform_driver.agent popd diff --git a/scripts/scalability-testing/multilistener/README.md b/scripts/scalability-testing/multilistener/README.md index 149cb566f1..773cd0a5f9 100644 --- a/scripts/scalability-testing/multilistener/README.md +++ b/scripts/scalability-testing/multilistener/README.md @@ -1,34 +1,36 @@ #Message Bus Benchmarking -This python script can be used to check the performance of PubSub communication for either ZMQ/RMQ message bus. It spins up multiple agents with each agent running as separate process and each listening on 'devices' topic. So -it needs to run along with master driver with different scalability test confguration. - +This python script can be used to check the performance of PubSub communication for either ZMQ/RMQ message bus. +It spins up multiple agents with each agent running as separate process and each listening on 'devices' topic. +So it needs to run along with platform driver with different scalability test confguration. + Steps: -1. Activate VOLTTRON environment and start VOLTTRON platform. Allow authentication to all incoming connections +1. Activate VOLTTRON environment and start VOLTTRON platform. Allow authentication to all incoming connections ```sh source env/bin/activate ./start-volttron vctl auth add - domain []: - address []: - user_id []: - capabilities (delimit multiple entries with comma) []: - roles (delimit multiple entries with comma) []: - groups (delimit multiple entries with comma) []: - mechanism [CURVE]: + domain []: + address []: + user_id []: + capabilities (delimit multiple entries with comma) []: + roles (delimit multiple entries with comma) []: + groups (delimit multiple entries with comma) []: + mechanism [CURVE]: credentials []: /.*/ - comments []: - enabled [True]: + comments []: + enabled [True]: ``` -2. Build fake device configuration for the master driver. + +2. Build fake device configuration for the platform driver. ```sh cd scripts/scalability-testing/ python config_builder.py --count=1500 --publish-only-depth-all --scalability-test fake fake18.csv null ``` This will create configuration files in configs/ directory to produce fake data from 1500 devices with 18 points each. -3. Set "driver_scrape_interval" parameter in configs/config to '0.0' so that master driver scrapes all the devices together with zero staggered scrape interval. +3. Set "driver_scrape_interval" parameter in configs/config to '0.0' so that platform driver scrapes all the devices together with zero staggered scrape interval. { "scalability_test_iterations": 5, @@ -42,11 +44,11 @@ This will create configuration files in configs/ directory to produce fake data 4. Put the configurations into the configuration store with the following command. ```sh - python ../install_master_driver_configs.py configs + python ../install_platform_driver_configs.py configs ``` - + 5. In a new terminal, activate the VOLTTRON environment and run the multi-listener script. - + ```sh source env/bin/activate cd scripts/scalability-testing/multilistener @@ -54,18 +56,72 @@ This will create configuration files in configs/ directory to produce fake data ``` This starts 10 listeners, each listening for 1500 device topics. By default, agents use 'zmq' message bus. But you can the message bus by changing message bus option to '-m rmq'. - -5. To start the test launch the Master Driver Agent. A shortcut is to launch the Master Driver is found in the scripts directory + +5. To start the test launch the Platform Driver Agent. A shortcut is to launch the Platform Driver is found in the scripts directory ```sh cd .. ./launch_drivers.sh ``` -This will launch the master driver using the configurations created earlier. The MasterDriver will publish 5 sets of 1500 device "all" publishes and time the results. After 5 publishes have finished the master driver will print the average time and quit. After 5 set of publishes, 'multi_listener_agent.py' script will also finish execution. It finally prints the mean time taken to receive each set of publishes. -By default, the master driver runs on 'zmq' message bus. You can change the default setting, adding below environment +This will launch the platform driver using the configurations created earlier. The PlatformDriver will publish 5 sets of 1500 device "all" publishes and time the results. After 5 publishes have finished the platform driver will print the average time and quit. After 5 set of publishes, 'multi_listener_agent.py' script will also finish execution. It finally prints the mean time taken to receive each set of publishes. +By default, the platform driver runs on 'zmq' message bus. You can change the default setting, adding below environment flag inside 'launch_drivers.sh' script. ```sh export MESSAGEBUS=rmq - ``` \ No newline at end of file + ``` + +## Looking at raw timing output + +### The multi-listener script with raw data output + +The `raw_output_multi_listener_agent.py` script is a modified version of the `multi_listener_agent.py` script, which will record the header and client times for each message received and save them in json format for more detailed processing. +This is particularly useful if you may be interested in decoupling any statistical analyses of the timing results from the process of configuring and running the agents to collect the data (for example, if you're interested in exploring multiple or less defined analyses, or if collecting data in many configurations where the time cost of re-running the collection is significant). +Some important notes about this script: +- The raw output file will either end with `.raw` (if the provided output file name has no extension), or will insert `.raw` prior to the extension (if one is present); the output file from the normal output script is not impacted. +- The raw output file must not already exist (this is checked before starting the agents). + This is different from the overwrite behavior of the base script (and still present for the non-raw output file). + +### Processing the data + +An additional script (`process_file.py`) is included for parsing the raw output files and computing the average total time per scrape (that is, the difference between the source time of the first message in a set of scrape messages and the client time of the last message, averaged over all listeners and over the 5 sequential scrapes used in the test). +This script could be modified or expanded to look at other statistical metrics (possibly fitting to look for other trends, or computing other statistical moments for example). + +## Other notes and tips + +If running a large number of different configurations, working with the configuration store in the normal/supported way is quite slow (each interaction requires client/server interactions and were casually observed to take about a second). +An alternate way is to directly modify the configuration store's json file directly by: +1. stop the platform +2. modify the `VOLTTRON_HOME/configuration_store/.store` file +3. restart the platform + +For step 2, an extended bash command can be used unless the number of interactions was quite large. +This isn't the most efficient, but was very quick to write/modify as needed; a par of examples follow. +*Note:* to run the following commands you need to install two CLI tools: `jq` and `sponge`. +On debian, you can do that with `sudo apt-get install jq moreutils`. +For adding new devices: +```sh +for i in {1..99}; do + echo "adding device [$i] at $(date)"; + jq --arg dname "devices/fake$i" '. + {($dname): .["devices/fake0"]}' $VOLTTRON_HOME/configuration_store/.store | sponge $VOLTTRON_HOME/configuration_store/.store; +done +``` +(where the line breaks are optional, the update is done directly to the store file, and the `jq` and `sponge` tools were previously installed). +You can replace the json wrangling part of the jq command with the following to instead delete entries +```sh +jq --arg dname "devices/fake$i" 'del(.[$dname])' +``` + +Once the configuration store is configured to your needs, you run `raw_output_multi_listener_agent.py` in the same way as `multi_listener_agent.py` above. +You can run it with `--help` to see a description of the arguments, or as an example, for the case where you're interested in a single listener and have 10 devices installed in the platform driver, you could run the following (with your volttron virtual environment for a ZMQ platform activated): +```sh +python raw_output_multi_listener_agent.py -l 1 -d 10 -f `pwd`/1listener_10device_zmq.out -m zmq +``` +which would produce two files in the current directory, `1listener_10device_zmq.out` and `1listener_10device_zmq.raw.out`. + +You could then process the raw output file to have it compute the mean of the total time of the device scrapes by running: +```sh +./process_file.py -i 1listener_10device_zmq.raw.out +``` +The results are printed to STDOUT. diff --git a/scripts/scalability-testing/multilistener/process_file.py b/scripts/scalability-testing/multilistener/process_file.py new file mode 100755 index 0000000000..bbe1b1951a --- /dev/null +++ b/scripts/scalability-testing/multilistener/process_file.py @@ -0,0 +1,50 @@ +#!/usr/local/bin/python3 + +import argparse +import json + +import numpy + +def load_data(file_path): + data = None + with open(file_path, 'r') as input_file: + data = json.load(input_file) + return data + +def pivot_data(data): + result = {} + for agent,agent_results in data.items(): + result[agent] = [] + for an_index,full_scrape in enumerate(agent_results): + result[agent].append({}) + result[agent][an_index] = { + 'header_times': numpy.array([device['header_t'] for device in full_scrape]), + 'client_times': numpy.array([device['client_t'] for device in full_scrape]), + } + return result + +def main(input): + raw_data = load_data(input) + pivoted_data = pivot_data(raw_data) + + # average over agents and publishes of the full time of each publish + overall_result = {} + for agent,agent_results in raw_data.items(): + for devices in agent_results: + #scrape_times = [i_device['client_t'][-1] - i_device['header_t'][0] for i_device in device] + scrape_time = devices[-1]['client_t'] - devices[0]['header_t'] + overall_result[agent] = { + 'total_time': scrape_time, + 'time_per_device': scrape_time/len(devices) + } + mean_over_agents = numpy.mean([data['total_time'] for agent,data in overall_result.items()]) + norm_mean_over_agents = numpy.mean([data['time_per_device'] for agent,data in overall_result.items()]) + print(f'full mean time: [{mean_over_agents:.4f}] sec. ({norm_mean_over_agents:.4f} sec./device)') + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('-i', '--input', action='store', type=str, required=True) + options = parser.parse_args() + + main(options.input) diff --git a/scripts/scalability-testing/multilistener/raw_output_multi_listener_agent.py b/scripts/scalability-testing/multilistener/raw_output_multi_listener_agent.py new file mode 100644 index 0000000000..efb0c62407 --- /dev/null +++ b/scripts/scalability-testing/multilistener/raw_output_multi_listener_agent.py @@ -0,0 +1,217 @@ +from __future__ import print_function + +import argparse +import gevent +import json +import logging +import os +import sys + +from multiprocessing import Process, Lock, JoinableQueue + +from volttron.platform.agent import utils +from volttron.platform.vip.agent import Agent +from volttron.platform import get_address +from volttron.platform.agent.utils import setup_logging + + +def eprint(*args, **kwargs): + print("###>", *args, file=sys.stderr, **kwargs) + + +setup_logging() +_log = logging.getLogger('multiagent') +_log.setLevel(logging.INFO) + + +class MultiAgent(Process): + def __init__(self, agentid, lock, dev, msgque, raw_queue, message_bus): + Process.__init__(self) + self.lock = lock + self.msgque = msgque + self.raw_queue = raw_queue + ident = 'Agent' + str(agentid) + self.agent = Agent(address=get_address(), identity=ident, message_bus=message_bus) + event = gevent.event.Event() + self.agent._agentid = 'Agent' + str(agentid) + + self.task = gevent.spawn(self.agent.core.run, event) + + event.wait(timeout=2) + + self.agent.vip.pubsub.subscribe('pubsub', 'devices', self.on_message) + + _log.debug("Process id: {}".format(os.getpid())) + eprint("eprint - Process id: {}".format(os.getpid())) + self.count = 0 + self.publishes = 0 + self.data_list = [[], [], [], [], []] + self.delta_list = [] + self.msg = [] + self.devices = dev + self.max_publishes = 5*dev + self.utcnow_string = '' + + def on_message(self, peer, sender, bus, topic, headers, message): + '''Use match_all to receive all messages and print them out.''' + #if self.count == 0: + # eprint(f"the max publishes is {self.max_publishes}") + client_time = utils.get_aware_utc_now() + utcnow_string = utils.format_timestamp(client_time) + self.count += 1 + #eprint( + # "Process name: [%r], Count: [%r], Time: [%r], Peer: [%r], Sender: [%r]:, Bus: [%r], Topic: [%r], Headers: [%r], " + # "Message: [%r]" % (self.name, self.count, utcnow_string, peer, sender, bus, topic, headers, message)) + header_time = utils.parse_timestamp_string(headers['TimeStamp']) + # eprint("Agent: {0}, current timestamp {1}, header timestamp {2}!".format(self.agent._agentid, + # utcnow_string, + # headers['TimeStamp'])) + self.data_list[self.publishes].append({ + 'header_t': header_time.timestamp(), + 'client_t': client_time.timestamp(), + }) + #if self.count%21 == 0 or self.count%42 == 1: + diff = client_time - header_time + d_float = diff.seconds + (diff.microseconds* 0.000001) + #eprint(f"--- count [{self.count}] | Agent {self.agent._agentid} | pub time is {d_float} seconds") + ##TODO: why do we take the last device? Should it be a mean? + if self.count % self.devices == 0: + #eprint("Agent: {0}, current timestamp {1}, header timestamp {2}!".format(self.agent._agentid, + # utcnow_string, + # headers['TimeStamp'])) + #eprint("I'M HERE!") + diff = client_time - header_time + d_float = diff.seconds + (diff.microseconds* 0.000001) + self.msg.append(d_float) + # increment publish count + eprint(f'[{self.agent._agentid}] done with publish [{self.publishes}]') + self.publishes += 1 + + #self.delta_list.append(diff) + #avg = sum(self.delta_list, timedelta(0))/len(self.delta_list) + + if (self.count == self.max_publishes): + eprint(f"finishing because count [{self.count}] == max_publishes [{self.max_publishes}] (publish counter is [{self.publishes}])") + self.queue_put(self.msg) + self.task.kill() + + def queue_put(self, msg): + self.lock.acquire() + self.msgque.put(msg) + self.raw_queue.put({self.agent._agentid: self.data_list}) + #self.agent._agentid = 'Agent' + str(agentid) + + self.lock.release() + + +def main(argv=sys.argv): + args = argv[1:] + + parser = argparse.ArgumentParser() + + # Add options for number of listeners and test results file + parser.add_argument("-l", "--listener", action='store', type=int, dest="ps", + help="Number of listener agents") + parser.add_argument( + '-d', '--devices', action='store', type=int, dest="dev", + help='Number of devices') + parser.add_argument( + '-m', '--messagebus', action='store', type=str, dest="mb", + help='message bus') + parser.add_argument( + '-f', '--file', metavar='FILE', dest="test_opt", + help='send test result to FILE') + + parser.set_defaults( + ps=2, + dev=50, + mb='zmq', + test_opt='test.log' + ) + opts = parser.parse_args(args) + ps = opts.ps + test_opt = opts.test_opt + dev = opts.dev + message_bus = opts.mb + #_log.debug("Num of listener agents {0}, Devices {1}, test output file: {2}".format(ps, dev, test_opt)) + eprint("Num of listener agents {0}, Devices {1}, test output file: {2}".format(ps, dev, test_opt)) + + try: + proc = [] + time_delta_msg = [] + raw_time_data = {} + l = Lock() + msgQ = JoinableQueue() + raw_queue = JoinableQueue() + proc = [MultiAgent(i, l, dev, msgQ, raw_queue, message_bus=message_bus) for i in range(ps)] + + raw_output_file = f'{os.path.splitext(test_opt)[0]}.raw{os.path.splitext(test_opt)[-1]}' + if os.path.exists(raw_output_file): + raise ValueError(f'file [{raw_output_file}] already exists') + + for p in proc: + #_log.debug("Process name {0}, Process Id: {1}".format(p.name, p.pid)) + p.start() + eprint("Process name [{}], Process Id: [{}]".format(p.name, p.pid)) + eprint('listeners processes started') + + #Wait for queue to be done + while True: + if not msgQ.empty(): + time_delta_msg.append(msgQ.get(True)) + msgQ.task_done() + #_log.debug("msg len {0}, proc len {1}".format(len(time_delta_msg), len(proc))) + #eprint("msg len {0}, proc len {1}".format(len(time_delta_msg), len(proc))) + if len(time_delta_msg) == len(proc): + break + gevent.sleep(0.5) + eprint('data collected, processing') + ## and the raw queue + while True: + if not raw_queue.empty(): + raw_time_data.update(raw_queue.get(True)) + raw_queue.task_done() + if len(raw_time_data) == len(proc): + break + gevent.sleep(0.5) + eprint('raw data collected') + eprint(f'size is {len(raw_time_data)}') + + # Calculate the mean for each time samples/deltas + td = [] + mean = [] + for i in range(5): + td = [] + for j in range(len(time_delta_msg)): + td.append(time_delta_msg[j][i]) + mn = sum(td) / len(td) + mean.append(mn) + + # Write the collected time samples into a file + fd = 0 + try: + fd = open(test_opt, 'w') + fd.write('Mean=' + str(mean)) + total_mean = sum(mean) / len(mean) + fd.write('Total Mean= ' + str(total_mean)) + eprint("TOTAL MEAN = {}".format(total_mean)) + except IOError: + eprint("Error writing into file") + finally: + if fd: + fd.close() + with open(raw_output_file, 'x') as a_file: + a_file.write(json.dumps(raw_time_data)) + #_log.debug("I'M DONE WITH THE TEST.") + eprint("I'M DONE WITH THE TEST") + for p in proc: + p.task.join() + except KeyboardInterrupt: + for p in proc: + p.task.kill() + eprint("KEYBOARD INTERRUPT") + + +if __name__ == '__main__': + # Entry point for script + sys.exit(main()) diff --git a/scripts/scalability-testing/test_settings.py b/scripts/scalability-testing/test_settings.py index 15dcf0bf62..304914b421 100644 --- a/scripts/scalability-testing/test_settings.py +++ b/scripts/scalability-testing/test_settings.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,7 +44,7 @@ device_types = {'bacnet': (1, 'device-configs/bacnet_lab.csv'), 'modbus': (1, 'device-configs/catalyst371.csv')} -#Output directory for configurations for the master driver agent +#Output directory for configurations for the platform driver agent # and individual drivers on the local host. #Directory will be created if it does not exist. config_dir = "configs" @@ -52,8 +52,8 @@ #Volttron installation directory on virtua_device_host. volttron_install = "~/volttron" -#Master driver config file name -master_driver_file = "master-driver.agent" +#platform driver config file name +platform_driver_file = "platform-driver.agent" #Location of virtual device config files on virtual device host. #Directory will be created if it does not exist and will diff --git a/scripts/scalability-testing/virtual-drivers/bacnet.py b/scripts/scalability-testing/virtual-drivers/bacnet.py index 8439438d75..9646fdf37b 100644 --- a/scripts/scalability-testing/virtual-drivers/bacnet.py +++ b/scripts/scalability-testing/virtual-drivers/bacnet.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/scalability-testing/virtual-drivers/modbus.py b/scripts/scalability-testing/virtual-drivers/modbus.py index 9d3ec06275..a074d4a888 100644 --- a/scripts/scalability-testing/virtual-drivers/modbus.py +++ b/scripts/scalability-testing/virtual-drivers/modbus.py @@ -3,7 +3,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/scalability-testing/virtual-drivers/shutdown.py b/scripts/scalability-testing/virtual-drivers/shutdown.py index 5fb00d9933..3899818556 100644 --- a/scripts/scalability-testing/virtual-drivers/shutdown.py +++ b/scripts/scalability-testing/virtual-drivers/shutdown.py @@ -3,7 +3,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -76,4 +76,4 @@ def stop_bacnet(): break if __name__ == '__main__': - stop_all() \ No newline at end of file + stop_all() diff --git a/scripts/scalability-testing/virtual-drivers/utils.py b/scripts/scalability-testing/virtual-drivers/utils.py index 34fd62fbf7..a631c8f33e 100644 --- a/scripts/scalability-testing/virtual-drivers/utils.py +++ b/scripts/scalability-testing/virtual-drivers/utils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/update_curve_key.py b/scripts/update_curve_key.py index 31517d72ac..8f3804762b 100644 --- a/scripts/update_curve_key.py +++ b/scripts/update_curve_key.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/update_master_driver_config.py b/scripts/update_master_driver_config.py index 0b432b9e22..82c9ad5a2f 100644 --- a/scripts/update_master_driver_config.py +++ b/scripts/update_master_driver_config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -162,11 +162,11 @@ def process_main_config(main_file, output_directory, keep=False): if __name__ == "__main__": parser = ArgumentParser(description="Update a master configuration to use the configuration store and" " writes the new configurations to disk. To automatically update the" - " configurations for the Master Driver in the store use the script" - " install_master_driver_configs.py on the output from this script.") + " configurations for the Platform Driver in the store use the script" + " install_platform_driver_configs.py on the output from this script.") parser.add_argument('main_configuration', type=file, - help='The pre-configuration store master driver configuration file') + help='The pre-configuration store platform driver configuration file') parser.add_argument('output_directory', help='The output directory.') diff --git a/services/contrib/FactsServiceAgent/README.rst b/services/contrib/FactsServiceAgent/README.rst index 8110ba3176..b8cf0da21c 100644 --- a/services/contrib/FactsServiceAgent/README.rst +++ b/services/contrib/FactsServiceAgent/README.rst @@ -25,7 +25,7 @@ Recommended Setup ----------------- - ``BACnet Proxy`` Agent - ``Facts Service`` Agent -- ``Master Driver`` Agent +- ``Platform Driver`` Agent **Note**: If you're planning on using only the Facts Service agent, it is recommended to disable the ``Platform`` agent since communication with a Volttron Central instance isn't required, hence saving resources. diff --git a/services/contrib/KafkaAgent/setup.py b/services/contrib/KafkaAgent/setup.py index 25ab24da59..cb5acdb326 100644 --- a/services/contrib/KafkaAgent/setup.py +++ b/services/contrib/KafkaAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/contrib/README.rst b/services/contrib/README.rst deleted file mode 100644 index feae85c998..0000000000 --- a/services/contrib/README.rst +++ /dev/null @@ -1 +0,0 @@ -This folder is meant for user contributed code. \ No newline at end of file diff --git a/services/core/ActuatorAgent/README.md b/services/core/ActuatorAgent/README.md new file mode 100644 index 0000000000..1b63be628b --- /dev/null +++ b/services/core/ActuatorAgent/README.md @@ -0,0 +1,39 @@ +# Actuator Agent +The Actuator Agent is used to manage write access to devices. Other agents +may request scheduled times, called Tasks, to interact with one or more +devices. + +Agents may interact with the ActuatorAgent via either PUB/SUB or RPC, +but it is recommended agents use RPC to interact with the ActuatorAgent. + +The PUB/SUB interface remains primarily for VOLTTRON 2.0 agents. + +The Actuator Agent also triggers the heart beat on devices whose +drivers are configured to do so. + +## ActuatorAgent Configuration + + +1. "schedule_publish_interval" + + Interval between published schedule announcements in seconds. Defaults to 30. +2. "preempt_grace_time" + + Minimum time given to Tasks which have been preempted to clean up in seconds. Defaults to 60. +3. "schedule_state_file" + + File used to save and restore Task states if the ActuatorAgent restarts for any reason. File will be + created if it does not exist when it is needed. +4. "heartbeat_interval" + + How often to send a heartbeat signal to all devices in seconds. Defaults to 60. + + +## Sample configuration file + +``` + { + "schedule_publish_interval": 30, + "schedule_state_file": "actuator_state.pickle" + } +``` diff --git a/services/core/ActuatorAgent/actuator/agent.py b/services/core/ActuatorAgent/actuator/agent.py index f76ce07f7a..8a9547b67b 100644 --- a/services/core/ActuatorAgent/actuator/agent.py +++ b/services/core/ActuatorAgent/actuator/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -540,7 +540,7 @@ class ActuatorAgent(Agent): state to. This file is updated every time a schedule changes. :param preempt_grace_time: Time in seconds after a schedule is preemted before it is actually cancelled. - :param driver_vip_identity: VIP identity of the Master Driver Agent. + :param driver_vip_identity: VIP identity of the Platform Driver Agent. :type heartbeat_interval: float :type schedule_publish_interval: float @@ -603,7 +603,7 @@ def configure(self, config_name, action, contents): self.schedule_publish_interval = schedule_publish_interval self.allow_no_lock_write = allow_no_lock_write - _log.debug("MasterDriver VIP IDENTITY: {}".format(self.driver_vip_identity)) + _log.debug("PlatformDriver VIP IDENTITY: {}".format(self.driver_vip_identity)) _log.debug("Schedule publish interval: {}".format(self.schedule_publish_interval)) #Only restart the heartbeat if it changes. @@ -661,7 +661,7 @@ def _heart_beat(self): self.vip.rpc.call(self.driver_vip_identity, 'heart_beat').get( timeout=20.0) except Unreachable: - _log.warning("Master driver is not running") + _log.warning("Platform driver is not running") except (Exception, gevent.Timeout) as e: _log.warning(''.join([e.__class__.__name__, '(', str(e), ')'])) @@ -976,7 +976,7 @@ def get_multiple_points(self, topics, **kwargs): """RPC method Get multiple points on multiple devices. Makes a single - RPC call to the master driver per device. + RPC call to the platform driver per device. :param topics: List of topics or list of [device, point] pairs. :param \*\*kwargs: Any driver specific parameters @@ -1020,7 +1020,7 @@ def set_multiple_points(self, requester_id, topics_values, **kwargs): """RPC method Set multiple points on multiple devices. Makes a single - RPC call to the master driver per device. + RPC call to the platform driver per device. :param requester_id: Ignored, VIP Identity used internally :param topics_values: List of (topic, value) tuples diff --git a/services/core/ActuatorAgent/actuator/scheduler.py b/services/core/ActuatorAgent/actuator/scheduler.py index 92b76641f3..64df1b4419 100644 --- a/services/core/ActuatorAgent/actuator/scheduler.py +++ b/services/core/ActuatorAgent/actuator/scheduler.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/ActuatorAgent/actuator-deploy.service b/services/core/ActuatorAgent/config similarity index 100% rename from services/core/ActuatorAgent/actuator-deploy.service rename to services/core/ActuatorAgent/config diff --git a/services/core/ActuatorAgent/setup.py b/services/core/ActuatorAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/core/ActuatorAgent/setup.py +++ b/services/core/ActuatorAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/ActuatorAgent/tests/actuator_fixtures.py b/services/core/ActuatorAgent/tests/actuator_fixtures.py index 36f121f8a8..c21834023e 100644 --- a/services/core/ActuatorAgent/tests/actuator_fixtures.py +++ b/services/core/ActuatorAgent/tests/actuator_fixtures.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/ActuatorAgent/tests/test_actuator_pubsub.py b/services/core/ActuatorAgent/tests/test_actuator_pubsub.py index f9c7b6d13e..d95bc4c470 100644 --- a/services/core/ActuatorAgent/tests/test_actuator_pubsub.py +++ b/services/core/ActuatorAgent/tests/test_actuator_pubsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,6 @@ # }}} - from volttron.platform import get_services_core, get_examples """ @@ -45,14 +44,14 @@ actuator agent with both 2.0 and 3.0 publish agents """ -from datetime import datetime, timedelta - import gevent import gevent.subprocess as subprocess import pytest +from datetime import datetime, timedelta from dateutil.tz import tzutc from gevent.subprocess import Popen from mock import MagicMock + from volttron.platform.agent import utils from volttron.platform.messaging import topics from volttron.platform.agent.known_identities import PLATFORM_DRIVER @@ -65,6 +64,7 @@ REQUEST_CANCEL_SCHEDULE = 'request_cancel_schedule' REQUEST_NEW_SCHEDULE = 'request_new_schedule' + @pytest.fixture(scope="function") def cancel_schedules(request, publish_agent): """ @@ -84,10 +84,7 @@ def cancel_schedules(request, publish_agent): def cleanup(): for schedule in cleanup_parameters: - print( - 'Requesting cancel for task:', schedule['taskid'], - 'from agent:', - schedule['agentid']) + print('Requesting cancel for task:', schedule['taskid'], 'from agent:', schedule['agentid']) header = { 'type': 'CANCEL_SCHEDULE', @@ -116,14 +113,9 @@ def revert_devices(request, publish_agent): def cleanup(): for device in cleanup_parameters: - print( - 'Requesting revert on device:', device['device'], - 'from agent:', - device['agentid']) - - topic = topics.ACTUATOR_REVERT_DEVICE(campus='', - building='', - unit=device['device']) + print('Requesting revert on device:', device['device'], 'from agent:', device['agentid']) + + topic = topics.ACTUATOR_REVERT_DEVICE(campus='', building='', unit=device['device']) publish(publish_agent, topic, {}, None) # sleep so that the message is sent to pubsub before @@ -135,13 +127,12 @@ def cleanup(): # VOLTTRON 2.0 agents will deprecated from VOLTTRON 6.0 release. So running it for only volttron 3.0 agents -@pytest.fixture(scope="module", - params=['volttron_3']) +@pytest.fixture(scope="module", params=['volttron_3']) def publish_agent(request, volttron_instance): """ Fixture used for setting up the environment. 1. Creates fake driver configs - 2. Starts the master driver agent with the created fake driver agents + 2. Starts the platform driver agent with the created fake driver agents 3. Starts the actuator agent 4. Creates an instance Agent class for publishing and returns it @@ -152,21 +143,18 @@ def publish_agent(request, volttron_instance): """ global actuator_uuid - - # Reset master driver config store + # Reset platform driver config store cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all'] process = Popen(cmd, env=volttron_instance.env, - cwd='scripts/scalability-testing', stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = process.wait() print(result) assert result == 0 - # Add master driver configuration files to config store. - cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, - 'fake.csv', 'fake_unit_testing.csv', '--csv'] + # Add platform driver configuration files to config store. + cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, 'fake.csv', 'fake_unit_testing.csv', '--csv'] process = Popen(cmd, env=volttron_instance.env, - cwd='scripts/scalability-testing', + cwd=f"{volttron_instance.volttron_root}/scripts/scalability-testing", stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = process.wait() print(result) @@ -177,23 +165,23 @@ def publish_agent(request, volttron_instance): cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, config_name, 'fake_unit_testing.config', '--json'] process = Popen(cmd, env=volttron_instance.env, - cwd='scripts/scalability-testing', + cwd=f"{volttron_instance.volttron_root}/scripts/scalability-testing", stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = process.wait() print(result) assert result == 0 - # Start the master driver agent which would intern start the fake driver + # Start the platform driver agent which would intern start the fake driver # using the configs created above - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices # Start the actuator agent through which publish agent should communicate - # to fake device. Start the master driver agent which would intern start + # to fake device. Start the platform driver agent which would intern start # the fake driver using the configs created above actuator_uuid = volttron_instance.install_agent( agent_dir=get_services_core("ActuatorAgent"), @@ -211,8 +199,6 @@ def publish_agent(request, volttron_instance): fake_publish_agent = volttron_instance.build_agent() # Mock callback methods attach actuate method to fake_publish_agent as # it needs to be a class method for the call back to work - # fake_publish_agent.callback = - # types.MethodType(callback, fake_publish_agent) fake_publish_agent.callback = MagicMock(name="callback") fake_publish_agent.callback.reset_mock() # subscribe to schedule response topic @@ -226,9 +212,9 @@ def publish_agent(request, volttron_instance): def stop_agent(): print("In teardown method of module") volttron_instance.stop_agent(actuator_uuid) - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) volttron_instance.remove_agent(actuator_uuid) - volttron_instance.remove_agent(master_uuid) + volttron_instance.remove_agent(platform_uuid) gevent.sleep(2) fake_publish_agent.core.stop() @@ -244,10 +230,7 @@ def publish(publish_agent, topic, header, message): :param header: header to publish :param message: message to publish """ - publish_agent.vip.pubsub.publish('pubsub', - topic, - headers=header, - message=message).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', topic, headers=header, message=message).get(timeout=10) @pytest.mark.actuator_pubsub @@ -291,13 +274,11 @@ def test_schedule_response(publish_agent): assert publish_agent.callback.call_count == 1 print('call args ', publish_agent.callback.call_args[0][1]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' assert result_header['taskID'] == 'task_schedule_response' - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == SUCCESS # Test valid cancellation @@ -316,22 +297,19 @@ def test_schedule_response(publish_agent): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['taskID'] == 'task_schedule_response' - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == SUCCESS assert result_header['type'] == 'CANCEL_SCHEDULE' @pytest.mark.actuator_pubsub def test_schedule_announce(publish_agent, volttron_instance): - """ Tests the schedule announcements of actuator. - - Waits for two announcements and checks if the right parameters - are sent to call back method. + """ + Tests the schedule announcements of actuator. + Waits for two announcements and checks if the right parameters are sent to call back method. :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param volttron_instance: Volttron instance on which test is run @@ -352,8 +330,7 @@ def test_schedule_announce(publish_agent, volttron_instance): # reset mock to ignore any previous callback publish_agent.callback.reset_mock() publish_agent.actuate0 = MagicMock(name="magic_actuate0") - announce = topics.ACTUATOR_SCHEDULE_ANNOUNCE(campus='', building='', - unit='fakedriver0') + announce = topics.ACTUATOR_SCHEDULE_ANNOUNCE(campus='', building='', unit='fakedriver0') publish_agent.vip.pubsub.subscribe( peer='pubsub', prefix=announce, @@ -382,12 +359,8 @@ def test_schedule_announce(publish_agent, volttron_instance): args_list1 = publish_agent.actuate0.call_args_list[0][0] args_list2 = publish_agent.actuate0.call_args_list[1][0] assert args_list1[1] == args_list2[1] == alternate_actuator_vip_id - assert args_list1[3] == args_list2[ - 3] == 'devices/actuators/schedule/announce/fakedriver0' - assert args_list1[4]['taskID'] == args_list2[4][ - 'taskID'] == 'task_schedule_announce' - # assert args_list1[4]['requesterID'] == args_list2[4][ - # 'requesterID'] == TEST_AGENT + assert args_list1[3] == args_list2[3] == 'devices/actuators/schedule/announce/fakedriver0' + assert args_list1[4]['taskID'] == args_list2[4]['taskID'] == 'task_schedule_announce' datetime1 = utils.parse_timestamp_string(args_list1[4]['time']) datetime2 = utils.parse_timestamp_string(args_list2[4]['time']) delta = datetime2 - datetime1 @@ -434,15 +407,13 @@ def test_schedule_error_int_taskid(publish_agent): print("call args list : ", publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == FAILURE - assert result_message['info'] == \ - 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' + assert result_message['info'] == 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' @pytest.mark.actuator_pubsub @@ -480,15 +451,13 @@ def test_schedule_empty_task(publish_agent, cancel_schedules): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' assert result_header['taskID'] == taskid assert result_message['result'] == FAILURE - assert result_message['info'] == \ - 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' + assert result_message['info'] == 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' @pytest.mark.actuator_pubsub @@ -519,13 +488,11 @@ def test_schedule_error_none_taskid(publish_agent): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == FAILURE assert result_message['info'] == 'MISSING_TASK_ID' @@ -533,9 +500,7 @@ def test_schedule_error_none_taskid(publish_agent): @pytest.mark.actuator_pubsub def test_schedule_error_invalid_type(publish_agent): """ - Test error responses for schedule request through pubsub. - Test invalid type in header - + Test error responses for schedule request through pubsub. Test invalid type in header :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ @@ -560,13 +525,11 @@ def test_schedule_error_invalid_type(publish_agent): print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE2' assert result_header['taskID'] == 'task1' - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == FAILURE assert result_message['info'] == 'INVALID_REQUEST_TYPE' @@ -574,9 +537,7 @@ def test_schedule_error_invalid_type(publish_agent): @pytest.mark.actuator_pubsub def test_schedule_error_invalid_priority(publish_agent): """ - Test error responses for schedule request through pubsub. - Test invalid type in header - + Test error responses for schedule request through pubsub. Test invalid type in header :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ @@ -603,13 +564,11 @@ def test_schedule_error_invalid_priority(publish_agent): assert publish_agent.callback.call_count == 1 print('call args ', publish_agent.callback.call_args[0][1]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' assert result_header['taskID'] == 'task1' - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == FAILURE assert result_message['info'] == 'INVALID_PRIORITY' @@ -617,9 +576,7 @@ def test_schedule_error_invalid_priority(publish_agent): @pytest.mark.actuator_pubsub def test_schedule_error_empty_message(publish_agent): """ - Test error responses for schedule request through pubsub. - Test empty message - + Test error responses for schedule request through pubsub. Test empty message :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ @@ -631,9 +588,7 @@ def test_schedule_error_empty_message(publish_agent): 'taskID': 'task_empty_message', 'priority': 'LOW' } - msg = [ - - ] + msg = [] # reset mock to ignore any previous callback publish_agent.callback.reset_mock() publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg) @@ -642,13 +597,11 @@ def test_schedule_error_empty_message(publish_agent): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' assert result_header['taskID'] == 'task_empty_message' - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == FAILURE assert result_message['info'] == 'MALFORMED_REQUEST_EMPTY' @@ -656,9 +609,7 @@ def test_schedule_error_empty_message(publish_agent): @pytest.mark.actuator_pubsub def test_schedule_error_multiple_missing_headers(publish_agent): """ - Test error responses for schedule request through pubsub. - Test multiple mising headers - + Test error responses for schedule request through pubsub. Test multiple missing headers :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ @@ -670,9 +621,7 @@ def test_schedule_error_multiple_missing_headers(publish_agent): 'taskID': 'task_schedule_response-2' # 'priority': 'LOW' } - msg = [ - - ] + msg = [] # reset mock to ignore any previous callback publish_agent.callback.reset_mock() publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg) @@ -682,84 +631,19 @@ def test_schedule_error_multiple_missing_headers(publish_agent): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' assert result_header['taskID'] == 'task_schedule_response-2' - # assert result_header['requesterID'] == TEST_AGENT - assert result_message['result'] == FAILURE - assert result_message['info'] == 'MALFORMED_REQUEST_EMPTY' or \ - result_message['info'] == 'MISSING_PRIORITY' - - -@pytest.mark.actuator_pubsub -def test_schedule_error_duplicate_task(publish_agent, cancel_schedules): - """ - Test error response for schedule request through pubsub. Test Agent=''. - This test case should be removed - once agent id are generated by the volttron platform - - :param publish_agent: fixture invoked to setup all agents necessary and - returns an instance of Agent object used for publishing - :param cancel_schedules: fixture used to cancel the schedule at the end of - test so that other tests can use the same device and time slot - """ - print("\n**** test_schedule_error_duplicate_task ****") - agentid = TEST_AGENT - taskid = 'task_duplicate_task' - cancel_schedules.append({'agentid': agentid, 'taskid': taskid}) - - start = str(datetime.now()) - end = str(datetime.now() + timedelta(seconds=4)) - msg = [ - ['fakedriver0', start, end] - ] - # reset mock to ignore any previous callback - publish_agent.callback.reset_mock() - result = publish_agent.vip.rpc.call( - 'platform.actuator', - REQUEST_NEW_SCHEDULE, - TEST_AGENT, - 'task_duplicate_task', - 'LOW', - msg).get(timeout=10) - - assert result['result'] == 'SUCCESS' - print("Result of schedule through rpc ", result) - - header = { - 'type': 'NEW_SCHEDULE', - 'requesterID': TEST_AGENT, # The name of the requesting agent. - 'taskID': 'task_duplicate_task', - # unique (to all tasks) ID for scheduled task. - 'priority': 'LOW' - } - publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg) - gevent.sleep(1) - - print('call args list:', publish_agent.callback.call_args_list) - # once for rpc call and once for publish - assert publish_agent.callback.call_count == 1 - print(publish_agent.callback.call_args[0]) - assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT - result_header = publish_agent.callback.call_args[0][4] - result_message = publish_agent.callback.call_args[0][5] - assert result_header['type'] == 'NEW_SCHEDULE' - assert result_header['taskID'] == 'task_duplicate_task' assert result_message['result'] == FAILURE - assert result_message['info'] == 'TASK_ID_ALREADY_EXISTS' + assert result_message['info'] == 'MALFORMED_REQUEST_EMPTY' or result_message['info'] == 'MISSING_PRIORITY' @pytest.mark.actuator_pubsub def test_schedule_error_missing_priority(publish_agent): """ - Test error response for schedule request through pubsub. - Test missing priority info - + Test error response for schedule request through pubsub. Test missing priority info :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ @@ -785,8 +669,7 @@ def test_schedule_error_missing_priority(publish_agent): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['taskID'] == 'task_missing_priority' @@ -799,7 +682,6 @@ def test_schedule_error_malformed_request(publish_agent): """ Test error response for schedule request through pubsub. Test malformed request by sending a message without end date. - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ @@ -825,8 +707,7 @@ def test_schedule_error_malformed_request(publish_agent): assert publish_agent.callback.call_count == 1 print(publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' @@ -930,11 +811,9 @@ def test_schedule_preempt_self(publish_agent, cancel_schedules): assert schedule_header['type'] == 'NEW_SCHEDULE' assert schedule_header['taskID'] == taskid - # assert schedule_header['requesterID'] == agentid assert schedule_message['result'] == SUCCESS assert cancel_header['taskID'] == 'task_low_priority' - # assert cancel_message['data']['agentID'] == agentid assert cancel_message['data']['taskID'] == taskid assert cancel_message['result'] == 'PREEMPTED' @@ -1034,12 +913,9 @@ def test_schedule_preempt_other(publish_agent, cancel_schedules): assert schedule_header['type'] == 'NEW_SCHEDULE' assert schedule_header['taskID'] == taskid - # assert schedule_header['requesterID'] == agentid assert schedule_message['result'] == SUCCESS assert cancel_header['taskID'] == 'task_low_priority2' - # assert cancel_header['requesterID'] == 'other_agent' - # assert cancel_message['data']['agentID'] == agentid assert cancel_message['data']['taskID'] == taskid assert cancel_message['result'] == 'PREEMPTED' @@ -1090,8 +966,7 @@ def test_schedule_conflict(publish_agent, cancel_schedules): print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' @@ -1130,8 +1005,7 @@ def test_schedule_conflict_self(publish_agent): print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' @@ -1178,8 +1052,7 @@ def test_schedule_overlap(publish_agent, cancel_schedules): print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] assert result_header['type'] == 'NEW_SCHEDULE' @@ -1216,11 +1089,9 @@ def test_cancel_error_invalid_task(publish_agent): print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR - assert publish_agent.callback.call_args[0][3] == \ - topics.ACTUATOR_SCHEDULE_RESULT + assert publish_agent.callback.call_args[0][3] == topics.ACTUATOR_SCHEDULE_RESULT result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == TEST_AGENT assert result_message['result'] == FAILURE assert result_message['info'] == 'TASK_ID_DOES_NOT_EXIST' assert result_header['type'] == 'CANCEL_SCHEDULE' @@ -1246,38 +1117,25 @@ def test_get_default(publish_agent): # reset mock to ignore any previous callback publish_agent.callback.reset_mock() # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() # Get default value - get_topic = topics.ACTUATOR_GET(campus='', building='', unit='fakedriver1', - point='SampleWritableFloat1') + get_topic = topics.ACTUATOR_GET(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') header = { 'requesterID': TEST_AGENT } - publish_agent.vip.pubsub.publish('pubsub', - get_topic, - headers=header).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', get_topic, headers=header).get(timeout=10) gevent.sleep(1) print("call args list", publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 print('call args ', publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == value_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == TEST_AGENT assert result_message == 10.0 @@ -1306,19 +1164,11 @@ def test_get_value_success(publish_agent, cancel_schedules): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=2)) msg = [ @@ -1348,21 +1198,16 @@ def test_get_value_success(publish_agent, cancel_schedules): 20.5 # New value ).get(timeout=10) print("result of set", result) - get_topic = topics.ACTUATOR_GET(campus='', building='', unit='fakedriver1', - point='SampleWritableFloat1') + get_topic = topics.ACTUATOR_GET(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') print("set topic: ", get_topic) - publish_agent.vip.pubsub.publish('pubsub', - get_topic, - headers=header).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', get_topic, headers=header).get(timeout=10) gevent.sleep(0.5) print("call args list", publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 2 print('call args ', publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == value_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message == 20.5 @@ -1389,42 +1234,27 @@ def test_get_error_invalid_point(publish_agent): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat12') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat12') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit='fakedriver1', point='SampleWritableFloat12') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit='fakedriver1', point='SampleWritableFloat12') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() gevent.sleep(1) header = { 'requesterID': TEST_AGENT } - get_topic = topics.ACTUATOR_GET(campus='', building='', unit='fakedriver1', - point='SampleWritableFloat12') + get_topic = topics.ACTUATOR_GET(campus='', building='', unit='fakedriver1', point='SampleWritableFloat12') print("set topic: ", get_topic) - publish_agent.vip.pubsub.publish('pubsub', - get_topic, - headers=header).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', get_topic, headers=header).get(timeout=10) gevent.sleep(1) print("call args list", publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 print('call args ', publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == error_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - assert result_message['type'] == \ - 'master_driver.interfaces.DriverInterfaceError' - assert result_message['value'] == \ - "['Point not configured on device: SampleWritableFloat12']" - # assert result_header['requesterID'] == TEST_AGENT + assert result_message['type'] == 'platform_driver.interfaces.DriverInterfaceError' + assert result_message['value'] == "['Point not configured on device: SampleWritableFloat12']" @pytest.mark.actuator_pubsub @@ -1453,17 +1283,11 @@ def test_set_value_bool(publish_agent, cancel_schedules, revert_devices): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, - point='SampleWritableBool1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, - point='SampleWritableBool1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, point='SampleWritableBool1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, point='SampleWritableBool1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=3)) msg = [ @@ -1497,9 +1321,7 @@ def test_set_value_bool(publish_agent, cancel_schedules, revert_devices): assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == value_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message is True @@ -1536,17 +1358,11 @@ def test_set_value_array(publish_agent, cancel_schedules, revert_devices): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, - point='SampleWritableFloat1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, - point='SampleWritableFloat1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, point='SampleWritableFloat1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, point='SampleWritableFloat1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=3)) msg = [ @@ -1567,21 +1383,15 @@ def test_set_value_array(publish_agent, cancel_schedules, revert_devices): 'requesterID': agentid } - set_topic = topics.ACTUATOR_SET(campus='', building='', unit=device, - point='SampleWritableFloat1') + set_topic = topics.ACTUATOR_SET(campus='', building='', unit=device, point='SampleWritableFloat1') print("set topic: ", set_topic) - publish_agent.vip.pubsub.publish('pubsub', - set_topic, - headers=header, - message=[0.2]).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', set_topic, headers=header, message=[0.2]).get(timeout=10) gevent.sleep(1.5) print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == error_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message['type'] == 'builtins.TypeError' @@ -1619,17 +1429,11 @@ def test_set_value_float(publish_agent, cancel_schedules, revert_devices): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, - point='SampleWritableFloat1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, - point='SampleWritableFloat1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, point='SampleWritableFloat1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, point='SampleWritableFloat1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() topic = topics header = { @@ -1648,8 +1452,7 @@ def test_set_value_float(publish_agent, cancel_schedules, revert_devices): 'requesterID': TEST_AGENT } - set_topic = topics.ACTUATOR_SET(campus='', building='', unit=device, - point='SampleWritableFloat1') + set_topic = topics.ACTUATOR_SET(campus='', building='', unit=device, point='SampleWritableFloat1') print("set topic: ", set_topic) publish(publish_agent, set_topic, header, 0.2) gevent.sleep(1) @@ -1658,9 +1461,7 @@ def test_set_value_float(publish_agent, cancel_schedules, revert_devices): assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == value_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message == 0.2 @@ -1697,17 +1498,11 @@ def test_revert_point(publish_agent, cancel_schedules): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, - point=point) - reverted_topic = topics.ACTUATOR_REVERTED_POINT(campus='', building='', - unit=device, point=point) + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, point=point) + reverted_topic = topics.ACTUATOR_REVERTED_POINT(campus='', building='', unit=device, point=point) print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=reverted_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=reverted_topic, callback=publish_agent.callback).get() start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=10)) msg = [ @@ -1723,12 +1518,9 @@ def test_revert_point(publish_agent, cancel_schedules): # expected result {'info': u'', 'data': {}, 'result': 'SUCCESS'} assert result['result'] == 'SUCCESS' - revert_topic = topics.ACTUATOR_REVERT_POINT(campus='', building='', - unit=device, point=point) + revert_topic = topics.ACTUATOR_REVERT_POINT(campus='', building='', unit=device, point=point) print("revert topic: ", revert_topic) - publish_agent.vip.pubsub.publish('pubsub', - revert_topic, - headers={}).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', revert_topic, headers={}).get(timeout=10) initial_value = publish_agent.vip.rpc.call( PLATFORM_ACTUATOR, # Target agent @@ -1753,9 +1545,7 @@ def test_revert_point(publish_agent, cancel_schedules): assert publish_agent.callback.call_count == 2 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == value_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message == test_value publish_agent.callback.reset_mock() @@ -1765,21 +1555,16 @@ def test_revert_point(publish_agent, cancel_schedules): 'requesterID': TEST_AGENT } - revert_topic = topics.ACTUATOR_REVERT_POINT(campus='', building='', - unit=device, point=point) + revert_topic = topics.ACTUATOR_REVERT_POINT(campus='', building='', unit=device, point=point) print("revert topic: ", revert_topic) - publish_agent.vip.pubsub.publish('pubsub', - revert_topic, - headers=header).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', revert_topic, headers=header).get(timeout=10) gevent.sleep(1) print('call args list ', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == reverted_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message is None publish_agent.callback.reset_mock() @@ -1827,22 +1612,13 @@ def test_revert_device(publish_agent, cancel_schedules): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, - point=point) - error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, - point=point) - reverted_topic = topics.ACTUATOR_REVERTED_DEVICE(campus='', building='', - unit=device) + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit=device, point=point) + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit=device, point=point) + reverted_topic = topics.ACTUATOR_REVERTED_DEVICE(campus='', building='', unit=device) print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=reverted_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=reverted_topic, callback=publish_agent.callback).get() header = { 'type': 'NEW_SCHEDULE', @@ -1868,18 +1644,14 @@ def test_revert_device(publish_agent, cancel_schedules): test_value = initial_value + 1.0 - set_topic = topics.ACTUATOR_SET(campus='', building='', - unit=device, - point=point) + set_topic = topics.ACTUATOR_SET(campus='', building='', unit=device, point=point) publish(publish_agent, set_topic, {}, test_value) gevent.sleep(1) assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == value_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message == test_value publish_agent.callback.reset_mock() @@ -1889,8 +1661,7 @@ def test_revert_device(publish_agent, cancel_schedules): 'requesterID': TEST_AGENT } - revert_topic = topics.ACTUATOR_REVERT_DEVICE(campus='', building='', - unit=device) + revert_topic = topics.ACTUATOR_REVERT_DEVICE(campus='', building='', unit=device) print("revert topic: ", revert_topic) publish(publish_agent, revert_topic, header, None) gevent.sleep(1) @@ -1898,9 +1669,7 @@ def test_revert_device(publish_agent, cancel_schedules): assert publish_agent.callback.call_count == 1 assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == reverted_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message is None publish_agent.callback.reset_mock() @@ -1942,19 +1711,11 @@ def test_set_read_only_point(publish_agent, cancel_schedules): # Mock callback methods publish_agent.callback = MagicMock(name="callback") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', - unit='fakedriver0', - point='OutsideAirTemperature1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', - unit='fakedriver0', - point='OutsideAirTemperature1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit='fakedriver0', point='OutsideAirTemperature1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit='fakedriver0', point='OutsideAirTemperature1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=3)) msg = [ @@ -1974,13 +1735,9 @@ def test_set_read_only_point(publish_agent, cancel_schedules): 'requesterID': TEST_AGENT } - set_topic = topics.ACTUATOR_SET(campus='', building='', unit='fakedriver0', - point='OutsideAirTemperature1') + set_topic = topics.ACTUATOR_SET(campus='', building='', unit='fakedriver0', point='OutsideAirTemperature1') print("set topic: ", set_topic) - publish_agent.vip.pubsub.publish('pubsub', - set_topic, - headers=header, - message=['0.2']).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', set_topic, headers=header, message=['0.2']).get(timeout=10) publish_agent.vip.rpc.call( 'platform.actuator', REQUEST_CANCEL_SCHEDULE, @@ -1993,13 +1750,9 @@ def test_set_read_only_point(publish_agent, cancel_schedules): print('call args ', publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == error_topic - result_header = publish_agent.callback.call_args[0][4] - # assert result_header['requesterID'] == agentid result_message = publish_agent.callback.call_args[0][5] assert result_message['type'] == 'builtins.RuntimeError' - assert result_message['value'] == "['Trying to write to a point " \ - "configured read only: " \ - "OutsideAirTemperature1']" + assert result_message['value'] == "['Trying to write to a point configured read only: OutsideAirTemperature1']" @pytest.mark.actuator_pubsub @@ -2032,48 +1785,33 @@ def test_set_lock_error(publish_agent): print("Value of point before set without lock: ", current_value) # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', - unit='fakedriver1', - point='SampleWritableFloat1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') print('error topic:', error_topic) print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() gevent.sleep(1) # set value header = { 'requesterID': TEST_AGENT } - set_topic = topics.ACTUATOR_SET(campus='', building='', unit='fakedriver1', - point='SampleWritableFloat1') + set_topic = topics.ACTUATOR_SET(campus='', building='', unit='fakedriver1', point='SampleWritableFloat1') print("set topic: ", set_topic) set_value = current_value + 1 print("Attempting to set value as ", set_value) - publish_agent.vip.pubsub.publish('pubsub', - set_topic, - headers=header, - message=set_value).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', set_topic, headers=header, message=set_value).get(timeout=10) gevent.sleep(1) print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 print('call args ', publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == error_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == TEST_AGENT assert result_message['type'] == 'LockError' assert result_message['value'].endswith('does not have this lock') - # To test fix for bug #223 new_value = publish_agent.vip.rpc.call( 'platform.actuator', # Target agent 'get_point', # Method @@ -2112,19 +1850,11 @@ def test_set_value_error(publish_agent, cancel_schedules): # Mock callback methods publish_agent.callback = MagicMock(name="callback_value_error") # Subscribe to result of set - value_topic = topics.ACTUATOR_VALUE(campus='', building='', - unit='fakedriver0', - point='SampleWritableFloat1') - error_topic = topics.ACTUATOR_ERROR(campus='', building='', - unit='fakedriver0', - point='SampleWritableFloat1') + value_topic = topics.ACTUATOR_VALUE(campus='', building='', unit='fakedriver0', point='SampleWritableFloat1') + error_topic = topics.ACTUATOR_ERROR(campus='', building='', unit='fakedriver0', point='SampleWritableFloat1') print('value topic', value_topic) - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=value_topic, - callback=publish_agent.callback).get() - publish_agent.vip.pubsub.subscribe(peer='pubsub', - prefix=error_topic, - callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=value_topic, callback=publish_agent.callback).get() + publish_agent.vip.pubsub.subscribe(peer='pubsub', prefix=error_topic, callback=publish_agent.callback).get() start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=3)) msg = [ @@ -2145,13 +1875,9 @@ def test_set_value_error(publish_agent, cancel_schedules): 'requesterID': agentid } - set_topic = topics.ACTUATOR_SET(campus='', building='', unit='fakedriver0', - point='SampleWritableFloat1') + set_topic = topics.ACTUATOR_SET(campus='', building='', unit='fakedriver0', point='SampleWritableFloat1') print("set topic: ", set_topic) - publish_agent.vip.pubsub.publish('pubsub', - set_topic, - headers=header, - message='abcd').get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', set_topic, headers=header, message='abcd').get(timeout=10) gevent.sleep(1) print('call args list:', publish_agent.callback.call_args_list) @@ -2159,9 +1885,6 @@ def test_set_value_error(publish_agent, cancel_schedules): print('call args ', publish_agent.callback.call_args[0]) assert publish_agent.callback.call_args[0][1] == PLATFORM_ACTUATOR assert publish_agent.callback.call_args[0][3] == error_topic - result_header = publish_agent.callback.call_args[0][4] result_message = publish_agent.callback.call_args[0][5] - # assert result_header['requesterID'] == agentid assert result_message['type'] == 'builtins.ValueError' - assert result_message['value'] == \ - '["could not convert string to float: \'abcd\'"]' + assert result_message['value'] == '["could not convert string to float: \'abcd\'"]' diff --git a/services/core/ActuatorAgent/tests/test_actuator_pubsub_unit.py b/services/core/ActuatorAgent/tests/test_actuator_pubsub_unit.py index 88f5dbfceb..f423eb8133 100644 --- a/services/core/ActuatorAgent/tests/test_actuator_pubsub_unit.py +++ b/services/core/ActuatorAgent/tests/test_actuator_pubsub_unit.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,6 +49,7 @@ from volttrontesting.utils.utils import AgentMock from volttron.platform.vip.agent import Agent +pytestmark = [pytest.mark.actuator_unit, pytest.mark.unit] PEER = "peer-1" SENDER = "sender-1" @@ -65,7 +66,6 @@ ActuatorAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),) -@pytest.mark.actuator_unit def test_handle_get_should_succeed(): with get_actuator_agent() as actuator_agent: actuator_agent.handle_get(PEER, SENDER, BUS, GET_TOPIC, HEADERS, MESSAGE) @@ -74,7 +74,6 @@ def test_handle_get_should_succeed(): actuator_agent.vip.pubsub.publish.assert_called_once() -@pytest.mark.actuator_unit def test_handle_get_should_handle_standard_error(caplog): with get_actuator_agent(vip_identity=None) as actuator_agent: actuator_agent.handle_get(PEER, SENDER, BUS, GET_TOPIC, HEADERS, MESSAGE) @@ -88,7 +87,6 @@ def test_handle_get_should_handle_standard_error(caplog): ) -@pytest.mark.actuator_unit @pytest.mark.parametrize( "sender, device_state", [ @@ -111,7 +109,6 @@ def test_handle_set_should_succeed(sender, device_state): actuator_agent.vip.pubsub.publish.assert_called() -@pytest.mark.actuator_unit def test_handle_set_should_return_none_on_none_message(caplog): with get_actuator_agent(vip_identity=None) as actuator_agent: result = actuator_agent.handle_set(PEER, SENDER, BUS, SET_TOPIC, HEADERS, None) @@ -125,7 +122,6 @@ def test_handle_set_should_return_none_on_none_message(caplog): ) -@pytest.mark.actuator_unit def test_handle_set_should_handle_type_error_on_invalid_sender(caplog): with get_actuator_agent(vip_identity=None) as actuator_agent: actuator_agent.handle_set(PEER, None, BUS, SET_TOPIC, HEADERS, MESSAGE) @@ -138,7 +134,6 @@ def test_handle_set_should_handle_type_error_on_invalid_sender(caplog): ) -@pytest.mark.actuator_unit def test_handle_set_should_handle_lock_error(caplog): with get_actuator_agent(vip_identity=None) as actuator_agent: actuator_agent.handle_set(PEER, SENDER, BUS, SET_TOPIC, HEADERS, MESSAGE) @@ -151,7 +146,6 @@ def test_handle_set_should_handle_lock_error(caplog): ) -@pytest.mark.actuator_unit def test_handle_revert_point_should_succeed(): device_state = { "actuators/revert/point/somedevicepath": DeviceState( @@ -168,7 +162,6 @@ def test_handle_revert_point_should_succeed(): actuator_agent.vip.pubsub.publish.assert_called_once() -@pytest.mark.actuator_unit def test_handle_revert_point_should_handle_lock_error(caplog): with get_actuator_agent(vip_identity=None) as actuator_agent: actuator_agent.handle_revert_point( @@ -183,7 +176,6 @@ def test_handle_revert_point_should_handle_lock_error(caplog): ) -@pytest.mark.actuator_unit def test_handle_revert_device_should_succeed(): device_state = { "somedevicepath": DeviceState("sender-1", "task-id-1", "anytime") @@ -199,7 +191,6 @@ def test_handle_revert_device_should_succeed(): actuator_agent.vip.pubsub.publish.assert_called_once() -@pytest.mark.actuator_unit def test_handle_revert_device_should_handle_lock_error(caplog): with get_actuator_agent(vip_identity=None) as actuator_agent: actuator_agent.handle_revert_device( @@ -214,7 +205,6 @@ def test_handle_revert_device_should_handle_lock_error(caplog): ) -@pytest.mark.actuator_unit @pytest.mark.parametrize( "priority, success", [ @@ -244,7 +234,6 @@ def test_handle_schedule_request_should_succeed_on_new_schedule_request_type( actuator_agent.vip.pubsub.publish.assert_called() -@pytest.mark.actuator_unit @pytest.mark.parametrize("success", [True, False]) def test_handle_schedule_request_should_succeed_on_cancel_schedule_request_type(success): headers = {"type": "CANCEL_SCHEDULE", "requesterID": "id-123", "taskID": "12345"} @@ -257,7 +246,6 @@ def test_handle_schedule_request_should_succeed_on_cancel_schedule_request_type( actuator_agent.vip.pubsub.publish.assert_called() -@pytest.mark.actuator_unit @pytest.mark.parametrize("invalid_request_type", ["bad request type", None]) def test_handle_schedule_request_should_log_invalid_request_type( invalid_request_type, caplog diff --git a/services/core/ActuatorAgent/tests/test_actuator_rpc.py b/services/core/ActuatorAgent/tests/test_actuator_rpc.py index 6aa3f441c8..245c49ceab 100644 --- a/services/core/ActuatorAgent/tests/test_actuator_rpc.py +++ b/services/core/ActuatorAgent/tests/test_actuator_rpc.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,12 +39,14 @@ """ Pytest test cases for testing actuator agent using rpc calls. """ -from datetime import datetime, timedelta +import json import gevent import gevent.subprocess as subprocess import pytest +import os from pytest import approx +from datetime import datetime, timedelta from gevent.subprocess import Popen from mock import MagicMock @@ -52,6 +54,7 @@ from volttron.platform.jsonrpc import RemoteError from volttron.platform.messaging import topics from volttron.platform.agent.known_identities import PLATFORM_DRIVER +from volttron.platform.messaging.health import STATUS_GOOD REQUEST_CANCEL_SCHEDULE = 'request_cancel_schedule' REQUEST_NEW_SCHEDULE = 'request_new_schedule' @@ -67,30 +70,25 @@ def publish_agent(request, volttron_instance): """ Fixture used for setting up the environment. 1. Creates fake driver configs - 2. Starts the master driver agent with the created fake driver agents + 2. Starts the platform driver agent with the created fake driver agents 3. Starts the actuator agent 4. Creates an instance Agent class for publishing and returns it - :param request: pytest request object :param volttron_instance: instance of volttron in which test cases are run :return: an instance of fake agent used for publishing """ - - - # Reset master driver config store + # Reset platform driver config store cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all'] process = Popen(cmd, env=volttron_instance.env, - cwd='scripts/scalability-testing', stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = process.wait() print(result) assert result == 0 - # Add master driver configuration files to config store. - cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, - 'fake.csv', 'fake_unit_testing.csv', '--csv'] + # Add platform driver configuration files to config store. + cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, 'fake.csv', 'fake_unit_testing.csv', '--csv'] process = Popen(cmd, env=volttron_instance.env, - cwd='scripts/scalability-testing', + cwd=f"{volttron_instance.volttron_root}/scripts/scalability-testing", stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) output, err = process.communicate() print(output) @@ -99,26 +97,25 @@ def publish_agent(request, volttron_instance): for i in range(4): config_name = "devices/fakedriver{}".format(i) - cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, - config_name, 'fake_unit_testing.config', '--json'] + cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, config_name, 'fake_unit_testing.config', '--json'] process = Popen(cmd, env=volttron_instance.env, - cwd='scripts/scalability-testing', + cwd=f"{volttron_instance.volttron_root}/scripts/scalability-testing", stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) result = process.wait() print(result) assert result == 0 - # Start the master driver agent which would intern start the fake driver + # Start the platform driver agent which would intern start the fake driver # using the configs created above - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices # Start the actuator agent through which publish agent should communicate - # to fake device. Start the master driver agent which would intern start + # to fake device. Start the platform driver agent which would intern start # the fake driver using the configs created above actuator_uuid = volttron_instance.install_agent( agent_dir=get_services_core("ActuatorAgent"), @@ -129,14 +126,13 @@ def publish_agent(request, volttron_instance): # 3: Start a fake agent to publish to message bus publish_agent = volttron_instance.build_agent(identity=TEST_AGENT) - # 4: add a tear down method to stop sqlhistorian agent and the fake agent - # \that published to message bus + # 4: add a tear down method to stop sqlhistorian agent and the fake agent that published to message bus def stop_agent(): print("In teardown method of module") volttron_instance.stop_agent(actuator_uuid) - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) volttron_instance.remove_agent(actuator_uuid) - volttron_instance.remove_agent(master_uuid) + volttron_instance.remove_agent(platform_uuid) publish_agent.core.stop() request.addfinalizer(stop_agent) @@ -208,7 +204,6 @@ def cleanup(): def test_schedule_success(publish_agent, cancel_schedules): """ Test responses for successful schedule request - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param cancel_schedules: fixture used to cancel the schedule at the end of @@ -236,15 +231,15 @@ def test_schedule_success(publish_agent, cancel_schedules): print(result) assert result['result'] == SUCCESS + @pytest.mark.actuator def test_schedule_error_int_taskid(publish_agent): """ Test responses for successful schedule request with integer task id - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ - print ("\n**** test_schedule_error_int_taskid ****") + print("\n**** test_schedule_error_int_taskid ****") agentid = TEST_AGENT taskid = 1234 @@ -263,21 +258,20 @@ def test_schedule_error_int_taskid(publish_agent): # expected result {'info': u'', 'data': {}, 'result': SUCCESS} print(result) assert result['result'] == FAILURE - assert result['info'] == \ - 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' + assert result['info'] == 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' + @pytest.mark.actuator def test_schedule_empty_taskid(publish_agent, cancel_schedules): """ Test responses for successful schedule request when task id is an empty string - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_empty_taskid ****") + print("\n**** test_schedule_empty_taskid ****") # used by cancel_schedules agentid = TEST_AGENT taskid = '' @@ -298,19 +292,17 @@ def test_schedule_empty_taskid(publish_agent, cancel_schedules): # expected result {'info': u'', 'data': {}, 'result': SUCCESS} print(result) assert result['result'] == FAILURE - assert result['info'] == \ - 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' + assert result['info'] == 'MALFORMED_REQUEST: TypeError: taskid must be a nonempty string' @pytest.mark.actuator def test_schedule_error_none_taskid(publish_agent): """ Test error responses for schedule request with taskid = None - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ - print ("\n**** test_schedule_error_none_taskid ****") + print("\n**** test_schedule_error_none_taskid ****") agentid = TEST_AGENT taskid = None @@ -330,15 +322,15 @@ def test_schedule_error_none_taskid(publish_agent): assert result['result'] == FAILURE assert result['info'] == 'MISSING_TASK_ID' + @pytest.mark.actuator def test_schedule_error_invalid_priority(publish_agent): """ Test error responses for schedule request with an invalid priority - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ - print ("\n**** test_schedule_error_invalid_priority ****") + print("\n**** test_schedule_error_invalid_priority ****") taskid = 'task_invalid_priority' start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=1)) @@ -358,15 +350,15 @@ def test_schedule_error_invalid_priority(publish_agent): assert result['result'] == FAILURE assert result['info'] == 'INVALID_PRIORITY' + @pytest.mark.actuator def test_schedule_error_empty_message(publish_agent): """ Test error responses for schedule request with an empty message - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ - print ("\n**** test_schedule_error_empty_message ****") + print("\n**** test_schedule_error_empty_message ****") taskid = 'task_empty_message' msg = [ @@ -384,18 +376,18 @@ def test_schedule_error_empty_message(publish_agent): assert result['result'] == FAILURE assert result['info'] == 'MALFORMED_REQUEST_EMPTY' + @pytest.mark.actuator def test_schedule_error_duplicate_task(publish_agent, cancel_schedules): """ Test error responses for schedule request with task id that is already in use - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_error_duplicate_task ****") + print("\n**** test_schedule_error_duplicate_task ****") # used by cancel_schedules agentid = TEST_AGENT taskid = 'task_schedule_duplicate_id' @@ -428,15 +420,15 @@ def test_schedule_error_duplicate_task(publish_agent, cancel_schedules): assert result['result'] == FAILURE assert result['info'] == 'TASK_ID_ALREADY_EXISTS' + @pytest.mark.actuator def test_schedule_error_none_priority(publish_agent): """ Test error responses for schedule request with priority = None - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ - print ("\n**** test_schedule_error_none_priority ****") + print("\n**** test_schedule_error_none_priority ****") taskid = 'task_none_priority' start = str(datetime.now()) @@ -463,15 +455,13 @@ def test_schedule_error_malformed_request(publish_agent): """ Test error responses for schedule request with malformed request - request with only a device name and start time and no stop time - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing """ - print ("\n**** test_schedule_error_malformed_request ****") + print("\n**** test_schedule_error_malformed_request ****") taskid = 'task_malformed_request' start = str(datetime.now()) - # end = str(datetime.now() + timedelta(seconds=1)) msg = [ ['fakedriver0', start] ] @@ -490,7 +480,7 @@ def test_schedule_error_malformed_request(publish_agent): @pytest.mark.actuator -def test_schedule_premept_self(publish_agent, cancel_schedules): +def test_schedule_preempt_self(publish_agent, cancel_schedules): """ Test error response for schedule request through pubsub. Test schedule preemption by a higher priority task from the same agent. @@ -500,7 +490,7 @@ def test_schedule_premept_self(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_premept_self ****") + print("\n**** test_schedule_preempt_self ****") # used by cancel_schedules agentid = TEST_AGENT taskid = 'task_high_priority' @@ -544,8 +534,7 @@ def test_schedule_premept_self(publish_agent, cancel_schedules): 'HIGH', msg).get(timeout=10) assert result['result'] == SUCCESS - # wait for 2 callbacks - success msg for task_high_priority and preempt - # msg for task_low_priority + # wait for 2 callbacks - success msg for task_high_priority and preempt msg for task_low_priority gevent.sleep(6) print('call args list:', publish_agent.callback.call_args_list) assert publish_agent.callback.call_count == 1 @@ -559,7 +548,6 @@ def test_schedule_premept_self(publish_agent, cancel_schedules): cancel_header = call_args1[4] cancel_message = call_args1[5] - assert call_args1[4]['type'] == 'CANCEL_SCHEDULE' assert cancel_header['taskID'] == 'task_low_priority' @@ -569,7 +557,7 @@ def test_schedule_premept_self(publish_agent, cancel_schedules): @pytest.mark.actuator -def test_schedule_premept_active_task(publish_agent, cancel_schedules): +def test_schedule_preempt_active_task(publish_agent, cancel_schedules): """ Test error response for schedule request. Test schedule preemption of a actively running task with priority @@ -580,7 +568,7 @@ def test_schedule_premept_active_task(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_premept_active_task ****") + print ("\n**** test_schedule_preempt_active_task ****") # used by cancel_schedules agentid = 'new_agent' taskid = 'task_high_priority2' @@ -641,7 +629,6 @@ def test_schedule_premept_active_task(publish_agent, cancel_schedules): assert call_args1[4]['type'] == 'CANCEL_SCHEDULE' assert cancel_header['taskID'] == 'task_low_priority2' - # assert cancel_message['data']['agentID'] == agentid assert cancel_message['data']['taskID'] == taskid assert cancel_message['result'] == 'PREEMPTED' @@ -649,10 +636,9 @@ def test_schedule_premept_active_task(publish_agent, cancel_schedules): @pytest.mark.actuator @pytest.mark.xfail(reason="Request ids are now ignored.") # This test checks to see if a requestid is no longer valid. -# Since requestids are always vip identities and only one agent +# Since request ids are always vip identities and only one agent # is scheduling devices the expected lock error is not raised. -def test_schedule_premept_active_task_gracetime(publish_agent, - cancel_schedules): +def test_schedule_preempt_active_task_gracetime(publish_agent, cancel_schedules): """ Test error response for schedule request. Test schedule preemption of a actively running task with priority LOW by @@ -665,13 +651,12 @@ def test_schedule_premept_active_task_gracetime(publish_agent, :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_premept_active_task_gracetime ****") + print("\n**** test_schedule_preempt_active_task_gracetime ****") # used by cancel_schedules agentid = 'new_agent' taskid = 'task_high_priority3' cancel_schedules.append({'agentid': agentid, 'taskid': taskid}) - # add low prority task as well since it won't get cancelled till - # end of grace time + # add low prority task as well since it won't get cancelled till end of grace time cancel_schedules.append( {'agentid': TEST_AGENT, 'taskid': 'task_low_priority3'}) @@ -743,17 +728,14 @@ def test_schedule_premept_active_task_gracetime(publish_agent, assert schedule_header['type'] == 'NEW_SCHEDULE' assert schedule_header['taskID'] == taskid - # assert schedule_header['requesterID'] == agentid assert schedule_message['result'] == SUCCESS assert cancel_header['taskID'] == 'task_low_priority3' - # assert cancel_message['data']['agentID'] == agentid assert cancel_message['data']['taskID'] == taskid assert cancel_message['result'] == 'PREEMPTED' - # High priority task's schedule request should succeed but it should not - # be able to start write to the device till active task's ( - # 'task_low_priority3') grace time is over + # High priority task's schedule request should succeed but it should not be able to start write to the device till + # active task's ( 'task_low_priority3') grace time is over try: result = publish_agent.vip.rpc.call( PLATFORM_ACTUATOR, # Target agent @@ -770,7 +752,7 @@ def test_schedule_premept_active_task_gracetime(publish_agent, @pytest.mark.actuator -def test_schedule_premept_error_active_task(publish_agent, cancel_schedules): +def test_schedule_preempt_error_active_task(publish_agent, cancel_schedules): """ Test error response for schedule request. Test schedule preemption of a actively running task with priority LOW by @@ -782,7 +764,7 @@ def test_schedule_premept_error_active_task(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_premept_error_active_task ****") + print ("\n**** test_schedule_preempt_error_active_task ****") # used by cancel_schedules agentid = TEST_AGENT taskid = 'task_low_priority3' @@ -828,7 +810,7 @@ def test_schedule_premept_error_active_task(publish_agent, cancel_schedules): @pytest.mark.actuator -def test_schedule_premept_future_task(publish_agent, cancel_schedules): +def test_schedule_preempt_future_task(publish_agent, cancel_schedules): """ Test error response for schedule request. Test schedule preemption of a future task with priority LOW by a higher @@ -839,7 +821,7 @@ def test_schedule_premept_future_task(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_schedule_premept_future_task ****") + print ("\n**** test_schedule_preempt_future_task ****") # used by cancel_schedules agentid = 'new_agent' taskid = 'task_high_priority4' @@ -1110,7 +1092,7 @@ def test_get_success(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_get_value_success ****") + print("\n**** test_get_value_success ****") agentid = TEST_AGENT taskid = 'task_set_and_get' cancel_schedules.append({'agentid': agentid, 'taskid': taskid}) @@ -1150,6 +1132,7 @@ def test_get_success(publish_agent, cancel_schedules): print(result) assert result == 1.0 + @pytest.mark.actuator def test_get_success_with_point(publish_agent, cancel_schedules): """ @@ -1161,7 +1144,7 @@ def test_get_success_with_point(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_get_value_success ****") + print("\n**** test_get_value_success ****") agentid = TEST_AGENT taskid = 'task_set_and_get' cancel_schedules.append({'agentid': agentid, 'taskid': taskid}) @@ -1188,7 +1171,7 @@ def test_get_success_with_point(publish_agent, cancel_schedules): 'set_point', # Method agentid, # Requestor 'fakedriver1', 1.0, point='SampleWritableFloat1', # Point to set - # New value + # New value ).get(timeout=10) assert result == 1.0 @@ -1216,8 +1199,7 @@ def test_get_error_invalid_point(publish_agent): PLATFORM_ACTUATOR, # Target agent 'get_point', # Method 'fakedriver1/SampleWritableFloat123').get(timeout=10) - pytest.fail('Expecting RemoteError for accessing invalid point. ' - 'Code returned {}'.format(result)) + pytest.fail('Expecting RemoteError for accessing invalid point. Code returned {}'.format(result)) except RemoteError as e: assert e.message.find( 'Point not configured on device: SampleWritableFloat123') != -1 @@ -1228,13 +1210,13 @@ def test_set_value_float(publish_agent, cancel_schedules, revert_devices): """ Test setting a float value of a point through rpc Expected result = value of the actuation point - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot + :param revert_devices: list of devices to revert during test """ - print ("\n**** test_set_float_value ****") + print("\n**** test_set_float_value ****") taskid = 'task_set_float_value' agentid = TEST_AGENT device = 'fakedriver0' @@ -1272,7 +1254,6 @@ def test_revert_point(publish_agent, cancel_schedules): """ Test setting a float value of a point through rpc Expected result = value of the actuation point - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param cancel_schedules: fixture used to cancel the schedule at the end @@ -1331,6 +1312,7 @@ def test_revert_point(publish_agent, cancel_schedules): # Value taken from fake_unit_testing.csv assert result == approx(initial_value) + @pytest.mark.actuator def test_revert_point_with_point(publish_agent, cancel_schedules): """ @@ -1395,6 +1377,7 @@ def test_revert_point_with_point(publish_agent, cancel_schedules): # Value taken from fake_unit_testing.csv assert result == approx(initial_value) + @pytest.mark.actuator def test_revert_device(publish_agent, cancel_schedules): """ @@ -1471,7 +1454,7 @@ def test_set_error_array(publish_agent, cancel_schedules): :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ - print ("\n**** test_set_error_array ****") + print("\n**** test_set_error_array ****") # set agentid and task id for cancel_schedules fixture agentid = TEST_AGENT taskid = 'task_set_float_array_value' @@ -1505,6 +1488,7 @@ def test_set_error_array(publish_agent, cancel_schedules): except RemoteError as e: assert "TypeError" in e.message + @pytest.mark.actuator def test_set_lock_error(publish_agent): """ @@ -1631,10 +1615,10 @@ def test_get_multiple_points(publish_agent, cancel_schedules): ['fakedriver0/SampleWritableFloat1', 'fakedriver1/SampleWritableFloat1']).get(timeout=10) - assert results == {'fakedriver0/SampleWritableFloat1': 10.0, - 'fakedriver1/SampleWritableFloat1': 1.0} + assert results == {'fakedriver0/SampleWritableFloat1': 10.0, 'fakedriver1/SampleWritableFloat1': 1.0} assert errors == {} + @pytest.mark.actuator def test_get_multiple_points_separate_pointname(publish_agent, cancel_schedules): results, errors = publish_agent.vip.rpc.call( @@ -1643,8 +1627,7 @@ def test_get_multiple_points_separate_pointname(publish_agent, cancel_schedules) [['fakedriver0', 'SampleWritableFloat1'], ['fakedriver1', 'SampleWritableFloat1']]).get(timeout=10) - assert results == {'fakedriver0/SampleWritableFloat1': 10.0, - 'fakedriver1/SampleWritableFloat1': 1.0} + assert results == {'fakedriver0/SampleWritableFloat1': 10.0, 'fakedriver1/SampleWritableFloat1': 1.0} assert errors == {} @@ -1656,14 +1639,13 @@ def test_get_multiple_captures_errors(publish_agent, cancel_schedules): ['fakedriver0/nonexistentpoint']).get(timeout=10) assert results == {} - assert errors['fakedriver0/nonexistentpoint'] == "DriverInterfaceError('Point not configured on device: nonexistentpoint',)" + assert errors['fakedriver0/nonexistentpoint'] == \ + "DriverInterfaceError('Point not configured on device: nonexistentpoint',)" + @pytest.mark.actuator def test_get_multiple_captures_errors_invalid_point(publish_agent, cancel_schedules): - results, errors = publish_agent.vip.rpc.call( - 'platform.actuator', - 'get_multiple_points', - [42]).get(timeout=10) + results, errors = publish_agent.vip.rpc.call('platform.actuator', 'get_multiple_points', [42]).get(timeout=10) assert results == {} assert errors['42'] == "ValueError('Invalid topic: 42',)" @@ -1713,6 +1695,7 @@ def test_set_multiple_points(publish_agent, cancel_schedules): assert result == {} + @pytest.mark.actuator def test_set_multiple_points_separate_pointname(publish_agent, cancel_schedules): agentid = TEST_AGENT @@ -1815,9 +1798,7 @@ def test_set_multiple_captures_errors(publish_agent, cancel_schedules): @pytest.mark.actuator def test_scrape_all(publish_agent, cancel_schedules): - result = publish_agent.vip.rpc.call('platform.actuator', - 'scrape_all', - 'fakedriver0').get(timeout=10) + result = publish_agent.vip.rpc.call('platform.actuator', 'scrape_all', 'fakedriver0').get(timeout=10) assert type(result) is dict assert len(result) == 13 @@ -1833,11 +1814,12 @@ def test_set_value_no_lock(publish_agent, volttron_instance): """ alternate_actuator_vip_id = "my_actuator" - #Use actuator that allows write with no lock. + # Use actuator that allows write with no lock. my_actuator_uuid = volttron_instance.install_agent( agent_dir=get_services_core("ActuatorAgent"), config_file=get_services_core("ActuatorAgent/tests/actuator-no-lock.config"), start=True, vip_identity=alternate_actuator_vip_id) + agentid = "" try: agentid = TEST_AGENT @@ -1873,11 +1855,12 @@ def test_set_value_no_lock_failure(publish_agent, volttron_instance): """ alternate_actuator_vip_id = "my_actuator" - #Use actuator that allows write with no lock. + # Use actuator that allows write with no lock. my_actuator_uuid = volttron_instance.install_agent( agent_dir=get_services_core("ActuatorAgent"), config_file=get_services_core("ActuatorAgent/tests/actuator-no-lock.config"), start=True, vip_identity=alternate_actuator_vip_id) + publish_agent2 = None try: agentid2 = "test-agent2" taskid = "test-task" @@ -1902,7 +1885,7 @@ def test_set_value_no_lock_failure(publish_agent, volttron_instance): agentid = TEST_AGENT with pytest.raises(RemoteError): - result = publish_agent.vip.rpc.call( + publish_agent.vip.rpc.call( alternate_actuator_vip_id, # Target agent 'set_point', # Method agentid, # Requestor @@ -1925,18 +1908,16 @@ def test_set_value_no_lock_failure(publish_agent, volttron_instance): @pytest.mark.actuator -def test_set_value_float_failure(publish_agent): +def test_set_value_float_failure(publish_agent, cancel_schedules): """ Test setting a float value of a point through rpc Expected result = value of the actuation point - :param publish_agent: fixture invoked to setup all agents necessary and returns an instance of Agent object used for publishing :param cancel_schedules: fixture used to cancel the schedule at the end of test so that other tests can use the same device and time slot """ print ("\n**** test_set_float_value ****") - taskid = 'task_set_float_value' agentid = TEST_AGENT with pytest.raises(RemoteError): @@ -1948,3 +1929,20 @@ def test_set_value_float_failure(publish_agent): 2.5 # New value ).get(timeout=10) pytest.fail("Expecting remote error.") + + +@pytest.mark.actuator +def test_actuator_default_config(volttron_instance, publish_agent): + """ + Test the default configuration file included with the agent + """ + config_path = os.path.join(get_services_core("ActuatorAgent"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance.install_agent( + agent_dir=get_services_core("ActuatorAgent"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/ActuatorAgent/tests/test_actuator_rpc_unit.py b/services/core/ActuatorAgent/tests/test_actuator_rpc_unit.py index 3a33ee4654..38489bf6f3 100644 --- a/services/core/ActuatorAgent/tests/test_actuator_rpc_unit.py +++ b/services/core/ActuatorAgent/tests/test_actuator_rpc_unit.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -52,6 +52,7 @@ from volttrontesting.utils.utils import AgentMock from volttron.platform.vip.agent import Agent +pytestmark = [pytest.mark.actuator_unit, pytest.mark.unit] PRIORITY_LOW = "LOW" SUCCESS = "SUCCESS" @@ -66,7 +67,6 @@ ActuatorAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),) -@pytest.mark.actuator_unit @pytest.mark.parametrize("topic, point", [("path/topic", None), ("another/path/to/topic", 42)]) def test_get_point_should_succeed(topic, point): with get_actuator_agent(vip_rpc_call_res=MockedAsyncResult(10.0)) as actuator_agent: @@ -76,7 +76,6 @@ def test_get_point_should_succeed(topic, point): assert result is not None -@pytest.mark.actuator_unit @pytest.mark.parametrize( "point, device_state", [ @@ -101,7 +100,6 @@ def test_set_point_should_succeed(point, device_state): assert result is not None -@pytest.mark.actuator_unit @pytest.mark.parametrize("rpc_peer", [None, 42, []]) def test_set_point_should_raise_type_error(rpc_peer): with pytest.raises(TypeError, match="Agent id must be a nonempty string"): @@ -114,7 +112,6 @@ def test_set_point_should_raise_type_error(rpc_peer): actuator_agent.set_point(requester_id, topic, value, point=point) -@pytest.mark.actuator_unit def test_set_point_should_raise_lock_error_on_non_matching_device(): with pytest.raises(LockError): requester_id = "requester-id-1" @@ -125,7 +122,6 @@ def test_set_point_should_raise_lock_error_on_non_matching_device(): actuator_agent.set_point(requester_id, topic, value) -@pytest.mark.actuator_unit def test_scrape_all_should_succeed(): with get_actuator_agent(vip_rpc_call_res=MockedAsyncResult({})) as actuator_agent: topic = "whan/that/aprille" @@ -136,7 +132,6 @@ def test_scrape_all_should_succeed(): -@pytest.mark.actuator_unit @pytest.mark.parametrize( "topics", [ @@ -156,7 +151,6 @@ def test_get_multiple_points_should_succeed(topics): assert len(errors) == 0 -@pytest.mark.actuator_unit @pytest.mark.parametrize("invalid_topics", [[(123,)], [(None)], [[123]], [[None]]]) def test_get_multiple_points_should_return_errors(invalid_topics): with get_actuator_agent() as actuator_agent: @@ -168,7 +162,6 @@ def test_get_multiple_points_should_return_errors(invalid_topics): assert len(errors) == 1 -@pytest.mark.actuator_unit @pytest.mark.parametrize( "topic_values, device_state", [ @@ -186,7 +179,6 @@ def test_get_multiple_points_should_return_errors(invalid_topics): ), ], ) -@pytest.mark.actuator_unit def test_set_multiple_points_should_succeed(topic_values, device_state): requester_id = "requester-id-1" mocked_rpc_call_res = MockedAsyncResult(({})) @@ -197,7 +189,6 @@ def test_set_multiple_points_should_succeed(topic_values, device_state): assert result == {} -@pytest.mark.actuator_unit @pytest.mark.parametrize("invalid_topic_values", [[(None,)], [(1234,)]]) def test_set_multiple_points_should_raise_value_error(invalid_topic_values): with pytest.raises(ValueError): @@ -207,7 +198,6 @@ def test_set_multiple_points_should_raise_value_error(invalid_topic_values): actuator_agent.set_multiple_points("request-id-1", invalid_topic_values) -@pytest.mark.actuator_unit def test_set_multiple_points_should_raise_lock_error_on_empty_devices(): with pytest.raises(LockError): requester_id = "requester-id-1" @@ -217,7 +207,6 @@ def test_set_multiple_points_should_raise_lock_error_on_empty_devices(): actuator_agent.set_multiple_points("request-id-1", topic_values) -@pytest.mark.actuator_unit def test_set_multiple_points_should_raise_lock_error_on_non_matching_requester(): with pytest.raises(LockError): requester_id = "wrong-requester" @@ -231,7 +220,6 @@ def test_set_multiple_points_should_raise_lock_error_on_non_matching_requester() actuator_agent.set_multiple_points("request-id-1", topic_values) -@pytest.mark.actuator_unit @pytest.mark.parametrize("point", [None, "foobarpoint"]) def test_revert_point_should_raise_lock_error_on_empty_devices(point): with pytest.raises(LockError): @@ -242,7 +230,6 @@ def test_revert_point_should_raise_lock_error_on_empty_devices(point): actuator_agent.revert_point(requester_id, topic, point=point) -@pytest.mark.actuator_unit @pytest.mark.parametrize("point", [None, "foobarpoint"]) def test_revert_point_should_raise_lock_error_on_non_matching_requester(point): with pytest.raises(LockError): @@ -257,7 +244,6 @@ def test_revert_point_should_raise_lock_error_on_non_matching_requester(point): actuator_agent.revert_point(requester_id, topic, point=point) -@pytest.mark.actuator_unit def test_revert_device_should_raise_lock_error_on_empty_devices(): with pytest.raises(LockError): requester_id = "request-id-1" @@ -267,7 +253,6 @@ def test_revert_device_should_raise_lock_error_on_empty_devices(): actuator_agent.revert_device(requester_id, topic) -@pytest.mark.actuator_unit def test_revert_device_should_raise_lock_error_on_non_matching_requester(): with pytest.raises(LockError): device_state = { @@ -281,7 +266,6 @@ def test_revert_device_should_raise_lock_error_on_non_matching_requester(): actuator_agent.revert_device(requester_id, topic) -@pytest.mark.actuator_unit def test_request_new_schedule_should_succeed(): with get_actuator_agent() as actuator_agent: result = actuator_agent.request_new_schedule(REQUESTER_ID, TASK_ID, @@ -290,7 +274,6 @@ def test_request_new_schedule_should_succeed(): assert result["result"] == SUCCESS -@pytest.mark.actuator_unit def test_request_new_schedule_should_succeed_when_stop_start_times_overlap(): start = str(datetime.now()) end = str(datetime.now() + timedelta(seconds=1)) @@ -304,7 +287,6 @@ def test_request_new_schedule_should_succeed_when_stop_start_times_overlap(): assert result["result"] == SUCCESS -@pytest.mark.actuator_unit @pytest.mark.parametrize( "task_id, expected_info", [ @@ -325,7 +307,6 @@ def test_request_new_schedule_should_fail_on_invalid_taskid(task_id, expected_in assert result["info"] == expected_info -@pytest.mark.actuator_unit @pytest.mark.parametrize( "invalid_priority, expected_info", [("LOW2", "INVALID_PRIORITY"), (None, "MISSING_PRIORITY")], @@ -341,7 +322,6 @@ def test_request_new_schedule_should_fail_on_invalid_priority(invalid_priority, assert result["info"] == expected_info -@pytest.mark.actuator_unit @pytest.mark.parametrize( "time_slot_request, expected_info", [ @@ -372,7 +352,6 @@ def test_request_new_schedule_should_fail_invalid_time_slot_requests(time_slot_r assert result["info"] == expected_info -@pytest.mark.actuator_unit def test_request_cancel_schedule_should_succeed_happy_path(): true_request_result = RequestResult( True, {}, "" @@ -384,7 +363,6 @@ def test_request_cancel_schedule_should_succeed_happy_path(): assert result["result"] == SUCCESS -@pytest.mark.actuator_unit def test_request_cancel_schedule_should_fail_on_invalid_task_id(): false_request_result = RequestResult( False, {}, "TASK_ID_DOES_NOT_EXIST" diff --git a/services/core/ActuatorAgent/tests/test_scheduler.py b/services/core/ActuatorAgent/tests/test_scheduler.py index 8956439d9a..271f4da056 100644 --- a/services/core/ActuatorAgent/tests/test_scheduler.py +++ b/services/core/ActuatorAgent/tests/test_scheduler.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,24 +36,24 @@ # under Contract DE-AC05-76RL01830 # }}} -from datetime import datetime, timedelta - -from dateutil.parser import parse import os import sys +from datetime import datetime, timedelta +from dateutil.parser import parse test_dir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(test_dir + '/../actuator') -from scheduler import ScheduleManager, DeviceState, PRIORITY_HIGH, \ - PRIORITY_LOW, PRIORITY_LOW_PREEMPT +from scheduler import ScheduleManager, DeviceState, PRIORITY_HIGH, PRIORITY_LOW, PRIORITY_LOW_PREEMPT -def verify_add_task(schedule_manager, agent_id, task_id, requests, priority, - now): +test_dir = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(test_dir + '/../actuator') + + +def verify_add_task(schedule_manager, agent_id, task_id, requests, priority, now): schedule_manager.get_schedule_state(now) - result = schedule_manager.request_slots(agent_id, task_id, requests, - priority, now) + result = schedule_manager.request_slots(agent_id, task_id, requests, priority, now) schedule_manager.get_schedule_state(now) schedule_next_event_time = schedule_manager.get_next_event_time(now) return result, schedule_next_event_time @@ -66,38 +66,30 @@ def test_basic(): print('Basic Test', now) sch_man = ScheduleManager(60, now=now) ag = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag) success, data, info_string = result1 - # success1, data1, info_string1 = result1 - assert all((success, not data, not info_string, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success, not data, not info_string, event_time1 == parse('2013-11-27 12:00:00'))) state = sch_man.get_schedule_state(now + timedelta(minutes=30)) - assert state == { - 'campus/building/rtu1': DeviceState('Agent1', 'Task1', 3600.0)} + assert state == {'campus/building/rtu1': DeviceState('Agent1', 'Task1', 3600.0)} state = sch_man.get_schedule_state(now + timedelta(minutes=60)) - assert state == { - 'campus/building/rtu1': DeviceState('Agent1', 'Task1', 1800.0)} + assert state == {'campus/building/rtu1': DeviceState('Agent1', 'Task1', 1800.0)} def test_two_devices(): print('Basic Test: Two devices', now) sch_man = ScheduleManager(60, now=now) ag = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 13:00:00')], - ['campus/building/rtu2', parse('2013-11-27 12:00:00'), - parse('2013-11-27 13:00:00')]), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 13:00:00')], + ['campus/building/rtu2', parse('2013-11-27 12:00:00'), parse('2013-11-27 13:00:00')]), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag) success, data, info_string = result1 - assert all((success, not data, not info_string, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success, not data, not info_string, event_time1 == parse('2013-11-27 12:00:00'))) state = sch_man.get_schedule_state(now + timedelta(minutes=30)) assert state == { @@ -113,24 +105,19 @@ def test_two_agents_two_devices(): print('Test requests: Two agents different devices', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:30:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:30:00')],), PRIORITY_HIGH, now) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu2', parse('2013-11-27 12:00:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu2', parse('2013-11-27 12:00:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag1) success, data, info_string = result1 - # success1, data1, info_string1 = result1 - assert all((success, not data, not info_string, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success, not data, not info_string, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 - assert all((success2, not data2, not info_string2, - event_time2 == parse('2013-11-27 12:00:00'))) + assert all((success2, not data2, not info_string2, event_time2 == parse('2013-11-27 12:00:00'))) state = sch_man.get_schedule_state(now + timedelta(minutes=30)) assert state == { @@ -145,50 +132,41 @@ def test_touching_requests(): print('Test touching requests: Two agents', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:30:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:30:00')],), PRIORITY_HIGH, now) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 - assert all((success2, not data2, not info_string2, - event_time2 == parse('2013-11-27 12:00:00'))) + assert all((success2, not data2, not info_string2, event_time2 == parse('2013-11-27 12:00:00'))) state = sch_man.get_schedule_state(now + timedelta(minutes=30)) - assert state == { - 'campus/building/rtu1': DeviceState('Agent1', 'Task1', 1800.0)} + assert state == {'campus/building/rtu1': DeviceState('Agent1', 'Task1', 1800.0)} state = sch_man.get_schedule_state(now + timedelta(minutes=60)) - assert state == { - 'campus/building/rtu1': DeviceState('Agent2', 'Task2', 1800.0)} + assert state == {'campus/building/rtu1': DeviceState('Agent2', 'Task2', 1800.0)} def test_schedule_self_conflict(): print('Testing self conflicting schedule', now) sch_man = ScheduleManager(60, now=now) ag = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:45:00')], - ['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')]), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:45:00')], + ['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')]), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag) success1, data1, info_string1 = result1 - print (not success1) - print (data1 == {}) - print (info_string1.startswith('REQUEST_CONFLICTS_WITH_SELF')) - assert all((not success1, data1 == {}, - info_string1.startswith('REQUEST_CONFLICTS_WITH_SELF'))) + print(not success1) + print(data1 == {}) + print(info_string1.startswith('REQUEST_CONFLICTS_WITH_SELF')) + assert all((not success1, data1 == {}, info_string1.startswith('REQUEST_CONFLICTS_WITH_SELF'))) def test_malformed_schedule(): @@ -201,23 +179,20 @@ def test_malformed_schedule(): result1, event_time1 = verify_add_task(sch_man, *ag) success1, data1, info_string1 = result1 - assert all((not success1, data1 == {}, - info_string1.startswith('MALFORMED_REQUEST'))) + assert all((not success1, data1 == {}, info_string1.startswith('MALFORMED_REQUEST'))) def test_malformed_schdeule_bad_timestr(): print('Testing malformed schedule: Bad time strings', now) sch_man = ScheduleManager(60, now=now) ag = ('Agent1', 'Task1', - (['campus/building/rtu1', 'fdhkdfyug', - 'Twinkle, twinkle, little bat...'],), + (['campus/building/rtu1', 'fdhkdfyug', 'Twinkle, twinkle, little bat...'],), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag) success1, data1, info_string1 = result1 - assert all((not success1, data1 == {}, - info_string1.startswith('MALFORMED_REQUEST'))) + assert all((not success1, data1 == {}, info_string1.startswith('MALFORMED_REQUEST'))) def test_malformed_bad_device(): @@ -229,54 +204,46 @@ def test_malformed_bad_device(): now) result1, event_time1 = verify_add_task(sch_man, *ag) success1, data1, info_string1 = result1 - assert all((not success1, data1 == {}, - info_string1.startswith('MALFORMED_REQUEST'))) + assert all((not success1, data1 == {}, info_string1.startswith('MALFORMED_REQUEST'))) def test_schedule_conflict(): print('Test conflicting requests: Two agents', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:35:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:35:00')],), PRIORITY_HIGH, now) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 conflicts2 = data2 assert not success2 assert conflicts2 == {'Agent1': {'Task1': [ - ['campus/building/rtu1', '2013-11-27 12:00:00', - '2013-11-27 12:35:00']]}} + ['campus/building/rtu1', '2013-11-27 12:00:00', '2013-11-27 12:35:00']]}} def test_conflict_override(): print('Test conflicting requests: Agent2 overrides Agent1', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:35:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:35:00')],), PRIORITY_LOW, now) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 assert success2 @@ -286,52 +253,43 @@ def test_conflict_override(): def test_conflict_override_fail_on_running_agent(): - print( - 'Test conflicting requests: Agent2 fails to override running Agent1', - now) + print('Test conflicting requests: Agent2 fails to override running Agent1', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:35:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:35:00')],), PRIORITY_LOW, now) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now + timedelta(minutes=45)) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 conflicts2 = data2 assert not success2 assert conflicts2 == {'Agent1': {'Task1': [ - ['campus/building/rtu1', '2013-11-27 12:00:00', - '2013-11-27 12:35:00']]}} + ['campus/building/rtu1', '2013-11-27 12:00:00', '2013-11-27 12:35:00']]}} def test_conflict_override_success_running_agent(): print('Test conflicting requests: Agent2 overrides running Agent1', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:35:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:35:00')],), PRIORITY_LOW_PREEMPT, now) now2 = now + timedelta(minutes=45) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:05:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:05:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now2) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 assert success2 @@ -340,40 +298,31 @@ def test_conflict_override_success_running_agent(): assert event_time2 == parse('2013-11-27 12:16:00') state = sch_man.get_schedule_state(now2 + timedelta(seconds=30)) - assert state == { - 'campus/building/rtu1': DeviceState('Agent1', 'Task1', 30.0)} + assert state == {'campus/building/rtu1': DeviceState('Agent1', 'Task1', 30.0)} state = sch_man.get_schedule_state(now2 + timedelta(seconds=60)) - assert state == { - 'campus/building/rtu1': DeviceState('Agent2', 'Task2', 2640.0)} + assert state == {'campus/building/rtu1': DeviceState('Agent2', 'Task2', 2640.0)} def test_conflict_override_error(): - print('Test conflicting requests: ' - 'Agent2 fails to override running Agent1 ' - 'because of non high priority.', - now) + print('Test conflicting requests: Agent2 fails to override running Agent1 because of non high priority.', now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:35:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:35:00')],), PRIORITY_LOW_PREEMPT, now) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_LOW, now) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 conflicts2 = data2 assert not success2 assert conflicts2 == {'Agent1': {'Task1': [ - ['campus/building/rtu1', '2013-11-27 12:00:00', - '2013-11-27 12:35:00']]}} + ['campus/building/rtu1', '2013-11-27 12:00:00', '2013-11-27 12:35:00']]}} def test_non_conflict_schedule(): @@ -382,24 +331,19 @@ def test_non_conflict_schedule(): now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:15:00')], - ['campus/building/rtu2', parse('2013-11-27 12:00:00'), - parse('2013-11-27 13:00:00')], - ['campus/building/rtu3', parse('2013-11-27 12:45:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:15:00')], + ['campus/building/rtu2', parse('2013-11-27 12:00:00'), parse('2013-11-27 13:00:00')], + ['campus/building/rtu3', parse('2013-11-27 12:45:00'), parse('2013-11-27 13:00:00')],), PRIORITY_LOW_PREEMPT, now) now2 = now + timedelta(minutes=55) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu1', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now2) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 assert success2 @@ -414,24 +358,19 @@ def test_conflict_override_success_running_agent2(): now) sch_man = ScheduleManager(60, now=now) ag1 = ('Agent1', 'Task1', - (['campus/building/rtu1', parse('2013-11-27 12:00:00'), - parse('2013-11-27 12:15:00')], - ['campus/building/rtu2', parse('2013-11-27 12:00:00'), - parse('2013-11-27 13:00:00')], - ['campus/building/rtu3', parse('2013-11-27 12:45:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu1', parse('2013-11-27 12:00:00'), parse('2013-11-27 12:15:00')], + ['campus/building/rtu2', parse('2013-11-27 12:00:00'), parse('2013-11-27 13:00:00')], + ['campus/building/rtu3', parse('2013-11-27 12:45:00'), parse('2013-11-27 13:00:00')],), PRIORITY_LOW_PREEMPT, now) now2 = now + timedelta(minutes=55) ag2 = ('Agent2', 'Task2', - (['campus/building/rtu3', parse('2013-11-27 12:30:00'), - parse('2013-11-27 13:00:00')],), + (['campus/building/rtu3', parse('2013-11-27 12:30:00'), parse('2013-11-27 13:00:00')],), PRIORITY_HIGH, now2) result1, event_time1 = verify_add_task(sch_man, *ag1) success1, data1, info_string1 = result1 - assert all((success1, not data1, not info_string1, - event_time1 == parse('2013-11-27 12:00:00'))) + assert all((success1, not data1, not info_string1, event_time1 == parse('2013-11-27 12:00:00'))) result2, event_time2 = verify_add_task(sch_man, *ag2) success2, data2, info_string2 = result2 assert success2 diff --git a/services/core/Ambient/README.md b/services/core/Ambient/README.md new file mode 100644 index 0000000000..5b307521c3 --- /dev/null +++ b/services/core/Ambient/README.md @@ -0,0 +1,164 @@ +# Ambient Weather Agent + +The Ambient weather agent provides the ability to query for current +weather data from Ambient weather stations via the Ambient weather API. +The agent inherits features of the Volttron BaseWeatherAgent which +provides caching of recently recieved data, as well as point name +mapping and unit conversion using the standardized CF-conventions +scheme.lu + +The values from the Ambient weather station can be accessed through the +cloud platform which can be accessed at + + +Two API Keys are required for all REST API requests: + +> applicationKey - identifies the developer / application. To request an +> application key please email +> +> apiKey - grants access to past/present data for a given user\'s +> devices. A typical consumer-facing application will initially ask the +> user to create an apiKey on thier AmbientWeather.net account page +> () and paste it into the +> app. Developers for personal or in-house apps will also need to create +> an apiKey on their own account page. + +API requests are capped at 1 request/second for each user\'s apiKey and +3 requests/second per applicationKey. When this limit is exceeded, the +API will return a 429 response code. This will result in a response from +the Ambient agent containing \"weather_error\" and no weather data. + +## Ambient Endpoints + +The Ambient Weather agent provides only current weather data (all other +base weather endpoints are unimplemented, and will return a record +containing \"weather_error\" if used). + +The location format for the Ambient agent is as follows: +``` +{"location": ""} +``` + +Ambient locations are Arbitrary string identifiers given to a weather +station by the weather station owner/operator. + +This is an example response: + + 2019-12-17 15:35:56,395 (listeneragent-3.3 3103) listener.agent INFO: Peer: pubsub, Sender: platform.ambient:, Bus: , Topic: weather/poll/current/all, Headers: {'Date': '2019-12-17T23:35:56.392709+00:00', 'Content-Type': 'Content-Type', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: + [{'location': 'Lab Home A', + 'observation_time': '2019-12-18T07:33:00.000000+00:00', + 'weather_results': {'batt1': 1, + 'battout': 1, + 'dateutc': 1576625580000, + 'dewPointin': 39.6, + 'feelsLikein': 70.2, + 'humidity1': 1, + 'humidityin': 31, + 'macAddress': '50:F1:4A:F7:3C:C4', + 'name': 'Home A WS', + 'tempinf': 71.9, + 'tz': 'Etc/GMT'}}, + {'location': 'Lab Home B', + 'observation_time': '2019-12-18T07:33:00.000000+00:00', + 'weather_results': {'batt1': 1, + 'battout': 1, + 'dateutc': 1576625580000, + 'dewPoint1': 28.6, + 'dewPointin': 23.5, + 'feelsLike1': 35.7, + 'feelsLikein': 53.4, + 'humidity1': 75, + 'humidityin': 31, + 'macAddress': '18:93:D7:3B:89:0C', + 'name': 'Home B WS', + 'temp1f': 35.7, + 'tempinf': 53.4, + 'tz': 'Etc/GMT'}}] + +The selection of weather data points which are included may depend upon +the type of Ambient device. + +### Configuration + +The following is an example configuration for the Ambient agent. The +\"api_key\" and \"app_key\" parameters are required while all others are +optional. + +**Parameters** + +1. "api_key" - api key string provided by Ambient - this is + required and will not be provided by the VOLTTRON team. +2. "appplication_key" - application key string provided by + Ambient - this is required and will not be provided by the + VOLTTRON team. +3. "database_file" - sqlite database file for weather data + caching. Defaults to \"weather.sqlite\" in the agent\'s data + directory. +4. "max_size_gb" - maximum size of cache database. When cache + exceeds this size, data will get purged from cache till cache is + within the configured size. +5. "poll_locations" - list of locations to periodically poll for + current data. +6. "poll_interval" - polling frequency or the number of seconds + between each poll. + +Example configuration: + +``` +{ + "application_key" : "", + "api_key":"", + "poll_locations": [ + {"location": "Lab Home A"}, + {"location": "Lab Home B"} + ], + "poll_interval": 60, + "identity": "platform.ambient" +} +``` + +#### Registry Configuration + +The registry configuration file for this agent can be found in agent\'s +data directory. This configuration provides the point name mapping from +the Ambient API\'s point scheme to the CF-conventions scheme by default. +Points that do not specify \"Standard_Point_Name\" were found to not +have a logical match to any point found in the CF-Conventions. For these +points Ambient point name convention (Service_Point_Name) will be used. + + |Service_Point_Name |Standard_Point_Name | Service_Units | Standard_Units | + |--------------------|------------------------------|----------------|-----------------| + |feelsLike |apparent_temperature | degF | | + |dewPoint |dew_point_temperature | degF | | + |dewPointin |dew_point_temperature_indoor | degF | | + |soiltempf | | degF | | + |soilhum | | | | + |uv |ultraviolet_index | | | + +## Running Ambient Agent Tests + +The following instructions can be used to run PyTests for the Ambient +agent. + +1\. Set up the test file - test_ambient_agent.py is the PyTest file for +the ambient agent. The test file features a few variables at the top of +the tests. These will need to be filled in by the runner of the Ambient +agent tests. The LOCATIONS variable specifies a list of \"locations\" of +Ambient devices. The required format is a list of dictionaries of the +form {\"location\": \}. Locations are +determined by the user when configuring a weather station for the +Ambient service using the Ambient app. For more information about the +Ambient API, visit + +2\. Set up the test environment - The tests are intended to be run from +the Volttron root directory using the Volttron environment. Setting the +environment variable, DEBUG_MODE=True or DEBUG=1 will preserve the test +setup and can be useful for debugging purposes. When testing from +pycharm set the Working Directory value to be the root of volttron +source/checkout directory. + +Example command line: + +``` +(volttron) @:~/volttron$ pytest -s ~/house-deployment/Ambient +``` diff --git a/services/core/Ambient/README.rst b/services/core/Ambient/README.rst deleted file mode 100644 index 6d20b34d93..0000000000 --- a/services/core/Ambient/README.rst +++ /dev/null @@ -1,149 +0,0 @@ -.. _Ambient Weather Agent: - -===================== -Ambient Weather Agent -===================== - -The Ambient weather agent provides the ability to query for current weather data from Ambient weather stations via the -Ambient weather API. The agent inherits features of the Volttron BaseWeatherAgent which provides caching of recently -recieved data, as well as point name mapping and unit conversion using the standardized CF-conventions scheme. - -The values from the Ambient weather station can be accessed through the cloud platform which can be accessed at -https://dashboard.ambientweather.net/dashboard - -Two API Keys are required for all REST API requests: - - applicationKey - identifies the developer / application. To request an application key please email - support@ambientweather.com - - apiKey - grants access to past/present data for a given user's devices. A typical consumer-facing application will - initially ask the user to create an apiKey on thier AmbientWeather.net account page - (https://dashboard.ambientweather.net/account) and paste it into the app. Developers for personal or in-house apps - will also need to create an apiKey on their own account page. - -API requests are capped at 1 request/second for each user's apiKey and 3 requests/second per applicationKey. When this -limit is exceeded, the API will return a 429 response code. This will result in a response from the Ambient agent -containing "weather_error" and no weather data. - ------------------ -Ambient Endpoints ------------------ - -The Ambient Weather agent provides only current weather data (all other base weather endpoints are unimplemented, and -will return a record containing "weather_error" if used). - -The location format for the Ambient agent is as follows: - - {"location": ""} - -Ambient locations are Arbitrary string identifiers given to a weather station by the weather station owner/operator. - -This is an example response: - -:: - - 2019-12-17 15:35:56,395 (listeneragent-3.3 3103) listener.agent INFO: Peer: pubsub, Sender: platform.ambient:, Bus: , Topic: weather/poll/current/all, Headers: {'Date': '2019-12-17T23:35:56.392709+00:00', 'Content-Type': 'Content-Type', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: - [{'location': 'Lab Home A', - 'observation_time': '2019-12-18T07:33:00.000000+00:00', - 'weather_results': {'batt1': 1, - 'battout': 1, - 'dateutc': 1576625580000, - 'dewPointin': 39.6, - 'feelsLikein': 70.2, - 'humidity1': 1, - 'humidityin': 31, - 'macAddress': '50:F1:4A:F7:3C:C4', - 'name': 'Home A WS', - 'tempinf': 71.9, - 'tz': 'Etc/GMT'}}, - {'location': 'Lab Home B', - 'observation_time': '2019-12-18T07:33:00.000000+00:00', - 'weather_results': {'batt1': 1, - 'battout': 1, - 'dateutc': 1576625580000, - 'dewPoint1': 28.6, - 'dewPointin': 23.5, - 'feelsLike1': 35.7, - 'feelsLikein': 53.4, - 'humidity1': 75, - 'humidityin': 31, - 'macAddress': '18:93:D7:3B:89:0C', - 'name': 'Home B WS', - 'temp1f': 35.7, - 'tempinf': 53.4, - 'tz': 'Etc/GMT'}}] - -The selection of weather data points which are included may depend upon the type of Ambient device. - -************* -Configuration -************* - -The following is an example configuration for the Ambient agent. The "api_key" -and "app_key" parameters are required while all others are optional. - -**Parameters** - 1. "api_key" - api key string provided by Ambient - this is required and will not be provided by the VOLTTRON team. - 2. "appplication_key" - application key string provided by Ambient - this is required and will not be provided by the VOLTTRON team. - 3. "database_file" - sqlite database file for weather data caching. Defaults to "weather.sqlite" in the agent's data directory. - 4. "max_size_gb" - maximum size of cache database. When cache exceeds this size, data will get purged from cache till - cache is within the configured size. - 5. "poll_locations - list of locations to periodically poll for current data. - 6. "poll_interval" - polling frequency or the number of seconds between each poll. - -Example configuration: - -.. code-block:: json - - { - "application_key" : "", - "api_key":"", - "poll_locations": [ - {"location": "Lab Home A"}, - {"location": "Lab Home B"} - ], - "poll_interval": 60, - "identity": "platform.ambient" - } - -Registry Configuration ----------------------- -The registry configuration file for this agent can be found in agent's data -directory. This configuration provides the point name mapping from the Ambient -API's point scheme to the CF-conventions scheme by default. Points that do not -specify "Standard_Point_Name" were found to not have a logical match to any -point found in the CF-Conventions. For these points Ambient point name -convention (Service_Point_Name) will be used. - -.. csv-table:: Registry Configuration - :header: Service_Point_Name,Standard_Point_Name,Service_Units,Standard_Units - - feelsLike,apparent_temperature,degF, - dewPoint,dew_point_temperature,degF, - dewPointin,dew_point_temperature_indoor,degF, - soiltempf,,degF, - soilhum,,, - uv,ultraviolet_index,, - ---------------------------- -Running Ambient Agent Tests ---------------------------- - -The following instructions can be used to run PyTests for the Ambient agent. - -1. Set up the test file - test_ambient_agent.py is the PyTest file for the ambient agent. The test file features a few -variables at the top of the tests. These will need to be filled in by the runner of the Ambient agent tests. The LOCATIONS -variable specifies a list of "locations" of Ambient devices. The required format is a list of dictionaries of the form -{"location": }. Locations are determined by the user when configuring a weather -station for the Ambient service using the Ambient app. For more information about the Ambient API, visit https://www.ambientweather.com/api.html - -2. Set up the test environment - The tests are intended to be run from the Volttron root directory using the Volttron -environment. Setting the environment variable, DEBUG_MODE=True or DEBUG=1 will preserve the test setup and can be useful for debugging purposes. When testing from pycharm set the Working Directory value to be the root of volttron source/checkout directory. - -Example command line: - -.. code-block:: - - (volttron) @:~/volttron$ pytest -s ~/house-deployment/Ambient - - diff --git a/services/core/Ambient/setup.py b/services/core/Ambient/setup.py index f29523e7ca..2c6992b2f8 100644 --- a/services/core/Ambient/setup.py +++ b/services/core/Ambient/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/Ambient/tests/test_ambient_agent.py b/services/core/Ambient/tests/test_ambient_agent.py index a16d27d87e..b50ec44d53 100644 --- a/services/core/Ambient/tests/test_ambient_agent.py +++ b/services/core/Ambient/tests/test_ambient_agent.py @@ -54,7 +54,7 @@ _log = logging.getLogger(__name__) # Ambient agent tests rely upon the configuration of devices operated -# by the owner/operater of Ambient devices - To run the Ambient tests +# by the owner/operator of Ambient devices - To run the Ambient tests # the test_data/test_ambient_data.json file should be populated: # api_key should be filled in with a valid Ambient API key, app_key # with an Ambient application key, and locations with a list of device @@ -157,10 +157,9 @@ def weather(request, volttron_instance): agent = volttron_instance.install_agent( vip_identity=identity, agent_dir=source, - start=False, + start=True, config_file=config) - volttron_instance.start_agent(agent) gevent.sleep(3) def stop_agent(): @@ -307,15 +306,22 @@ def test_polling_locations_valid_config(volttron_instance, query_agent, config, :return: """ agent_uuid = None - query_agent.poll_callback.reset_mock() try: agent_uuid = volttron_instance.install_agent( vip_identity="poll.weather", agent_dir=ambient_agent_path, - start=False, + start=True, config_file=config) - volttron_instance.start_agent(agent_uuid) - gevent.sleep(3) + + # wait for the agent to start up + gevent.sleep(1) + + # make sure we don't have any existing callback args + query_agent.poll_callback.reset_mock() + + # wait for the update interval + gevent.sleep(config.get("poll_interval")) + print(query_agent.poll_callback.call_args_list) assert len(result_topics) == query_agent.poll_callback.call_count assert "poll.weather" == query_agent.poll_callback.call_args[0][1] diff --git a/services/core/BACnetProxy/README.md b/services/core/BACnetProxy/README.md new file mode 100644 index 0000000000..e04597ca8d --- /dev/null +++ b/services/core/BACnetProxy/README.md @@ -0,0 +1,33 @@ +# BACnet Proxy Agent + +Communication with BACnet device on a network happens via a single virtual BACnet device. In VOLTTRON driver framework, +we use a separate agent specifically for communicating with BACnet devices and managing the virtual BACnet device. + +## Dependencies +1. The BACnet Proxy agent requires the BACPypes package. This package can be installed in an activated environment with: + ``` + pip install bacpypes + ``` +2. Current versions of VOLTTRON support only BACPypes version 0.16.7 + +## Agent Configuration + +``` + { + "device_address": "10.0.2.15", + "max_apdu_length": 1024, + "object_id": 599, + "object_name": "Volttron BACnet driver", + "vendor_id": 15, + "segmentation_supported": "segmentedBoth" + } +``` +1. device_address - Address bound to the network port over which BACnet communication will happen on the computer +running VOLTTRON. This is NOT the address of any target device. +2. object_id - ID of the Device object of the virtual BACnet device. Defaults to 599. Only needs to be changed if there +is a conflicting BACnet device ID on your network. +3. max_apdu_length - Maximum size message the device can handle +4. object_name - Name of the object. Defaults to “Volttron BACnet driver”. (Optional) +5. vendor_id - Vendor ID of the virtual BACnet device. Defaults to 15. (Optional) +6. segmentation_supported - Segmentation allows larger messages to be broken up into segments and spliced back together. +Possible setting are “segmentedBoth” (default), “segmentedTransmit”, “segmentedReceive”, or “noSegmentation” (Optional) diff --git a/services/core/BACnetProxy/bacnet_proxy/agent.py b/services/core/BACnetProxy/bacnet_proxy/agent.py index 607c51ddb4..cbee1d1d26 100644 --- a/services/core/BACnetProxy/bacnet_proxy/agent.py +++ b/services/core/BACnetProxy/bacnet_proxy/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -219,9 +219,6 @@ def _get_iocb_for_apdu(self, apdu, invoke_key): def _get_value_from_read_property_request(self, apdu, working_iocb): # find the datatype - - #_log.debug("WIGGEDYWACKYO") - datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier) if not datatype: working_iocb.set_exception(TypeError("unknown datatype")) @@ -517,7 +514,7 @@ def send_cov_subscription_callback(device_address, subscriber_process_identifier def forward_cov_callback(point_name, apdu, result_dict): """ - Asynchronous callback to forward cov values to the master driver + Asynchronous callback to forward cov values to the platform driver for gevent """ async_call.send(None, self.forward_cov, point_name, apdu, result_dict) @@ -595,13 +592,13 @@ def ping_device(self, target_address, device_id): self.who_is(device_id, device_id, target_address) def _cast_value(self, value, datatype): - if datatype is Integer: - value = int(value) - elif datatype is Real: - value = float(value) - elif datatype is Unsigned: - value = int(value) - return datatype(value) + if datatype is Integer: + value = int(value) + elif datatype is Real: + value = float(value) + elif datatype is Unsigned: + value = int(value) + return datatype(value) @RPC.export def write_property(self, target_address, value, object_type, instance_number, property_name, priority=None, diff --git a/services/core/BACnetProxy/bacnet-proxy.agent b/services/core/BACnetProxy/config similarity index 100% rename from services/core/BACnetProxy/bacnet-proxy.agent rename to services/core/BACnetProxy/config diff --git a/services/core/BACnetProxy/setup.py b/services/core/BACnetProxy/setup.py index e231183a9f..65bf2b0a14 100644 --- a/services/core/BACnetProxy/setup.py +++ b/services/core/BACnetProxy/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/CrateHistorian/README.md b/services/core/CrateHistorian/README.md new file mode 100644 index 0000000000..cac6d03158 --- /dev/null +++ b/services/core/CrateHistorian/README.md @@ -0,0 +1,85 @@ +# Crate Historian + +Crate is an open source SQL database designed on top of a No-SQL design. +It allows automatic data replication and self-healing clusters for high +availability, automatic sharding, and fast joins, aggregations and +sub-selects. + +Find out more about crate from . + +# Upgrading + +As of version 3 of the CrateHistorian the default topics table is topics +instead of topic. To continue using the same table name for topics +please add a tabledef section to your configuration file + +``` {.python} +{ + "connection": { + "type": "crate", + # Optional table prefix defaults to historian + "schema": "testing", + "params": { + "host": "localhost:4200" + } + }, + "tables_def": { + "table_prefix": "", + "data_table": "data", + "topics_table": "topics", + "meta_table": "meta" + } +} +``` + +> **_NOTE:_** +CrateHistorian is still alpha, schemas could change in the future, do +not use this for production data until schema is confirmed as final +Currently the historian supports two schemas for numerical data, the +primary schema closely resembles the SQLHistorian schema but there is an +optional \"raw\" schema that can be enabled in the config below that +utilizes some of the advanced indexing features of crate + +# Prerequisites + +## 1. Crate Database + +Install crate version 3.3.3 from +. Untar the +file and run crate-3.3.3/bin/crate to start crate. After the +installation the service will be available for viewing at + by default. + +> **_NOTE:_** +Authentication for crate is an enterprise subscription only feature. + + +## 2. Crate Driver + +There is a python library for crate that must be installed in the +volttron python environment in order to access crate. From an activated +environment, in the root of the volttron folder, execute the following +command: + +> python bootstrap.py --crate + +or + +> pip install crate + +# Configuration + +The following is an example of the crate historian\'s configuration. + +``` {.python} +{ + "connection": { + "type": "crate", + # Optional table prefix defaults to historian + "schema": "testing", + "params": { + "host": "localhost:4200" + } + } +} +``` diff --git a/services/core/CrateHistorian/README.rst b/services/core/CrateHistorian/README.rst deleted file mode 100644 index ff23cfb1f5..0000000000 --- a/services/core/CrateHistorian/README.rst +++ /dev/null @@ -1,95 +0,0 @@ -.. _Crate_Historian: - -=============== -Crate Historian -=============== - -Crate is an open source SQL database designed on top of a No-SQL design. It -allows automatic data replication and self-healing clusters for high -availability, automatic sharding, and fast joins, aggregations and sub-selects. - -Find out more about crate from ``_. - -Upgrading -~~~~~~~~~ - -As of version 3 of the CrateHistorian the default topics table is topics instead of topic. To continue -using the same table name for topics please add a tabledef section to your configuration file - -.. code-block:: python - - { - "connection": { - "type": "crate", - # Optional table prefix defaults to historian - "schema": "testing", - "params": { - "host": "localhost:4200" - } - }, - "tables_def": { - "table_prefix": "", - "data_table": "data", - "topics_table": "topics", - "meta_table": "meta" - } - } - -Note -~~~~ - -CrateHistorian is still alpha, schemas could change in the future, do not use -this for production data until schema is confirmed as final -Currently the historian supports two schemas for numerical data, the primary -schema closely resembles the SQLHistorian schema but there is an optional -"raw" schema that can be enabled in the config below that utilizes some of -the advanced indexing features of crate - - -Prerequisites -~~~~~~~~~~~~~ - -1. Crate Database ------------------ - -Install crate version 3.3.3 from https://cdn.crate.io/downloads/releases/crate-3.3.3.tar.gz. -Untar the file and run crate-3.3.3/bin/crate to start crate. After the installation -the service will be available for viewing at http://localhost:4200 by default. - -.. note:: Authentication for crate is an enterprise subscription only feature. - -2. Crate Driver ---------------- - -There is a python library for crate that must be installed in the volttron -python environment in order to access crate. From an activated environment, -in the root of the volttron folder, execute the following command: - - :: - - python bootstrap.py --crate - -or - - :: - - pip install crate - - -Configuration -~~~~~~~~~~~~~ -The following is an example of the crate historian's configuration. - -.. code-block:: python - - { - "connection": { - "type": "crate", - # Optional table prefix defaults to historian - "schema": "testing", - "params": { - "host": "localhost:4200" - } - } - } - diff --git a/services/core/CrateHistorian/cratedb/historian.py b/services/core/CrateHistorian/cratedb/historian.py index b4bf2a1631..1ecb806c15 100644 --- a/services/core/CrateHistorian/cratedb/historian.py +++ b/services/core/CrateHistorian/cratedb/historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/CrateHistorian/setup.py b/services/core/CrateHistorian/setup.py index f513a83c9a..203cd3945b 100644 --- a/services/core/CrateHistorian/setup.py +++ b/services/core/CrateHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/CrateHistorian/tests/test_crate_historian.py b/services/core/CrateHistorian/tests/test_crate_historian.py index cc4cbd7624..fd57b0b61a 100644 --- a/services/core/CrateHistorian/tests/test_crate_historian.py +++ b/services/core/CrateHistorian/tests/test_crate_historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,20 +39,22 @@ import gevent import pytest - -from volttron.platform import get_services_core - +import os +import json try: from crate import client HAS_CRATE = True except ImportError: HAS_CRATE = False +from volttron.platform import get_services_core +from volttron.platform.messaging.health import STATUS_GOOD + crate_config = { - "schema": "testing", "connection": { "type": "crate", + "schema": "testing", "params": { "host": "localhost:4200" } @@ -73,8 +75,8 @@ u'topics' ] -# used only in one test function. And clean up should happen between -# different volttron_instance types + +# used only in one test function. And clean up should happen between different volttron_instance types @pytest.fixture() def crate_connection1(): host = crate_config_no_schema['connection']['params']['host'] @@ -86,8 +88,7 @@ def crate_connection1(): conn.close() -# used only in one test function. And clean up should happen between -# different volttron_instance types +# used only in one test function. And clean up should happen between different volttron_instance types @pytest.fixture() def crate_connection2(): host = crate_config['connection']['params']['host'] @@ -100,13 +101,11 @@ def crate_connection2(): conn.close() - def clean_schema_from_database(connection, schema): tables = retrieve_tables_from_schema(connection, schema) cursor = connection.cursor() for tbl in tables: - query = "DROP TABLE IF EXISTS {schema}.{table}".format(table=tbl, - schema=schema) + query = "DROP TABLE IF EXISTS {schema}.{table}".format(table=tbl, schema=schema) cursor.execute(query) cursor.close() @@ -133,8 +132,7 @@ def test_creates_default_table_prefixes(volttron_instance, crate_connection1): vi = volttron_instance assert not retrieve_tables_from_schema(crate_connection1, "historian") - agent_uuid = vi.install_agent(agent_dir=get_services_core("CrateHistorian"), - config_file=crate_config_no_schema) + agent_uuid = vi.install_agent(agent_dir=get_services_core("CrateHistorian"), config_file=crate_config_no_schema) gevent.sleep(2) tables = retrieve_tables_from_schema(crate_connection1, "historian") @@ -154,8 +152,7 @@ def test_creates_schema_prefix_tables(volttron_instance, crate_connection2): vi = volttron_instance assert not retrieve_tables_from_schema(crate_connection2, "testing") - agent_uuid = vi.install_agent(agent_dir=get_services_core("CrateHistorian"), - config_file=crate_config) + agent_uuid = vi.install_agent(agent_dir=get_services_core("CrateHistorian"), config_file=crate_config) gevent.sleep(2) tables = retrieve_tables_from_schema(crate_connection2, "testing") @@ -166,3 +163,22 @@ def test_creates_schema_prefix_tables(volttron_instance, crate_connection2): if agent_uuid: vi.remove_agent(agent_uuid) + +@pytest.mark.historian +def test_crate_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("CrateHistorian"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance.install_agent( + agent_dir=get_services_core("CrateHistorian"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/DNP3Agent/README.md b/services/core/DNP3Agent/README.md index 5e8b0cc032..3cf6162e19 100644 --- a/services/core/DNP3Agent/README.md +++ b/services/core/DNP3Agent/README.md @@ -60,7 +60,7 @@ The **install_dnp3_agent.sh** script installs the agent: (volttron) $ export DNP3_ROOT=$VOLTTRON_ROOT/services/core/DNP3Agent (volttron) $ export AGENT_MODULE=dnp3.agent (volttron) $ cd $VOLTTRON_ROOT - (volttron) $ python scripts/install-agent.py -s $DNP3_ROOT -i dnp3agent -c $DNP3_ROOT/dnp3agent.config -t dnp3agent -f + (volttron) $ python scripts/install-agent.py -s $DNP3_ROOT -i dnp3agent -c $DNP3_ROOT/config -t dnp3agent -f (Note that $AGENT_MODULE directs the installer to use agent source code residing in the "dnp3" directory.) diff --git a/services/core/DNP3Agent/dnp3agent.config b/services/core/DNP3Agent/config similarity index 100% rename from services/core/DNP3Agent/dnp3agent.config rename to services/core/DNP3Agent/config diff --git a/services/core/DNP3Agent/conftest.py b/services/core/DNP3Agent/conftest.py index 882f7ad602..74134a5b76 100644 --- a/services/core/DNP3Agent/conftest.py +++ b/services/core/DNP3Agent/conftest.py @@ -2,7 +2,7 @@ from volttrontesting.fixtures.volttron_platform_fixtures import * -collect_ignore = ["function_test.py", "tests/mesa_master_test.py"] +collect_ignore = ["function_test.py", "tests/mesa_platform_test.py"] try: import pydnp3 diff --git a/services/core/DNP3Agent/dnp3/base_dnp3_agent.py b/services/core/DNP3Agent/dnp3/base_dnp3_agent.py index bdcde6f2da..46eb97a4f8 100644 --- a/services/core/DNP3Agent/dnp3/base_dnp3_agent.py +++ b/services/core/DNP3Agent/dnp3/base_dnp3_agent.py @@ -69,7 +69,7 @@ class BaseDNP3Agent(Agent): export VOLTTRON_ROOT= export DNP3_ROOT=$VOLTTRON_ROOT/services/core/DNP3Agent cd $VOLTTRON_ROOT - python scripts/install-agent.py -s $DNP3_ROOT -i dnp3agent -c $DNP3_ROOT/dnp3agent.config -t dnp3agent -f + python scripts/install-agent.py -s $DNP3_ROOT -i dnp3agent -c $DNP3_ROOT/config -t dnp3agent -f """ def __init__(self, points=None, point_topic='', local_ip=None, port=None, diff --git a/services/core/DNP3Agent/dnp3_master.py b/services/core/DNP3Agent/dnp3_master.py index 6fecba6272..166026959f 100644 --- a/services/core/DNP3Agent/dnp3_master.py +++ b/services/core/DNP3Agent/dnp3_master.py @@ -47,7 +47,7 @@ def __init__(self, log_handler=asiodnp3.ConsoleLogger().Create(), channel_listener=asiodnp3.PrintingChannelListener().Create(), soe_handler=asiodnp3.PrintingSOEHandler().Create(), - master_application=asiodnp3.DefaultMasterApplication().Create(), + platform_application=asiodnp3.DefaultMasterApplication().Create(), stack_config=None): self.log_levels = log_levels @@ -57,7 +57,7 @@ def __init__(self, self.log_handler = log_handler self.channel_listener = channel_listener self.soe_handler = soe_handler - self.master_application = master_application + self.platform_application = platform_application self.stackConfig = stack_config if not self.stackConfig: @@ -90,7 +90,7 @@ def connect(self): # This returns a thread-safe interface used for sending commands. self.master = self.channel.AddMaster("master", self.soe_handler, - self.master_application, + self.platform_application, self.stackConfig) # Enable the master. This will start communications. @@ -507,7 +507,7 @@ def main(): dnp3_master = DNP3Master(log_handler=LogHandler(), channel_listener=ChannelListener(), soe_handler=SOEHandler(), - master_application=MasterApplication()) + platform_application=MasterApplication()) dnp3_master.connect() # Ad-hoc tests can be inserted here if desired. dnp3_master.shutdown() diff --git a/services/core/DNP3Agent/install_dnp3_agent.sh b/services/core/DNP3Agent/install_dnp3_agent.sh index 1893d99a24..420ae1dd4d 100644 --- a/services/core/DNP3Agent/install_dnp3_agent.sh +++ b/services/core/DNP3Agent/install_dnp3_agent.sh @@ -7,7 +7,7 @@ export AGENT_MODULE=dnp3.agent cd $VOLTTRON_ROOT -python scripts/install-agent.py -s $DNP3_ROOT -i dnp3agent -c $DNP3_ROOT/dnp3agent.config -t dnp3agent -f +python scripts/install-agent.py -s $DNP3_ROOT -i dnp3agent -c $DNP3_ROOT/config -t dnp3agent -f # Put the agent's point definitions in the config store. cd $VOLTTRON_ROOT @@ -16,4 +16,4 @@ vctl config store dnp3agent mesa_points.config $DNP3_ROOT/dnp3/mesa_points.confi echo echo Stored point configurations in config store... vctl config list dnp3agent -echo \ No newline at end of file +echo diff --git a/services/core/DNP3Agent/tests/MesaTestAgent/testagent.config b/services/core/DNP3Agent/tests/MesaTestAgent/testagent.config index 2b7cc53ef4..1f73dc61fa 100644 --- a/services/core/DNP3Agent/tests/MesaTestAgent/testagent.config +++ b/services/core/DNP3Agent/tests/MesaTestAgent/testagent.config @@ -2,7 +2,7 @@ "mesaagent_id": "mesaagent", # The point configuration can be defined here by the test agent, or the test agent # can rely on the DNP3 driver's config (e.g. dnp3.csv). If a point configuration is - # defined here, and the test agent is started after the MasterDriverAgent, the test + # defined here, and the test agent is started after the PlatformDriverAgent, the test # agent's point config will override the DNP3 driver config. # "point_config": { # "DCHD.WTgt": {"group": 41, "index": 65}, diff --git a/services/core/DNP3Agent/tests/README.md b/services/core/DNP3Agent/tests/README.md index d4b961570a..8238796c8f 100644 --- a/services/core/DNP3Agent/tests/README.md +++ b/services/core/DNP3Agent/tests/README.md @@ -38,6 +38,6 @@ methods that test and validate various types of data and behavior. **MesaTestAgent** is a VOLTTRON agent that interacts with MesaAgent, sending RPC calls and subscribing to MesaAgent publication of data. -**mesa_master_cmd.py** is a standalone command-line utility (built on the Python +**mesa_platform_cmd.py** is a standalone command-line utility (built on the Python Cmd library) that sends point and function values from the master to the (MesaAgent) DNP3 outstation. diff --git a/services/core/DNP3Agent/tests/mesa_master_test.py b/services/core/DNP3Agent/tests/mesa_master_test.py index 66ae12ff4a..b1d301c458 100644 --- a/services/core/DNP3Agent/tests/mesa_master_test.py +++ b/services/core/DNP3Agent/tests/mesa_master_test.py @@ -121,10 +121,10 @@ def send_json(self, pdefs, func_def_path, send_json_path='', send_json=None): def main(): - mesa_master_test = MesaMasterTest() - mesa_master_test.connect() + mesa_platform_test = MesaMasterTest() + mesa_platform_test.connect() # Ad-hoc tests can be inserted here if desired. - mesa_master_test.shutdown() + mesa_platform_test.shutdown() if __name__ == '__main__': diff --git a/services/core/Darksky/README.md b/services/core/Darksky/README.md new file mode 100644 index 0000000000..3081c64c75 --- /dev/null +++ b/services/core/Darksky/README.md @@ -0,0 +1,137 @@ +# Dark Sky Agent + +Powered by [Dark Sky](https://darksky.net/dev) + +This agent provides the ability to query for current and forecast +weather data from Dark Sky. The agent extends BaseWeatherAgent that +provides caching of recently requested data, API call tracking, as well +as mapping of weather point names from Darksky\'s naming scheme to the +standardized CF-conventions scheme. + +## Requirements + +The Dark Sky agent requires the Pint package. This package can be +installed in an activated environment with: + + pip install pint + +## Dark Sky Endpoints + +The Dark Sky agent provides the following endpoints in addition to those +included with the base weather agent: + +### Get Minutely Forecast Data + +RPC call to weather service method **'get_minutely_forecast'** + +Parameters: + +- **locations** - List of dictionaries containing location details. Dark Sky requires + +> [{"lat": , "long": },...] + +optional parameters: + +- **minutes** - The number of minutes for which forecast data should + be returned. By default, it is 60 minutes as well as the current + minute. Dark Sky does not provide minutely data for more than one + hour (60 minutes) into the future. + +### Get Daily Forecast Data + +RPC call to weather service method **'get_minutely_forecast'** + +Parameters: + +- **locations** - List of dictionaries containing location details. Dark Sky requires + +> [{"lat": , "long": },...] + +optional parameters: + +- **days** - The number of days for which forecast data should be + returned. By default, it is the next 7 days as well as the current day. + +**Please note: If your forecast request to the Dark Sky agent asks for +more data points than the default, the agent must use an additional API +calls; an additional API call will be used to fetch any records not +included in the default forecast request for the current day, and one +additional call for each subsequent day of data the request would +require, regardless of Dark Sky agent endpoint (If requesting 60 hours +of hourly data Monday night at 8PM, 3 API calls must be made to fulfill +the request: one for the initial request containing 48 hours of data, +one for the remaining 4 hours of Wednesday evening\'s data, and one for +records in Thursday\'s forecast).** + +## Configuration + +The following is an example configuration for the Dark Sky agent. The +\"api_key\" parameter is required while all others are optional. + +**Parameters** + +1. \"api_key\" - api key string provided by Dark Sky - this is required and will not be provided by the VOLTTRON team. +2. \"api_calls_limit\" - limit of api calls that can be made to the remote before the agent no longer returns weather +results. The agent will keep track of number of api calls and return an error when the limit is reached without +attempting a connection to dark sky server. This is primarily used to prevent possible charges. If set to -1, no limit +will be applied by the agent. Dark sky api might return a error after limit is exceeded. Defaults to -1 +3. \"database_file\" - sqlite database file for weather data caching. Defaults to \"weather.sqlite\" in the agent\'s +data directory. +4. \"max_size_gb\" - maximum size of cache database. When cache exceeds this size, data will get purged from cache +till cache is within the configured size. +5. \"poll_locations - list of locations to periodically poll for current data. +6. \"poll_interval\" - polling frequency or the number of seconds between each poll. +7. \"performance_mode\" - If set to true, request response will exclude extra data points (this is primarily useful +for reducing network traffic). If set to false, all data points are included in the response, and extra data is cached +(to reduce the number of API calls used for future RPC calls). Defaults to True. + +Example configuration: + + { + "api_key": "", + "api_calls_limit": 1000, + "database_file": "weather.sqlite", + "max_size_gb": 1, + "poll_locations": [{"lat": 39.7555, "long": -105.2211}, + {"lat": 46.2804, "long": -119.2752}], + "poll_interval": 60 + } + +## Registry Configuration + +The registry configuration file for this agent can be found in agent\'s +data directory. This configuration provides the point name mapping from +the Dark Sky API\'s point scheme to the CF-conventions scheme by +default. Points that do not specify \"Standard_Point_Name\" were found +to not have a logical match to any point found in the CF-Conventions. +For these points Dark Sky point name convention (Service_Point_Name) +will be used. + + |Service_Point_Name | Standard_Point_Name | Service_Units | Standard_Units | + |---------------------|-------------------------|--------------------|----------------| + |precipIntensity | lwe_precipitation_rate |millimeter / hour |meter / second | + |precipProbability | | | | + |temperature | surface_temperature | degC |degK | + |apparentTemperature | | degC |degK | + |dewPoint | dew_point_temperature | degC |degK | + + +## Notes + +The Dark Sky agent requires an API key to be configured in order for +users to request data. A user of the Dark Sky agent must obtain the key +themselves. + +API call tracking features will work only when each agent instance uses +its own api key. If API key is shared across multiple dark sky agent +instances, disable this feature by setting api_calls_limit = -1. + +As of writing, dark sky gives 1000 daily API calls free for a trial +account. Once this limit is reached, the error \"daily usage limit +exceeded\" is returned. See for details + +By default performance mode is set to True and for a given location and +time period only the requested data points are returned. Set +performance_mode to False to query all available data for a given +location and time period if you want to cache all the data points for +future retrieval there by reducing number of API calls. diff --git a/services/core/Darksky/README.rst b/services/core/Darksky/README.rst deleted file mode 100644 index 34495b81a2..0000000000 --- a/services/core/Darksky/README.rst +++ /dev/null @@ -1,139 +0,0 @@ -.. _Darksky Agent: - -============== -Dark Sky Agent -============== - -Powered by `Dark Sky `_ - -This agent provides the ability to query for current and forecast weather -data from Dark Sky. The agent extends BaseWeatherAgent that provides caching of -recently requested data, API call tracking, as well as mapping of weather -point names from Darksky's naming scheme to the standardized CF-conventions scheme. - -Requirements ------------- -The Dark Sky agent requires the Pint package. This package can be installed in an -activated environment with: - -:: - - pip install pint - -****************** -Dark Sky Endpoints -****************** -The Dark Sky agent provides the following endpoints in addition to those -included with the base weather agent: - -Get Minutely Forecast Data --------------------------- -RPC call to weather service method **’get_minutely_forecast’** - -Parameters: - - 1. **locations** - List of dictionaries containing location details. Dark Sky requires - [{"lat": , "long": },...] - -optional parameters: - - 2. **minutes** - The number of minutes for which forecast data should be - returned. By default, it is 60 minutes as well as the current minute. - Dark Sky does not provide minutely data for more than one hour (60 - minutes) into the future. - -Get Daily Forecast Data ------------------------ -RPC call to weather service method **’get_minutely_forecast’** - -Parameters: - - 1. **locations** - List of dictionaries containing location details. Dark Sky requires - [{"lat": , "long": },...] - -optional parameters: - - 2. **days** - The number of days for which forecast data should be - returned. By default, it is the next 7 days as well as the current day. - - -**Please note: If your forecast request to the Dark Sky agent asks for more data -points than the default, the agent must use an additional API calls; an -additional API call will be used to fetch any records not included in the -default forecast request for the current day, and one additional call for each -subsequent day of data the request would require, regardless of Dark Sky agent -endpoint (If requesting 60 hours of hourly data Monday night at 8PM, 3 API calls -must be made to fulfill the request: one for the initial request containing 48 -hours of data, one for the remaining 4 hours of Wednesday evening's data, and -one for records in Thursday's forecast).** - -************* -Configuration -************* - -The following is an example configuration for the Dark Sky agent. The "api_key" -parameter is required while all others are optional. - -**Parameters** - - 1. "api_key" - api key string provided by Dark Sky - this is required and will not be provided by the VOLTTRON team. - 2. "api_calls_limit" - limit of api calls that can be made to the remote before the agent no longer returns weather - results. The agent will keep track of number of api calls and return an error when the limit is reached without - attempting a connection to dark sky server. This is primarily used to prevent possible charges. If set to -1, no - limit will be applied by the agent. Dark sky api might return a error after limit is exceeded. Defaults to -1 - 3. "database_file" - sqlite database file for weather data caching. Defaults to "weather.sqlite" in the agent's data directory. - 4. "max_size_gb" - maximum size of cache database. When cache exceeds this size, data will get purged from cache till - cache is within the configured size. - 5. "poll_locations - list of locations to periodically poll for current data. - 6. "poll_interval" - polling frequency or the number of seconds between each poll. - 7. "performance_mode" - If set to true, request response will exclude extra data points (this is primarily useful for - reducing network traffic). If set to false, all data points are included in the response, and extra data is cached - (to reduce the number of API calls used for future RPC calls). Defaults to True. - -Example configuration: - -:: - - { - "api_key": "", - "api_calls_limit": 1000, - "database_file": "weather.sqlite", - "max_size_gb": 1, - "poll_locations": [{"lat": 39.7555, "long": -105.2211}, - {"lat": 46.2804, "long": -119.2752}], - "poll_interval": 60 - } - -Registry Configuration ----------------------- -The registry configuration file for this agent can be found in agent's data -directory. This configuration provides the point name mapping from the Dark Sky -API's point scheme to the CF-conventions scheme by default. Points that do not -specify "Standard_Point_Name" were found to not have a logical match to any -point found in the CF-Conventions. For these points Dark Sky point name -convention (Service_Point_Name) will be used. - -.. csv-table:: Registry Configuration - :header: Service_Point_Name,Standard_Point_Name,Service_Units,Standard_Units - - precipIntensity,lwe_precipitation_rate,millimeter / hour,meter / second - precipProbability,,, - temperature,surface_temperature,degC,degK - apparentTemperature,,degC,degK - dewPoint,dew_point_temperature,degC,degK - -Notes ------ -The Dark Sky agent requires an API key to be configured in order for users to -request data. A user of the Dark Sky agent must obtain the key themselves. - -API call tracking features will work only when each agent instance uses its own api key. -If API key is shared across multiple dark sky agent instances, disable this feature -by setting api_calls_limit = -1. - -As of writing, dark sky gives 1000 daily API calls free for a trial account. Once this limit is reached, -the error "daily usage limit exceeded" is returned. See https://darksky.net/dev for details - -By default performance mode is set to True and for a given location and time period only the requested -data points are returned. Set performance_mode to False to query all available data for a given location -and time period if you want to cache all the data points for future retrieval there by reducing number of API calls. diff --git a/services/core/Darksky/darksky/agent.py b/services/core/Darksky/darksky/agent.py index 51abe5f754..dc547cb5ac 100644 --- a/services/core/Darksky/darksky/agent.py +++ b/services/core/Darksky/darksky/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -301,6 +301,8 @@ def query_current_weather(self, location): """ darksky_response = self.get_darksky_data( 'get_current_weather', location) + if 'currently' not in darksky_response: + _log.error("Current data not found in Dark Sky response: {}".format(darksky_response)) current_response = darksky_response.pop('currently') # Darksky required attribution current_response["attribution"] = "Powered by Dark Sky" diff --git a/services/core/Darksky/setup.py b/services/core/Darksky/setup.py index d9116e61a6..552e7d67a8 100644 --- a/services/core/Darksky/setup.py +++ b/services/core/Darksky/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -74,4 +74,4 @@ 'eggsecutable = ' + agent_module + ':main', ] } -) \ No newline at end of file +) diff --git a/services/core/Darksky/tests/test_darksky.py b/services/core/Darksky/tests/test_darksky.py index 3f8f7597c7..a4a1b93b24 100644 --- a/services/core/Darksky/tests/test_darksky.py +++ b/services/core/Darksky/tests/test_darksky.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,15 +40,16 @@ import os import copy import gevent -from mock import MagicMock import sqlite3 +import json import logging -from volttron.platform.agent import utils from datetime import datetime, timedelta +from mock import MagicMock + from volttron.platform.agent.utils import get_aware_utc_now, format_timestamp from volttron.platform.messaging.health import STATUS_GOOD - from volttron.platform import get_services_core +from volttron.platform.agent import utils __version__ = "0.1.0" @@ -89,24 +90,19 @@ } # global variable. Set to skip the module -pytestmark = pytest.mark.skipif(not API_KEY, reason="No API key found. " - "Darksky weather API " - "key needs to be set in " - "the environment variable " - "DARKSKY_KEY") +pytestmark = pytest.mark.skipif(not API_KEY, reason="No API key found. Darksky weather API key needs to be set in " + "the environment variable DARKSKY_KEY") @pytest.fixture(scope="function") def cleanup_cache(volttron_instance, query_agent, weather): weather_uuid = weather[0] identity = weather[1] - tables = ["get_current_weather", "get_hourly_forecast", - "get_minutely_forecast", "get_daily_forecast"] + tables = ["get_current_weather", "get_hourly_forecast", "get_minutely_forecast", "get_daily_forecast"] version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3) cwd = volttron_instance.volttron_home - database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + - version, "darkskyagent-" + version + - ".agent-data", "weather.sqlite"]) + database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + version, "darkskyagent-" + version + + ".agent-data", "weather.sqlite"]) _log.debug(database_file) sqlite_connection = sqlite3.connect(database_file) cursor = sqlite_connection.cursor() @@ -141,6 +137,7 @@ def stop_agent(): request.addfinalizer(stop_agent) return agent + @pytest.fixture(scope="module", params=[darksky_service, darksky_perf]) def weather(request, volttron_instance): print("** Setting up weather agent module **") @@ -152,10 +149,9 @@ def weather(request, volttron_instance): agent = volttron_instance.install_agent( vip_identity=identity, agent_dir=source, - start=False, + start=True, config_file=config) - volttron_instance.start_agent(agent) gevent.sleep(3) def stop_agent(): @@ -166,20 +162,18 @@ def stop_agent(): request.addfinalizer(stop_agent) return agent, identity + @pytest.mark.parametrize("locations", [ [{"lat": 39.7555, "long": -105.2211}], [{"lat": 39.7555, "long": -105.2211}, {"lat": 46.2804, "long": -119.2752}] ]) @pytest.mark.darksky -def test_success_current(volttron_instance, cleanup_cache, weather, - query_agent, - locations): +def test_success_current(volttron_instance, cleanup_cache, query_agent, weather, locations): weather_uuid = weather[0] identity = weather[1] version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3) cwd = volttron_instance.volttron_home - database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + - version, "darkskyagent-" + version + + database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + version, "darkskyagent-" + version + ".agent-data", "weather.sqlite"]) sqlite_connection = sqlite3.connect(database_file) cursor = sqlite_connection.cursor() @@ -188,8 +182,7 @@ def test_success_current(volttron_instance, cleanup_cache, weather, cursor.execute(api_calls_query) current_api_calls = cursor.fetchone()[0] - query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) if query_data[0].get("weather_error"): error = query_data[0].get("weather_error") @@ -216,18 +209,15 @@ def test_success_current(volttron_instance, cleanup_cache, weather, else: results = record.get("weather_error") if results.startswith("Remote API returned no data") or \ - results.startswith("Remote API redirected request, " - "but redirect failed") \ - or results.startswith("Remote API returned invalid " - "response") \ - or results.startswith("API request failed with unexpected " - "response"): + results.startswith("Remote API redirected request, but redirect failed") \ + or results.startswith("Remote API returned invalid response") \ + or results.startswith("API request failed with unexpected response"): assert True else: assert False services = {"get_minutely_forecast": 60, "get_hourly_forecast": 48, - "get_daily_forecast":7} + "get_daily_forecast": 7} for service, records_amount in services.items(): query = 'SELECT COUNT(*) FROM {service}'.format(service=service) cursor.execute(query) @@ -237,8 +227,7 @@ def test_success_current(volttron_instance, cleanup_cache, weather, else: assert num_records is records_amount*len(locations) - cache_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) cursor.execute(api_calls_query) new_api_calls = cursor.fetchone()[0] @@ -262,48 +251,41 @@ def test_success_current(volttron_instance, cleanup_cache, weather, @pytest.mark.darksky -def test_calls_exceeded(volttron_instance, cleanup_cache, query_agent, - weather): +def test_calls_exceeded(volttron_instance, cleanup_cache, query_agent, weather): weather_uuid = weather[0] identity = weather[1] version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3) cwd = volttron_instance.volttron_home - database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + - version, "darkskyagent-" + version + + database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + version, "darkskyagent-" + version + ".agent-data", "weather.sqlite"]) sqlite_connection = sqlite3.connect(database_file) cursor = sqlite_connection.cursor() for i in range(0, 100): time = format_timestamp(get_aware_utc_now() + timedelta(seconds=i)) - insert_query = """INSERT INTO API_CALLS - (CALL_TIME) VALUES (?);""" + insert_query = """INSERT INTO API_CALLS (CALL_TIME) VALUES (?);""" cursor.execute(insert_query, (time,)) sqlite_connection.commit() locations = [{"lat": 39.7555, "long": -105.2211}] - query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) - assert query_data[0]['weather_error'] == 'No calls currently available ' \ - 'for the configured API key' + assert query_data[0]['weather_error'] == 'No calls currently available for the configured API key' assert not query_data[0].get('weather_results') - query_data = query_data = query_agent.vip.rpc.call( - identity, 'get_hourly_forecast', locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', locations).get(timeout=30) - assert query_data[0]['weather_error'] == 'No calls currently available ' \ - 'for the configured API key' + assert query_data[0]['weather_error'] == 'No calls currently available for the configured API key' assert not query_data[0].get('weather_results') delete_query = "DROP TABLE IF EXISTS API_CALLS;" cursor.execute(delete_query) - create_query = """CREATE TABLE API_CALLS - (CALL_TIME TIMESTAMP NOT NULL);""" + create_query = """CREATE TABLE API_CALLS (CALL_TIME TIMESTAMP NOT NULL);""" cursor.execute(create_query) sqlite_connection.commit() + @pytest.mark.parametrize("locations", [ ["fail"], [{"lat": 39.7555}], @@ -312,12 +294,10 @@ def test_calls_exceeded(volttron_instance, cleanup_cache, query_agent, @pytest.mark.darksky def test_current_fail(weather, query_agent, locations): identity = weather[1] - query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) for record in query_data: error = record.get("weather_error") - assert error.startswith("Invalid location format.") or error.startswith( - "Invalid location") + assert error.startswith("Invalid location format.") or error.startswith("Invalid location") assert record.get("weather_results") is None @@ -333,14 +313,12 @@ def test_current_fail(weather, query_agent, locations): 'get_hourly_forecast'), ]) @pytest.mark.darksky -def test_success_forecast(volttron_instance, cleanup_cache, weather, - query_agent, locations, service): +def test_success_forecast(volttron_instance, cleanup_cache, weather, query_agent, locations, service): weather_uuid = weather[0] identity = weather[1] version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3) cwd = volttron_instance.volttron_home - database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + - version, "darkskyagent-" + version + + database_file = "/".join([cwd, "agents", weather_uuid, "darkskyagent-" + version, "darkskyagent-" + version + ".agent-data", "weather.sqlite"]) sqlite_connection = sqlite3.connect(database_file) cursor = sqlite_connection.cursor() @@ -352,14 +330,11 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, query_data = [] if service == "get_minutely_forecast": - query_data = query_agent.vip.rpc.call( - identity, service, locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) if service == "get_hourly_forecast": - query_data = query_agent.vip.rpc.call( - identity, service, locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) if service == "get_daily_forecast": - query_data = query_agent.vip.rpc.call( - identity, service, locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) if query_data[0].get("weather_error"): error = query_data[0].get("weather_error") @@ -405,12 +380,9 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, error = location_data.get("weather_error") if error and not results: if error.startswith("Remote API returned no data") \ - or error.startswith("Remote API redirected request, but " - "redirect failed") \ - or error.startswith("Remote API returned invalid " - "response") \ - or error.startswith("API request failed with " - "unexpected response"): + or error.startswith("Remote API redirected request, but redirect failed") \ + or error.startswith("Remote API returned invalid response") \ + or error.startswith("API request failed with unexpected response"): assert True else: assert False @@ -428,16 +400,12 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, cache_data = [] # default quantity - if service == 'get_minutely_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) if service == 'get_hourly_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) if service == 'get_daily_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) cursor.execute(api_calls_query) new_api_calls = cursor.fetchone()[0] @@ -449,12 +417,9 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, print(query_location_data) cache_location_data = cache_data[x] print(cache_location_data) - assert cache_location_data.get( - "generation_time") == query_location_data.get("generation_time") - assert cache_location_data.get("lat") == query_location_data.get( - "lat") - assert cache_location_data.get("long") == query_location_data.get( - "long") + assert cache_location_data.get("generation_time") == query_location_data.get("generation_time") + assert cache_location_data.get("lat") == query_location_data.get("lat") + assert cache_location_data.get("long") == query_location_data.get("long") if cache_location_data.get("weather_results"): query_weather_results = query_location_data.get("weather_results") @@ -470,20 +435,16 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, else: results = cache_location_data.get("weather_error") if results.startswith("Remote API returned no data") \ - or results.startswith("Remote API redirected request, but " - "redirect failed") \ - or results.startswith("Remote API returned invalid " - "response") \ - or results.startswith("API request failed with unexpected " - "response"): + or results.startswith("Remote API redirected request, but redirect failed") \ + or results.startswith("Remote API returned invalid response") \ + or results.startswith("API request failed with unexpected response"): assert True else: assert False for service_name, records_amount in services.items(): if not service_name == service: - query = 'SELECT COUNT(*) FROM {service}'.format( - service=service_name) + query = 'SELECT COUNT(*) FROM {service}'.format(service=service_name) cursor.execute(query) num_records = cursor.fetchone()[0] if identity == 'platform.darksky_perf': @@ -491,6 +452,7 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, else: assert num_records is records_amount*len(locations) + @pytest.mark.parametrize("locations, service", [ ([{"lat": 39.7555, "long": -105.2211}], 'get_minutely_forecast'), ([{"lat": 39.7555, "long": -105.2211}, {"lat": 46.2804, "long": -119.2752}], @@ -503,20 +465,16 @@ def test_success_forecast(volttron_instance, cleanup_cache, weather, 'get_hourly_forecast'), ]) @pytest.mark.darksky -def test_less_than_default_forecast(volttron_instance, cleanup_cache, weather, - query_agent, locations, service): +def test_less_than_default_forecast(volttron_instance, cleanup_cache, weather, query_agent, locations, service): query_data = [] cache_data = [] identity = weather[1] if service == 'get_minutely_forecast': - query_data = query_agent.vip.rpc.call( - identity, service, locations, minutes=2).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations, minutes=2).get(timeout=30) elif service == 'get_hourly_forecast': - query_data = query_agent.vip.rpc.call( - identity, service, locations, hours=2).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations, hours=2).get(timeout=30) elif service == 'get_daily_forecast': - query_data = query_agent.vip.rpc.call( - identity, service, locations, days=2).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations, days=2).get(timeout=30) else: pytest.fail('invalid request type') if query_data[0].get("weather_error"): @@ -530,14 +488,11 @@ def test_less_than_default_forecast(volttron_instance, cleanup_cache, weather, assert len(record['weather_results']) == 2 if service == 'get_minutely_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations, minutes=2).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations, minutes=2).get(timeout=30) elif service == 'get_hourly_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations, hours=2).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations, hours=2).get(timeout=30) elif service == 'get_daily_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations, days=2).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations, days=2).get(timeout=30) assert len(cache_data) == len(query_data) for x in range(0, len(cache_data)): @@ -545,12 +500,9 @@ def test_less_than_default_forecast(volttron_instance, cleanup_cache, weather, print(query_location_data) cache_location_data = cache_data[x] print(cache_location_data) - assert cache_location_data.get( - "generation_time") == query_location_data.get("generation_time") - assert cache_location_data.get("lat") == query_location_data.get( - "lat") - assert cache_location_data.get("long") == query_location_data.get( - "long") + assert cache_location_data.get("generation_time") == query_location_data.get("generation_time") + assert cache_location_data.get("lat") == query_location_data.get("lat") + assert cache_location_data.get("long") == query_location_data.get("long") if cache_location_data.get("weather_results"): query_weather_results = query_location_data.get("weather_results") @@ -564,6 +516,7 @@ def test_less_than_default_forecast(volttron_instance, cleanup_cache, weather, for key in cache_result[1]: assert cache_result[1][key] == result[1][key] + @pytest.mark.parametrize("locations, service", [ ([{"lat": 39.7555, "long": -105.2211}], 'get_minutely_forecast'), ([{"lat": 39.7555, "long": -105.2211}, {"lat": 46.2804, "long": -119.2752}], @@ -576,26 +529,22 @@ def test_less_than_default_forecast(volttron_instance, cleanup_cache, weather, 'get_hourly_forecast'), ]) @pytest.mark.darksky -def test_more_than_default_forecast(volttron_instance, cleanup_cache, weather, - query_agent, locations, service): +def test_more_than_default_forecast(volttron_instance, cleanup_cache, weather, query_agent, locations, service): identity = weather[1] big_request = 0 query_data = [] cache_data = [] if service == 'get_minutely_forecast': big_request = 61 - query_data = query_agent.vip.rpc.call( - identity, service, locations, minutes=big_request).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations, minutes=big_request).get(timeout=30) if big_request > 60: big_request = 60 # dark sky provides 60 minutes max. elif service == 'get_hourly_forecast': big_request = 50 - query_data = query_agent.vip.rpc.call( - identity, service, locations, hours=big_request).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations, hours=big_request).get(timeout=30) elif service == 'get_daily_forecast': big_request = 9 - query_data = query_agent.vip.rpc.call( - identity, service, locations, days=big_request).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations, days=big_request).get(timeout=30) else: pytest.fail('invalid request type') if query_data[0].get("weather_error"): @@ -607,14 +556,11 @@ def test_more_than_default_forecast(volttron_instance, cleanup_cache, weather, assert len(record['weather_results']) == big_request if service == 'get_minutely_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations, minutes=big_request).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations, minutes=big_request).get(timeout=30) elif service == 'get_hourly_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations, hours=big_request).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations, hours=big_request).get(timeout=30) elif service == 'get_daily_forecast': - cache_data = query_agent.vip.rpc.call( - identity, service, locations, days=big_request).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, service, locations, days=big_request).get(timeout=30) assert len(cache_data) == len(query_data) print("Query data: \n {}".format(query_data)) @@ -625,12 +571,9 @@ def test_more_than_default_forecast(volttron_instance, cleanup_cache, weather, for x in range(0, len(cache_data)): query_location_data = query_data[x] cache_location_data = cache_data[x] - assert cache_location_data.get( - "generation_time") == query_location_data.get("generation_time") - assert cache_location_data.get("lat") == query_location_data.get( - "lat") - assert cache_location_data.get("long") == query_location_data.get( - "long") + assert cache_location_data.get("generation_time") == query_location_data.get("generation_time") + assert cache_location_data.get("lat") == query_location_data.get("lat") + assert cache_location_data.get("long") == query_location_data.get("long") if cache_location_data.get("weather_results"): query_weather_results = query_location_data.get("weather_results") @@ -659,8 +602,7 @@ def test_more_than_default_forecast(volttron_instance, cleanup_cache, weather, @pytest.mark.darksky def test_forecast_fail(weather, query_agent, locations, service): identity = weather[1] - query_data = query_agent.vip.rpc.call(identity, service, - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, service, locations).get(timeout=30) for record in query_data: error = record.get("weather_error") if error.startswith("Invalid location format."): @@ -672,7 +614,6 @@ def test_forecast_fail(weather, query_agent, locations, service): assert record.get("weather_results") is None -@pytest.mark.darksky @pytest.mark.parametrize('config, result_topics', [ ({'poll_locations': [{"lat": 39.7555, "long": -105.2211}, {"lat": 46.2804, "long": 119.2752}], @@ -688,18 +629,25 @@ def test_forecast_fail(weather, query_agent, locations, service): }, ['weather/poll/current/test1', 'weather/poll/current/test2']) ]) -def test_polling_locations_valid_config(volttron_instance, query_agent, config, - result_topics): +@pytest.mark.darksky +def test_polling_locations_valid_config(volttron_instance, query_agent, config, result_topics): agent_uuid = None - query_agent.poll_callback.reset_mock() try: agent_uuid = volttron_instance.install_agent( vip_identity="poll.weather", agent_dir=get_services_core("Darksky"), - start=False, + start=True, config_file=config) - volttron_instance.start_agent(agent_uuid) - gevent.sleep(3) + + # wait for the agent to start up + gevent.sleep(1) + + # make sure we don't have any existing callback args + query_agent.poll_callback.reset_mock() + + # wait for the duration of the update interval + gevent.sleep(config.get("poll_interval")) + print(query_agent.poll_callback.call_args_list) assert len(result_topics) == query_agent.poll_callback.call_count assert "poll.weather" == query_agent.poll_callback.call_args[0][1] @@ -718,10 +666,63 @@ def test_polling_locations_valid_config(volttron_instance, query_agent, config, assert isinstance(results1, list) assert len(results1) == len(config["poll_locations"]) i = i + 1 - assert query_agent.vip.rpc.call( - "poll.weather", "health.get_status").get(timeout=10).get( - 'status') == STATUS_GOOD + assert query_agent.vip.rpc.call("poll.weather", "health.get_status").get(timeout=10).get('status') == \ + STATUS_GOOD finally: if agent_uuid: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) + + +@pytest.mark.darksky +def test_default_config(volttron_instance, query_agent, cleanup_cache): + """ + Test the default configuration file included with the agent + """ + locations = [{"lat": 39.7555, "long": -105.2211}] + + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("Darksky"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + config_json["api_key"] = API_KEY + + volttron_instance.install_agent( + agent_dir=get_services_core("Darksky"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + query_data = query_agent.vip.rpc.call("health_test", 'get_current_weather', locations).get(timeout=30) + + if query_data[0].get("weather_error"): + error = query_data[0].get("weather_error") + if error.endswith("Remote API returned Code 403"): + pytest.skip("API key has exceeded daily call limit") + + print(query_data) + + assert len(query_data) == len(locations) + for record in query_data: + # check format here + assert record.get("observation_time") + assert (record.get("lat") and record.get("long")) + results = record.get("weather_results") + if results: + assert isinstance(results, dict) + assert "data" not in results + assert results["attribution"] == "Powered by Dark Sky" + else: + results = record.get("weather_error") + if results.startswith("Remote API returned no data") or \ + results.startswith("Remote API redirected request, but redirect failed") \ + or results.startswith("Remote API returned invalid response") \ + or results.startswith("API request failed with unexpected response"): + assert True + else: + assert False diff --git a/services/core/DataMover/README.md b/services/core/DataMover/README.md new file mode 100644 index 0000000000..fd03c11da3 --- /dev/null +++ b/services/core/DataMover/README.md @@ -0,0 +1,52 @@ +# DataMover Historian + +The DataMover Historian is used to send data from one instance of +VOLTTRON to another. This agent is similar to the Forward Historian but +does not publish data on the target platform\'s message bus. Messages +are instead inserted into the backup queue in the target\'s historian. +This helps to ensure that messages are recorded. + +If the target instance becomes unavailable or the target historian is +stopped then this agent\'s cache will build up until it reaches it\'s +maximum capacity or the instance and agent comes back online. + +The DataMover now uses the configuration store for storing its +configurations. This allows dynamic updating of configuration without +having to rebuild the agent. + +## Configuration Options + +The following JSON configuration file shows all the options currently +supported by the DataMover agent. + +``` {.python} +{ + # destination-serverkey + # The destination instance's publickey. Required if the + # destination-vip-address has not been added to the known-host file. + # See vctl auth --help for all instance security options. + # + # This can be retrieved either through the command: + # vctl auth serverkey + # Or if the web is enabled on the destination through the browser at: + # http(s)://hostaddress:port/discovery/ + "destination-serverkey": null, + + # destination-vip-address - REQUIRED + # Address of the target platform. + # Examples: + # "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket" + # "destination-vip": "tcp://127.0.0.1:23916" + "destination-vip": "tcp://:", + + # destination_historian_identity + # Identity of the historian to send data to. Only needed if data + # should be sent an agent other than "platform.historian" + "destination-historian-identity": "platform.historian", + + # remote_identity - OPTIONAL + # identity that will show up in peers list on the remote platform + # By default this identity is randomly generated + "remote-identity": "22916.datamover" +} +``` diff --git a/services/core/DataMover/README.rst b/services/core/DataMover/README.rst deleted file mode 100644 index 0674bd3794..0000000000 --- a/services/core/DataMover/README.rst +++ /dev/null @@ -1,58 +0,0 @@ -.. _DataMover_Historian - -=================== -DataMover Historian -=================== - -The DataMover Historian is used to send data from one instance of VOLTTRON to -another. This agent is similar to the Forward Historian but does not publish -data on the target platform's message bus. Messages are instead inserted into -the backup queue in the target's historian. This helps to ensure that messages -are recorded. - -If the target instance becomes unavailable or the target historian is stopped -then this agent's cache will build up until it reaches it's maximum capacity -or the instance and agent comes back online. - -The DataMover now uses the configuration store for storing its -configurations. This allows dynamic updating of configuration without having -to rebuild the agent. - -Configuration Options ---------------------- - -The following JSON configuration file shows all the options currently supported -by the DataMover agent. - -.. code-block:: python - - { - # destination-serverkey - # The destination instance's publickey. Required if the - # destination-vip-address has not been added to the known-host file. - # See vctl auth --help for all instance security options. - # - # This can be retrieved either through the command: - # vctl auth serverkey - # Or if the web is enabled on the destination through the browser at: - # http(s)://hostaddress:port/discovery/ - "destination-serverkey": null, - - # destination-vip-address - REQUIRED - # Address of the target platform. - # Examples: - # "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket" - # "destination-vip": "tcp://127.0.0.1:23916" - "destination-vip": "tcp://:", - - # destination_historian_identity - # Identity of the historian to send data to. Only needed if data - # should be sent an agent other than "platform.historian" - "destination-historian-identity": "platform.historian", - - # remote_identity - OPTIONAL - # identity that will show up in peers list on the remote platform - # By default this identity is randomly generated - "remote-identity": "22916.datamover" - } - diff --git a/services/core/DataMover/datamover/agent.py b/services/core/DataMover/datamover/agent.py index a8f529bba1..e1507c15c1 100755 --- a/services/core/DataMover/datamover/agent.py +++ b/services/core/DataMover/datamover/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/DataMover/setup.py b/services/core/DataMover/setup.py index e231183a9f..65bf2b0a14 100644 --- a/services/core/DataMover/setup.py +++ b/services/core/DataMover/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/DataMover/tests/test_datamover.py b/services/core/DataMover/tests/test_datamover.py index ab251f2164..581a1094bd 100644 --- a/services/core/DataMover/tests/test_datamover.py +++ b/services/core/DataMover/tests/test_datamover.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,12 +35,14 @@ # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} + import os +import json import random -from datetime import datetime import gevent import pytest from pytest import approx +from datetime import datetime from volttron.platform import get_services_core from volttron.platform.agent import utils @@ -48,6 +50,7 @@ from volttron.platform.messaging import topics from volttron.platform.vip.agent import Agent from volttron.platform.keystore import KnownHostsStore +from volttron.platform.messaging.health import STATUS_GOOD datamover_uuid = None datamover_config = { @@ -124,7 +127,6 @@ def sqlhistorian(request, volttron_instances): @pytest.fixture(scope="module") def forwarder(request, volttron_instances): global volttron_instance1, volttron_instance2 - global datamover_uuid, datamover_config # 1. Update destination address in forwarder configuration @@ -167,12 +169,10 @@ def test_devices_topic(publish_agent, query_agent): another instance. Test if topic name substitutions happened. Publish to 'devices/PNNL/BUILDING_1/Device/all' in volttron_instance1 and query for topic 'devices/PNNL/BUILDING1_ANON/Device/all' in volttron_instance2 - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -217,12 +217,10 @@ def test_record_topic(publish_agent, query_agent): """ Test if record topic message is getting forwarded to historian running on another instance. - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -275,12 +273,10 @@ def test_record_topic_no_header(publish_agent, query_agent): """ Test if record topic message is getting forwarded to historian running on another instance. - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -323,12 +319,10 @@ def test_analysis_topic(publish_agent, query_agent): 'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and query for topic 'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance2 - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -391,12 +385,10 @@ def test_analysis_topic_no_header(publish_agent, query_agent): 'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and query for topic 'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance2 - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -457,13 +449,10 @@ def test_log_topic(publish_agent, query_agent): Record should get entered into database with current time at time of insertion and should ignore timestamp in header. Topic name substitution should have happened - - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -517,12 +506,10 @@ def test_log_topic_no_header(publish_agent, query_agent): query for topic 'datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance2 - :param publish_agent: Fake agent used to publish messages to bus in volttron_instance1. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance1 and forwareder agent and returns the instance of fake agent to publish - :param query_agent: Fake agent used to query sqlhistorian in volttron_instance2. Calling this fixture makes sure all the dependant fixtures are called to setup and start volttron_instance2 and sqlhistorian @@ -581,3 +568,22 @@ def test_old_config(volttron_instances, forwarder): print("data_mover agent id: ", uuid) +@pytest.mark.historian +@pytest.mark.forwarder +def test_default_config(volttron_instances): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance1.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("DataMover"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance1.install_agent( + agent_dir=get_services_core("DataMover"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/ExternalData/README.md b/services/core/ExternalData/README.md new file mode 100644 index 0000000000..201cdd3454 --- /dev/null +++ b/services/core/ExternalData/README.md @@ -0,0 +1,111 @@ +External Data Agent +=================== +This agent gathers and publishes JSON data available via a web api + +Configuration +------------- + +The following is an example configuration file for the External Data Agent: + + { + #Interval at which to scrape the sources. + "interval":300, + + #Global topic prefix for all publishes. + "global_topic_prefix": "record", + + #Default user name and password if all sources require the same + #credentials. Can be overridden in individual sources. + #"default_user":"my_user_name", + #"default_password" : "my_password", + + "sources": + [ + { + #Valid types are "csv", "json", and "raw" + #Defaults to "raw" + "type": "csv", + #Source URL for CSV data. + "url": "https://example.com/example", + + #URL parameters for data query (optional). + # See https://en.wikipedia.org/wiki/Query_string + "params": {"period": "currentinterval", + "format": "csv"}, + + #Topic to publish on. + "topic": "example/examplecsvdata1", + + #Column used to break rows in CSV out into separate publishes. + #The key will be removed from the row data and appended to the end + # of the publish topic. + # If this option is missing the entire CSV will be published as a list + # of objects. + #If the column does not exist nothing will be published. + "key": "Key Column", + + #Attempt to parse these columns in the data into numeric types. + #Currently columns are parsed with ast.literal_eval() + #Values that fail to parse are left as strings unless the + # values is an empty string. Empty strings are changed to None. + "parse": ["Col1", "Col2"], + + #Source specific authentication. + "user":"username", + "password" : "password" + }, + { + #Valid types are "csv", "json", and "raw" + #Defaults to "raw" + "type": "csv", + #Source URL for CSV data. + "url": "https://example.com/example_flat", + + #URL parameters for data query (optional). + # See https://en.wikipedia.org/wiki/Query_string + "params": {"format": "csv"}, + + #Topic to publish on. (optional) + "topic": "example/examplecsvdata1", + + #If the rows in a csv represent key/value pairs use this + #setting to reduce this format to a single object for publishing. + "flatten": true, + + #Attempt to parse these columns in the data into numeric types. + #Currently columns are parsed with ast.literal_eval() + #Values that fail to parse are left as strings unless the + # values is an empty string. Empty strings are changed to None. + "parse": ["Col1", "Col2"] + }, + { + #Valid types are "csv", "json", and "raw" + #Defaults to "raw" + "type": "json", + #Source URL for JSON data. + "url": "https://example.com/api/example1", + + #URL parameters for data query (optional) + # See https://en.wikipedia.org/wiki/Query_string + "params": {"format": "json"}, + + #Topic to publish on. (optional) + "topic": "example/exampledata1", + + #Path to desired data withing the JSON. Optional. + #Elements in a path may be either a string or an integer. + #Useful for peeling off unneeded layers around the wanted data. + "path": ["parentobject", "0"], + + #After resolving the path above if the resulting data is a list + # the key is the path to a value in a list item. Each item in the list + # is published separately with the key appended to the end of the topic. + # Elements in a key may be a string or an integer. (optional) + "key": ["Location", "$"], + + #Source specific authentication. + "user":"username", + "password" : "password" + } + ] +} diff --git a/services/core/ExternalData/config b/services/core/ExternalData/config index 918b613681..02db8638dc 100644 --- a/services/core/ExternalData/config +++ b/services/core/ExternalData/config @@ -1,100 +1,34 @@ { - #Interval at which to scrape the sources. "interval":300, - - #Global topic prefix for all publishes. "global_topic_prefix": "record", - - #Default user name and password if all sources require the same - #credentials. Can be overridden in individual sources. - #"default_user":"my_user_name", - #"default_password" : "my_password", - "sources": [ { - #Valid types are "csv", "json", and "raw" - #Defaults to "raw" "type": "csv", - #Source URL for CSV data. "url": "https://example.com/example", - - #URL parameters for data query (optional). - # See https://en.wikipedia.org/wiki/Query_string "params": {"period": "currentinterval", "format": "csv"}, - - #Topic to publish on. "topic": "example/examplecsvdata1", - - #Column used to break rows in CSV out into separate publishes. - #The key will be removed from the row data and appended to the end - # of the publish topic. - # If this option is missing the entire CSV will be published as a list - # of objects. - #If the column does not exist nothing will be published. "key": "Key Column", - - #Attempt to parse these columns in the data into numeric types. - #Currently columns are parsed with ast.literal_eval() - #Values that fail to parse are left as strings unless the - # values is an empty string. Empty strings are changed to None. "parse": ["Col1", "Col2"], - - #Source specific authentication. "user":"username", "password" : "password" }, { - #Valid types are "csv", "json", and "raw" - #Defaults to "raw" "type": "csv", - #Source URL for CSV data. "url": "https://example.com/example_flat", - - #URL parameters for data query (optional). - # See https://en.wikipedia.org/wiki/Query_string "params": {"format": "csv"}, - - #Topic to publish on. (optional) "topic": "example/examplecsvdata1", - - #If the rows in a csv represent key/value pairs use this - #setting to reduce this format to a single object for publishing. "flatten": true, - - #Attempt to parse these columns in the data into numeric types. - #Currently columns are parsed with ast.literal_eval() - #Values that fail to parse are left as strings unless the - # values is an empty string. Empty strings are changed to None. "parse": ["Col1", "Col2"] }, { - #Valid types are "csv", "json", and "raw" - #Defaults to "raw" "type": "json", - #Source URL for JSON data. "url": "https://example.com/api/example1", - - #URL parameters for data query (optional) - # See https://en.wikipedia.org/wiki/Query_string "params": {"format": "json"}, - - #Topic to publish on. (optional) "topic": "example/exampledata1", - - #Path to desired data withing the JSON. Optional. - #Elements in a path may be either a string or an integer. - #Useful for peeling off unneeded layers around the wanted data. "path": ["parentobject", "0"], - - #After resolving the path above if the resulting data is a list - # the key is the path to a value in a list item. Each item in the list - # is published separately with the key appended to the end of the topic. - # Elements in a key may be a string or an integer. (optional) "key": ["Location", "$"], - - #Source specific authentication. "user":"username", "password" : "password" } diff --git a/services/core/ExternalData/external_data/agent.py b/services/core/ExternalData/external_data/agent.py index f242cc09fa..0a2ba4fd41 100644 --- a/services/core/ExternalData/external_data/agent.py +++ b/services/core/ExternalData/external_data/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,9 +39,9 @@ import logging import sys import csv +import requests from ast import literal_eval from io import StringIO -import requests from requests.auth import HTTPBasicAuth from volttron.platform.vip.agent import Agent @@ -307,13 +307,11 @@ def _handle_raw(self, headers, request, url, source_topic, source_params): def main(argv=sys.argv): - """ - Main method called by the eggsecutable. - """ + """Main method called by the eggsecutable.""" try: utils.vip_main(external_data_agent) except Exception as e: - _log.exception('unhandled exception') + _log.exception('unhandled exception: {}'.format(e)) if __name__ == '__main__': diff --git a/services/core/ExternalData/setup.py b/services/core/ExternalData/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/core/ExternalData/setup.py +++ b/services/core/ExternalData/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/ForwardHistorian/README.md b/services/core/ForwardHistorian/README.md new file mode 100644 index 0000000000..6d29281ed6 --- /dev/null +++ b/services/core/ForwardHistorian/README.md @@ -0,0 +1,106 @@ +# Forward Historian + + +The Forward Historian is used to send data from one instance of VOLTTRON +to another. This agents primary purpose is to allow the target +instance\'s pubsub bus to simulate data coming from a real device. If +the target instance becomes unavailable or one of the \"required +agents\" becomes unavailable then the cache of this agent will build up +until it reaches it\'s maximum capacity or the instance and agents come +back online. + +The Forward Historian now uses the configuration store for storing its +configurations. This allows dynamic updating of configuration without +having to rebuild the agent. + +## FAQ /Notes + +By default the Forward Historian adds an X-Forwarded and +X-Forwarded-From header to the forwarded message. The X-Forwarded-From +uses the instance-name of the platform (ip address:port by default). + +## Configuration Options + +The following JSON configuration file shows all the options currently +supported by the ForwardHistorian agent. + +``` {.python} +{ + # destination-serverkey + # The destination instance's publickey. Required if the + # destination-vip-address has not been added to the known-host file. + # See vctl auth --help for all instance security options. + # + # This can be retrieved either through the command: + # vctl auth serverkey + # Or if the web is enabled on the destination through the browser at: + # http(s)://hostaddress:port/discovery/ + "destination-serverkey": null, + + # destination-vip-address - REQUIRED + # Address of the target platform. + # Examples: + # "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket" + # "destination-vip": "tcp://127.0.0.1:22916" + "destination-vip": "tcp://:" + + # required_target_agents + # Allows checking on the remote instance to verify peer identtites + # are connected before publishing. + # + # Example: + # Require the platform.historian agent to be present on the + # destination instance before publishing. + # "required_target_agent" ["platform.historian"] + "required_target_agents": [], + + # capture_device_data + # This is True by default and allows the Forwarder to forward + # data published from the device topic + "capture_device_data": true, + + # capture_analysis_data + # This is True by default and allows the Forwarder to forward + # data published from the device topic + "capture_analysis_data": true, + + # capture_log_data + # This is True by default and allows the Forwarder to forward + # data published from the datalogger topic + "capture_log_data": true, + + # capture_record_data + # This is True by default and allows the Forwarder to forward + # data published from the record topic + "capture_record_data": true, + + # custom_topic_list + # Unlike other historians, the forward historian can re-publish from + # any topic. The custom_topic_list is prefixes to subscribe to on + # the local bus and forward to the destination instance. + "custom_topic_list": ["actuator", "alert"], + + # cache_only + # Allows one to put the forward historian in a cache only mode so that + # data is backed up while doing operations on the destination + # instance. + # + # Setting this to true will start cache to backup and not attempt + # to publish to the destination instance. + "cache_only": false, + + # topic_replace_list - Deprecated in favor of retrieving the list of + # replacements from the VCP on the current instance. + "topic_replace_list": [ + #{"from": "FromString", "to": "ToString"} + ], + + # Publish a message to the log after a certain number of "successful" + # publishes. To disable the message to not print anything set the + # count to 0. + # + # Note "successful" means that it was removed from the backup cache. + "message_publish_count": 10000 + +} +``` diff --git a/services/core/ForwardHistorian/README.rst b/services/core/ForwardHistorian/README.rst deleted file mode 100644 index 1f9449bee9..0000000000 --- a/services/core/ForwardHistorian/README.rst +++ /dev/null @@ -1,110 +0,0 @@ -.. _Forward_Historian - -================= -Forward Historian -================= - -The Forward Historian is used to send data from one instance of VOLTTRON to -another. This agents primary purpose is to allow the target instance's pubsub -bus to simulate data coming from a real device. If the target instance -becomes unavailable or one of the "required agents" becomes unavailable then -the cache of this agent will build up until it reaches it's maximum capacity -or the instance and agents come back online. - -The Forward Historian now uses the configuration store for storing its -configurations. This allows dynamic updating of configuration without having -to rebuild the agent. - -FAQ /Notes ----------- - -* By default the Forward Historian adds an X-Forwarded and X-Forwarded-From -header to the forwarded message. The X-Forwarded-From uses the instance-name -of the platform (ip address:port by default). - -Configuration Options ---------------------- - -The following JSON configuration file shows all the options currently supported -by the ForwardHistorian agent. - -.. code-block:: python - - { - # destination-serverkey - # The destination instance's publickey. Required if the - # destination-vip-address has not been added to the known-host file. - # See vctl auth --help for all instance security options. - # - # This can be retrieved either through the command: - # vctl auth serverkey - # Or if the web is enabled on the destination through the browser at: - # http(s)://hostaddress:port/discovery/ - "destination-serverkey": null, - - # destination-vip-address - REQUIRED - # Address of the target platform. - # Examples: - # "destination-vip": "ipc://@/home/volttron/.volttron/run/vip.socket" - # "destination-vip": "tcp://127.0.0.1:22916" - "destination-vip": "tcp://:" - - # required_target_agents - # Allows checking on the remote instance to verify peer identtites - # are connected before publishing. - # - # Example: - # Require the platform.historian agent to be present on the - # destination instance before publishing. - # "required_target_agent" ["platform.historian"] - "required_target_agents": [], - - # capture_device_data - # This is True by default and allows the Forwarder to forward - # data published from the device topic - "capture_device_data": true, - - # capture_analysis_data - # This is True by default and allows the Forwarder to forward - # data published from the device topic - "capture_analysis_data": true, - - # capture_log_data - # This is True by default and allows the Forwarder to forward - # data published from the datalogger topic - "capture_log_data": true, - - # capture_record_data - # This is True by default and allows the Forwarder to forward - # data published from the record topic - "capture_record_data": true, - - # custom_topic_list - # Unlike other historians, the forward historian can re-publish from - # any topic. The custom_topic_list is prefixes to subscribe to on - # the local bus and forward to the destination instance. - "custom_topic_list": ["actuator", "alert"], - - # cache_only - # Allows one to put the forward historian in a cache only mode so that - # data is backed up while doing operations on the destination - # instance. - # - # Setting this to true will start cache to backup and not attempt - # to publish to the destination instance. - "cache_only": false, - - # topic_replace_list - Deprecated in favor of retrieving the list of - # replacements from the VCP on the current instance. - "topic_replace_list": [ - #{"from": "FromString", "to": "ToString"} - ], - - # Publish a message to the log after a certain number of "successful" - # publishes. To disable the message to not print anything set the - # count to 0. - # - # Note "successful" means that it was removed from the backup cache. - "message_publish_count": 10000 - - } diff --git a/services/core/ForwardHistorian/config_web_address b/services/core/ForwardHistorian/config_web_address new file mode 100644 index 0000000000..91aaf975f4 --- /dev/null +++ b/services/core/ForwardHistorian/config_web_address @@ -0,0 +1,4 @@ +{ + "destination-address": "https://centvolttron2:8443", + "custom_topic_list": ["heartbeat"] +} diff --git a/services/core/ForwardHistorian/forwarder/agent.py b/services/core/ForwardHistorian/forwarder/agent.py index 1faebf22f5..5ca36a1d5d 100644 --- a/services/core/ForwardHistorian/forwarder/agent.py +++ b/services/core/ForwardHistorian/forwarder/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -66,10 +66,8 @@ def historian(config_path, **kwargs): custom_topic_list = config.pop('custom_topic_list', []) topic_replace_list = config.pop('topic_replace_list', []) destination_vip = config.pop('destination-vip', None) - destination_instance_name = config.pop('destination-instance-name', None) service_topic_list = config.pop('service_topic_list', None) destination_serverkey = None - # This will trigger rmq based forwarder try: destination_address = config.pop('destination-address') except KeyError: @@ -107,7 +105,6 @@ def historian(config_path, **kwargs): topic_replace_list=topic_replace_list, required_target_agents=required_target_agents, cache_only=cache_only, - destination_instance_name=destination_instance_name, destination_address=destination_address, **kwargs) @@ -123,7 +120,6 @@ def __init__(self, destination_vip, destination_serverkey, topic_replace_list=[], required_target_agents=[], cache_only=False, - destination_instance_name=None, destination_address=None, **kwargs): kwargs["process_loop_in_greenlet"] = True @@ -140,14 +136,12 @@ def __init__(self, destination_vip, destination_serverkey, self.destination_serverkey = destination_serverkey self.required_target_agents = required_target_agents self.cache_only = cache_only - self.destination_instance_name = destination_instance_name self.destination_address = destination_address config = { "custom_topic_list": custom_topic_list, "topic_replace_list": self.topic_replace_list, "required_target_agents": self.required_target_agents, "destination_vip": self.destination_vip, - "destination_instance_name": self.destination_instance_name, "destination_serverkey": self.destination_serverkey, "cache_only": self.cache_only, "destination_address": self.destination_address @@ -164,7 +158,6 @@ def configure(self, configuration): custom_topic_set = set(configuration.get('custom_topic_list', [])) self.destination_vip = str(configuration.get('destination_vip', "")) self.destination_serverkey = str(configuration.get('destination_serverkey', "")) - self.destination_instance_name = str(configuration.get('destination_instance_name', "")) self.required_target_agents = configuration.get('required_target_agents', []) self.topic_replace_list = configuration.get('topic_replace_list', []) self.cache_only = configuration.get('cache_only', False) @@ -302,7 +295,7 @@ def capture_data(self, peer, sender, bus, topic, headers, message): # if the topic wasn't changed then we don't forward anything for # it. if topic == original_topic: - _log.warn( + _log.warning( "Topic {} not published because not anonymized.".format(original_topic)) return @@ -351,7 +344,7 @@ def publish_to_historian(self, to_publish_list): except Unreachable: skip = "Skipping publish: Target platform not running " \ "required agent {}".format(vip_id) - _log.warn(skip) + _log.warning(skip) self.vip.health.set_status( STATUS_BAD, skip) return diff --git a/services/core/ForwardHistorian/setup.py b/services/core/ForwardHistorian/setup.py index 34a6208c0f..65bf2b0a14 100644 --- a/services/core/ForwardHistorian/setup.py +++ b/services/core/ForwardHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -69,4 +69,4 @@ 'eggsecutable = ' + agent_module + ':main', ] } -) \ No newline at end of file +) diff --git a/services/core/ForwardHistorian/tests/test_forward_historian.py b/services/core/ForwardHistorian/tests/test_forward_historian.py index 9138084dcb..668ff9b98f 100644 --- a/services/core/ForwardHistorian/tests/test_forward_historian.py +++ b/services/core/ForwardHistorian/tests/test_forward_historian.py @@ -1,112 +1,681 @@ -from copy import deepcopy -from datetime import datetime -import random -import tempfile +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} +import random +import math +import os +import json import gevent import pytest from pytest import approx +from copy import deepcopy +from datetime import datetime from volttron.platform import get_services_core from volttron.platform.agent import utils -from volttron.platform import jsonapi - from volttron.platform.messaging import headers as headers_mod +from volttron.platform.vip.agent import Agent +from volttron.platform.messaging.health import STATUS_GOOD +from volttron.platform.keystore import KnownHostsStore -from volttrontesting.utils.platformwrapper import build_vip_address +# import types +DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" -BASE_FORWARD_CONFIG = { - "agentid": "forwarder1", - "destination-vip": None -} -FORWARDER_CONFIG = { - "agentid": "forwarder", - "destination-vip": {}, +forwarder_uuid = None +forwarder_config = { + "destination-vip": "", "custom_topic_list": [], "topic_replace_list": [ {"from": "PNNL/BUILDING_1", "to": "PNNL/BUILDING1_ANON"} ] } - -# Module level variables -DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" -query_points = { - "oat_point": "Building/LAB/Device/OutsideAirTemperature", - "mixed_point": "Building/LAB/Device/MixedAirTemperature", - "damper_point": "Building/LAB/Device/DamperSignal" +sqlite_config = { + "connection": { + "type": "sqlite", + "params": { + "database": 'test.sqlite' + } + } } +volttron_instance1 = None +volttron_instance2 = None + + +@pytest.fixture(scope="module") +def volttron_instances(request, get_volttron_instances): + global volttron_instance1, volttron_instance2 + + # if volttron_instance1 is None: + volttron_instance1, volttron_instance2 = get_volttron_instances(2) + + +# Fixture for setup and teardown of publish agent +@pytest.fixture(scope="module") +def publish_agent(request, volttron_instances, forwarder): + global volttron_instance1, volttron_instance2 + + # 1: Start a fake agent to publish to message bus + agent = volttron_instance1.build_agent(identity='test-agent') + + # 2: add a tear down method to stop sqlhistorian agent and the fake + # agent that published to message bus + def stop_agent(): + print("In teardown method of publish_agent") + if isinstance(agent, Agent): + agent.core.stop() + + request.addfinalizer(stop_agent) + return agent + + +@pytest.fixture(scope="module") +def query_agent(request, volttron_instances, sqlhistorian): + # 1: Start a fake agent to query the sqlhistorian in volttron_instance2 + agent = volttron_instance2.build_agent() + + # 2: add a tear down method to stop sqlhistorian agent and the fake + # agent that published to message bus + def stop_agent(): + print("In teardown method of module") + agent.core.stop() + + request.addfinalizer(stop_agent) + return agent + + +@pytest.fixture(scope="module") +def sqlhistorian(request, volttron_instances): + global volttron_instance1, volttron_instance2 + global sqlite_config + + # 1: Install historian agent + # Install and start sqlhistorian agent in instance2 + agent_uuid = volttron_instance2.install_agent( + agent_dir=get_services_core("SQLHistorian"), + config_file=sqlite_config, + start=True, + vip_identity='platform.historian') + print("sqlite historian agent id: ", agent_uuid) + + +@pytest.fixture(scope="module") +def forwarder(request, volttron_instances): + # print "Fixture forwarder" + global volttron_instance1, volttron_instance2 + + global forwarder_uuid, forwarder_config + # 1. Update destination address in forwarder configuration + + volttron_instance1.allow_all_connections() + volttron_instance2.allow_all_connections() + + # setup destination address to include keys + known_hosts_file = os.path.join(volttron_instance1.volttron_home, 'known_hosts') + known_hosts = KnownHostsStore(known_hosts_file) + known_hosts.add(volttron_instance2.vip_address, volttron_instance2.serverkey) + + forwarder_config["destination-vip"] = volttron_instance2.vip_address + forwarder_config["destination-serverkey"] = volttron_instance2.serverkey + + # 1: Install historian agent + # Install and start sqlhistorian agent in instance2 + forwarder_uuid = volttron_instance1.install_agent( + agent_dir=get_services_core("ForwardHistorian"), + config_file=forwarder_config, + start=True) + print("forwarder agent id: ", forwarder_uuid) -allforwardedmessage = [] -publishedmessages = [] +def publish(publish_agent, topic, header, message): + if isinstance(publish_agent, Agent): + publish_agent.vip.pubsub.publish('pubsub', + topic, + headers=header, + message=message).get(timeout=10) + else: + publish_agent.publish_json(topic, header, message) -def do_publish(agent1): - global publishedmessages - # Publish fake data. The format mimics the format used by VOLTTRON - # drivers. + +@pytest.mark.historian +@pytest.mark.forwarder +def test_devices_topic(publish_agent, query_agent): + """ + Test if devices topic message is getting forwarded to historian running on + another instance. Test if topic name substitutions happened. + Publish to 'devices/PNNL/BUILDING_1/Device/all' in volttron_instance1 and query + for topic 'devices/PNNL/BUILDING1_ANON/Device/all' in volttron_instance + @param publish_agent: Fake agent used to publish messages to bus in + volttron_instance1. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance1 and forwareder + agent and returns the instance of fake agent to publish + @param query_agent: Fake agent used to query sqlhistorian in + volttron_instance. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance and sqlhistorian + agent and returns the instance of a fake agent to query the historian + """ + print("\n** test_devices_topic **") + oat_reading = random.uniform(30, 100) + float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} + # Create a message for all points. + all_message = [{'OutsideAirTemperature': oat_reading}, + {'OutsideAirTemperature': float_meta}] + + # Publish messages twice + time1 = utils.format_timestamp(datetime.utcnow()) + headers = { + headers_mod.DATE: time1 + } + publish(publish_agent, 'devices/PNNL/BUILDING_1/Device/all', headers, all_message) + gevent.sleep(1) + + # Verify topic name replacement by querying the replaced topic name + # PNNL/BUILDING_1 should be replaced with PNNL/BUILDING1_ANON + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + topic='PNNL/BUILDING1_ANON/Device/OutsideAirTemperature', + start=time1, + count=20, + order="LAST_TO_FIRST").get(timeout=10) + + assert (len(result['values']) == 1) + (time1_date, time1_time) = time1.split("T") + assert (result['values'][0][0] == time1_date + 'T' + time1_time + '+00:00') + assert (result['values'][0][1] == approx(oat_reading)) + assert set(result['metadata'].items()) == set(float_meta.items()) + + +@pytest.mark.historian +@pytest.mark.forwarder +def test_analysis_topic(publish_agent, query_agent): + """ + Test if devices topic message is getting forwarded to historian running on + another instance. Test if topic name substitutions happened. + Publish to topic + 'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and + query for topic + 'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance + @param publish_agent: Fake agent used to publish messages to bus in + volttron_instance1. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance1 and forwareder + agent and returns the instance of fake agent to publish + @param query_agent: Fake agent used to query sqlhistorian in + volttron_instance. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance and sqlhistorian + agent and returns the instance of a fake agent to query the historian + """ + print("\n** test_analysis_topic **") + # Publish fake data. The format mimics the format used by VOLTTRON drivers. # Make some random readings oat_reading = random.uniform(30, 100) mixed_reading = oat_reading + random.uniform(-5, 5) damper_reading = random.uniform(0, 100) - float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} - percent_meta = {'units': '%', 'tz': 'UTC', 'type': 'float'} - # Create a message for all points. all_message = [{'OutsideAirTemperature': oat_reading, 'MixedAirTemperature': mixed_reading, 'DamperSignal': damper_reading}, - {'OutsideAirTemperature': float_meta, - 'MixedAirTemperature': float_meta, - 'DamperSignal': percent_meta + {'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', + 'type': 'float'}, + 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', + 'type': 'float'}, + 'DamperSignal': {'units': '%', 'tz': 'UTC', + 'type': 'float'} }] # Create timestamp now = utils.format_timestamp(datetime.utcnow()) - - # now = '2015-12-02T00:00:00' + print("now is ", now) headers = { headers_mod.DATE: now, headers_mod.TIMESTAMP: now } - print("Published time in header: " + now) + # Publish messages + publish(publish_agent, 'analysis/PNNL/BUILDING_1/Device', headers, all_message) + gevent.sleep(0.5) + + # pytest.set_trace() + # Query the historian + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + topic='PNNL/BUILDING1_ANON/Device/MixedAirTemperature', + start=now, + order="LAST_TO_FIRST").get(timeout=10) + print('Query Result', result) + assert (len(result['values']) == 1) + (now_date, now_time) = now.split("T") + if now_time[-1:] == 'Z': + now_time = now_time[:-1] + assert (result['values'][0][0] == now_date + 'T' + now_time + '+00:00') + assert (result['values'][0][1] == approx(mixed_reading)) + + +@pytest.mark.historian +@pytest.mark.forwarder +def test_analysis_topic_no_header(publish_agent, query_agent): + """ + Test if devices topic message is getting forwarded to historian running on + another instance. Test if topic name substitutions happened. + Publish to topic + 'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and + query for topic + 'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance + @param publish_agent: Fake agent used to publish messages to bus in + volttron_instance1. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance1 and forwareder + agent and returns the instance of fake agent to publish + @param query_agent: Fake agent used to query sqlhistorian in + volttron_instance. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance and sqlhistorian + agent and returns the instance of a fake agent to query the historian + """ + print("\n** test_analysis_topic **") + # Publish fake data. The format mimics the format used by VOLTTRON drivers. + # Make some random readings + oat_reading = random.uniform(30, 100) + mixed_reading = oat_reading + random.uniform(-5, 5) + damper_reading = random.uniform(0, 100) + + # Create a message for all points. + all_message = [{'OutsideAirTemperature': oat_reading, + 'MixedAirTemperature': mixed_reading, + 'DamperSignal': damper_reading}, + {'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', + 'type': 'float'}, + 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', + 'type': 'float'}, + 'DamperSignal': {'units': '%', 'tz': 'UTC', + 'type': 'float'} + }] + + # Create timestamp + now = datetime.utcnow().isoformat() + 'Z' + print("now is ", now) - print('ALL TOPIC IS: {}'.format(DEVICES_ALL_TOPIC)) # Publish messages - agent1.vip.pubsub.publish( - 'pubsub', DEVICES_ALL_TOPIC, headers, all_message).get(timeout=10) - publishedmessages.append(all_message) - gevent.sleep(1.5) + publish(publish_agent, 'analysis/PNNL/BUILDING_1/Device', None, all_message) + gevent.sleep(0.5) + # pytest.set_trace() + # Query the historian + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + topic='PNNL/BUILDING1_ANON/Device/MixedAirTemperature', + start=now, + order="LAST_TO_FIRST").get(timeout=10) + print('Query Result', result) + assert (len(result['values']) == 1) + assert (result['values'][0][1] == approx(mixed_reading)) -def onmessage(peer, sender, bus, topic, headers, message): - global allforwardedmessage - print('Message received Topic: {} Header: {} Message: {}' - .format(topic, headers, message)) - allforwardedmessage.append(message) - # print('received: peer=%r, sender=%r, bus=%r, topic=%r, headers=%r, message=%r' % ( - # peer, sender, bus, topic, headers, message)) + +@pytest.mark.historian +@pytest.mark.forwarder +def test_log_topic(publish_agent, query_agent): + """ + Test if log topic message is getting forwarded to historian running on + another instance. Test if topic name substitutions happened. + Publish to topic + 'datalogger/PNNL/BUILDING_1/Device' in volttron_instance1 and + query for topic + 'datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in + volttron_instance + Expected result: + Record should get entered into database with current time at time of + insertion and should ignore timestamp in header. Topic name + substitution should have happened + @param publish_agent: Fake agent used to publish messages to bus in + volttron_instance1. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance1 and forwareder + agent and returns the instance of fake agent to publish + @param query_agent: Fake agent used to query sqlhistorian in + volttron_instance. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance and sqlhistorian + agent and returns the instance of a fake agent to query the historian + """ + print("\n** test_log_topic **") + # Publish fake data. The format mimics the format used by VOLTTRON drivers. + # Make some random readings + oat_reading = random.uniform(30, 100) + mixed_reading = oat_reading + random.uniform(-5, 5) + + # Create a message for all points. + message = {'MixedAirTemperature': {'Readings': mixed_reading, + 'Units': 'F', + 'tz': 'UTC', + 'type': 'float'}} + # pytest.set_trace() + # Create timestamp + current_time = utils.format_timestamp(datetime.utcnow()) + print("current_time is ", current_time) + future_time = '2017-12-02T00:00:00' + headers = { + headers_mod.DATE: future_time, + headers_mod.TIMESTAMP: future_time + } + print("time in header is ", future_time) + + # Publish messages + publish(publish_agent, "datalogger/PNNL/BUILDING_1/Device", headers, message) + gevent.sleep(1) + + # Query the historian + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + start=current_time, + topic="datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature", + order="LAST_TO_FIRST").get(timeout=10) + print('Query Result', result) + assert (len(result['values']) == 1) + assert (result['values'][0][1] == approx(mixed_reading)) + + +@pytest.mark.historian +@pytest.mark.forwarder +def test_log_topic_no_header(publish_agent, query_agent): + """ + Test if log topic message is getting forwarded to historian running on + another instance. Test if topic name substitutions happened. + Publish to topic + 'datalogger/PNNL/BUILDING_1/Device' in volttron_instance1 and + query for topic + 'datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in + volttron_instance + @param publish_agent: Fake agent used to publish messages to bus in + volttron_instance1. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance1 and forwareder + agent and returns the instance of fake agent to publish + @param query_agent: Fake agent used to query sqlhistorian in + volttron_instance. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance and sqlhistorian + agent and returns the instance of a fake agent to query the historian + """ + print("\n** test_log_topic **") + # Publish fake data. The format mimics the format used by VOLTTRON drivers. + # Make some random readings + oat_reading = random.uniform(30, 100) + mixed_reading = oat_reading + random.uniform(-5, 5) + current_time = datetime.utcnow().isoformat() + # Create a message for all points. + message = {'MixedAirTemperature': {'Readings': mixed_reading, + 'Units': 'F', + 'tz': 'UTC', + 'type': 'float'}} + gevent.sleep(1) # sleep so that there is no side effect from earlier test + # Publish messages + publish(publish_agent, "datalogger/PNNL/BUILDING_1/Device", None, message) + gevent.sleep(0.5) + + # Query the historian + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + topic="datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature", + start=current_time, + order="LAST_TO_FIRST").get(timeout=10) + print('Query Result', result) + assert (len(result['values']) == 1) + assert (result['values'][0][1] == approx(mixed_reading)) @pytest.mark.historian -def test_reconnect_forwarder(get_volttron_instances): - from_instance, to_instance = get_volttron_instances(2, True) - to_instance.allow_all_connections() - - publisher = from_instance.build_agent() - receiver = to_instance.build_agent() - - forwarder_config = deepcopy(BASE_FORWARD_CONFIG) - #forwardtoaddr = build_vip_address(to_instance, receiver) - #print("FORWARD ADDR: {}".format(forwardtoaddr)) - forwarder_config['destination-vip'] = to_instance.vip_address - forwarder_config['destination-serverkey'] = to_instance.keystore.public - - fuuid = from_instance.install_agent( - agent_dir=get_services_core("ForwardHistorian"),start=True, - config_file=forwarder_config) - assert from_instance.is_agent_running(fuuid) +@pytest.mark.forwarder +def test_old_config(volttron_instances, forwarder): + """ + Test adding 'agentid' and 'identity' to config. identity should be + supported with "deprecated warning" and "agentid" should get ignored with a + warning message + """ + print("\n** test_old_config **") + + global forwarder_config + + forwarder_config['agentid'] = "test_forwarder_agent_id" + forwarder_config['identity'] = "second forwarder" + + # 1: Install historian agent + # Install and start sqlhistorian agent in instance2 + forwarder_uuid = volttron_instance1.install_agent( + agent_dir=get_services_core("ForwardHistorian"), config_file=forwarder_config, start=True) + + print("forwarder agent id: ", forwarder_uuid) + + +# @pytest.mark.historian +# @pytest.mark.forwarder +# def test_actuator_topic(publish_agent, query_agent): +# print("\n** test_actuator_topic **") +# global volttron_instance1, volttron_instance2 +# +# # Create platform driver config and 4 fake devices each with 6 points +# process = Popen(['python', 'config_builder.py', '--count=1', +# '--publish-only-depth-all', +# 'fake', 'fake_unit_testing.csv', 'null'], +# env=volttron_instance1.env, +# cwd='scripts/scalability-testing', +# stdout=subprocess.PIPE, stderr=subprocess.PIPE) +# result = process.wait() +# print(result) +# assert result == 0 +# +# # Start the platform driver agent which would intern start the fake driver +# # using the configs created above +# platform_uuid = volttron_instance1.install_agent( +# agent_dir="services/core/PlatformDriverAgent", +# config_file="scripts/scalability-testing/configs/config", +# start=True) +# print("agent id: ", platform_uuid) +# gevent.sleep(2) # wait for the agent to start and start the devices +# +# # Start the actuator agent through which publish agent should communicate +# # to fake device. Start the platform driver agent which would intern start +# # the fake driver using the configs created above +# actuator_uuid = volttron_instance1.install_agent( +# agent_dir="services/core/ActuatorAgent", +# config_file="services/core/ActuatorAgent/tests/actuator.config", +# start=True) +# print("agent id: ", actuator_uuid) +# +# listener_uuid = volttron_instance2.install_agent( +# agent_dir="examples/ListenerAgent", +# config_file="examples/ListenerAgent/config", +# start=True) +# print("agent id: ", listener_uuid) +# +# try: +# # Make query agent running in instance two subscribe to +# # actuator_schedule_result topic +# # query_agent.callback = types.MethodType(callback, query_agent) +# query_agent.callback = MagicMock(name="callback") +# # subscribe to schedule response topic +# query_agent.vip.pubsub.subscribe( +# peer='pubsub', +# prefix=topics.ACTUATOR_SCHEDULE_RESULT, +# callback=query_agent.callback).get() +# +# # Now publish in volttron_instance1 +# +# start = str(datetime.now()) +# end = str(datetime.now() + timedelta(seconds=2)) +# header = { +# 'type': 'NEW_SCHEDULE', +# 'requesterID': 'test-agent', # The name of the requesting agent. +# 'taskID': 'task_schedule_response', +# 'priority': 'LOW' # ('HIGH, 'LOW', 'LOW_PREEMPT'). +# } +# msg = [ +# ['fakedriver0', start, end] +# ] +# # reset mock to ignore any previous callback +# publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg) +# gevent.sleep(1) # wait for topic to be forwarded and callback to happen +# +# # assert query_agent.callback.call_count == 1 +# print ('call args ', query_agent.callback.call_args_list) +# # assert query_agent.callback.call_args[0][1] == 'platform.actuator' +# assert query_agent.callback.call_args[0][3] == \ +# topics.ACTUATOR_SCHEDULE_RESULT +# result_header = query_agent.callback.call_args[0][4] +# result_message = query_agent.callback.call_args[0][5] +# assert result_header['type'] == 'NEW_SCHEDULE' +# assert result_header['taskID'] == 'task_schedule_response' +# assert result_header['requesterID'] in ['test-agent', 'pubsub.compat'] +# assert result_message['result'] == 'SUCCESS' +# finally: +# volttron_instance1.stop_agent(platform_uuid) +# volttron_instance1.remove_agent(platform_uuid) +# volttron_instance1.stop_agent(actuator_uuid) +# volttron_instance1.remove_agent(actuator_uuid) +# volttron_instance2.stop_agent(listener_uuid) +# volttron_instance2.remove_agent(listener_uuid) + + +@pytest.mark.historian +@pytest.mark.forwarder +def test_nan_value(publish_agent, query_agent): + """ + Test if devices topic message is getting forwarded to historian running on + another instance. Test if topic name substitutions happened. + Publish to 'devices/PNNL/BUILDING_1/Device/all' in volttron_instance1 and query + for topic 'devices/PNNL/BUILDING1_ANON/Device/all' in volttron_instance2 + + @param publish_agent: Fake agent used to publish messages to bus in + volttron_instance1. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance1 and forwareder + agent and returns the instance of fake agent to publish + @param query_agent: Fake agent used to query sqlhistorian in + volttron_instance2. Calling this fixture makes sure all the dependant + fixtures are called to setup and start volttron_instance2 and sqlhistorian + agent and returns the instance of a fake agent to query the historian + """ + print("\n** test_devices_topic **") + float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} + # Create a message for all points. + all_message = [{'nan_value': float("NaN")}, + {'nan_value': float_meta}] + + # Publish messages twice + time1 = utils.format_timestamp(datetime.utcnow()) + headers = { + headers_mod.DATE: time1, + headers_mod.TIMESTAMP: time1 + } + publish(publish_agent, 'devices/PNNL/BUILDING_1/Device/all', headers, all_message) + gevent.sleep(1) + + # Verify topic name replacement by querying the replaced topic name + # PNNL/BUILDING_1 should be replaced with PNNL/BUILDING1_ANON + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + topic='PNNL/BUILDING1_ANON/Device/nan_value', + start=time1, + count=20, + order="LAST_TO_FIRST").get(timeout=10) + + assert (len(result['values']) == 1) + (time1_date, time1_time) = time1.split("T") + assert (result['values'][0][0] == time1_date + 'T' + time1_time + '+00:00') + assert (math.isnan(result['values'][0][1])) + assert set(result['metadata'].items()) == set(float_meta.items()) + + +@pytest.mark.historian +@pytest.mark.forwarder +def test_reconnect_forwarder(volttron_instances): + allforwardedmessage = [] + publishedmessages = [] + + def onmessage(peer, sender, bus, topic, headers, message): + print('Message received Topic: {} Header: {} Message: {}'.format(topic, headers, message)) + allforwardedmessage.append(message) + + def do_publish(agent1): + # Publish fake data. The format mimics the format used by VOLTTRON drivers. + # Make some random readings + oat_reading = random.uniform(30, 100) + mixed_reading = oat_reading + random.uniform(-5, 5) + damper_reading = random.uniform(0, 100) + + float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} + percent_meta = {'units': '%', 'tz': 'UTC', 'type': 'float'} + + # Create a message for all points. + all_message = [{'OutsideAirTemperature': oat_reading, + 'MixedAirTemperature': mixed_reading, + 'DamperSignal': damper_reading}, + {'OutsideAirTemperature': float_meta, + 'MixedAirTemperature': float_meta, + 'DamperSignal': percent_meta + }] + + # Create timestamp + now = utils.format_timestamp(datetime.utcnow()) + + # now = '2015-12-02T00:00:00' + headers = { + headers_mod.DATE: now, + headers_mod.TIMESTAMP: now + } + print("Published time in header: " + now) + + print('ALL TOPIC IS: {}'.format(DEVICES_ALL_TOPIC)) + # Publish messages + agent1.vip.pubsub.publish('pubsub', DEVICES_ALL_TOPIC, headers, all_message).get(timeout=10) + publishedmessages.append(all_message) + gevent.sleep(1.5) + + global volttron_instance1, volttron_instance2 + volttron_instance2.allow_all_connections() + + publisher = volttron_instance1.build_agent() + receiver = volttron_instance2.build_agent() + + config = deepcopy(forwarder_config) + config['destination-vip'] = volttron_instance2.vip_address + config['destination-serverkey'] = volttron_instance2.keystore.public + + fuuid = volttron_instance1.install_agent( + agent_dir=get_services_core("ForwardHistorian"), start=True, config_file=config) + assert volttron_instance1.is_agent_running(fuuid) print('Before Subscribing') receiver.vip.pubsub.subscribe('pubsub', '', callback=onmessage) publisher.vip.pubsub.publish('pubsub', 'stuff', message='Fuzzy') @@ -116,5 +685,57 @@ def test_reconnect_forwarder(get_volttron_instances): for i in range(num_messages): do_publish(publisher) - for a,p in zip(allforwardedmessage, publishedmessages): + for a, p in zip(allforwardedmessage, publishedmessages): assert a[0] == approx(p[0]) + + +@pytest.mark.historian +@pytest.mark.forwarder +def test_default_config(volttron_instances, query_agent): + """ + Test the default configuration file included with the agent + """ + global volttron_instance1 + + publish_agent = volttron_instance1.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("ForwardHistorian"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance1.install_agent( + agent_dir=get_services_core("ForwardHistorian"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + # do basic sanity check + + oat_reading = random.uniform(30, 100) + float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} + + all_message = [{'OutsideAirTemperature': oat_reading}, + {'OutsideAirTemperature': float_meta}] + + time1 = utils.format_timestamp(datetime.utcnow()) + headers = { + headers_mod.DATE: time1 + } + publish(publish_agent, 'devices/PNNL/BUILDING_1/Device/all', headers, all_message) + gevent.sleep(1) + + result = query_agent.vip.rpc.call( + 'platform.historian', + 'query', + topic='PNNL/BUILDING1_ANON/Device/OutsideAirTemperature', + start=time1, + count=20, + order="LAST_TO_FIRST").get(timeout=10) + + assert (len(result['values']) == 1) + (time1_date, time1_time) = time1.split("T") + assert (result['values'][0][0] == time1_date + 'T' + time1_time + '+00:00') + assert (result['values'][0][1] == approx(oat_reading)) + assert set(result['metadata'].items()) == set(float_meta.items()) diff --git a/services/core/ForwardHistorian/tests/test_forwardhistorian.py b/services/core/ForwardHistorian/tests/test_forwardhistorian.py deleted file mode 100644 index 130fdfdb87..0000000000 --- a/services/core/ForwardHistorian/tests/test_forwardhistorian.py +++ /dev/null @@ -1,628 +0,0 @@ -# -*- coding: utf-8 -*- {{{ -# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: -# -# Copyright 2019, Battelle Memorial Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This material was prepared as an account of work sponsored by an agency of -# the United States Government. Neither the United States Government nor the -# United States Department of Energy, nor Battelle, nor any of their -# employees, nor any jurisdiction or organization that has cooperated in the -# development of these materials, makes any warranty, express or -# implied, or assumes any legal liability or responsibility for the accuracy, -# completeness, or usefulness or any information, apparatus, product, -# software, or process disclosed, or represents that its use would not infringe -# privately owned rights. Reference herein to any specific commercial product, -# process, or service by trade name, trademark, manufacturer, or otherwise -# does not necessarily constitute or imply its endorsement, recommendation, or -# favoring by the United States Government or any agency thereof, or -# Battelle Memorial Institute. The views and opinions of authors expressed -# herein do not necessarily state or reflect those of the -# United States Government or any agency thereof. -# -# PACIFIC NORTHWEST NATIONAL LABORATORY operated by -# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY -# under Contract DE-AC05-76RL01830 -# }}} -import random -import tempfile -import os -from datetime import datetime, timedelta - -import gevent -import pytest -from pytest import approx - -from volttron.platform import get_services_core -from volttron.platform.agent import utils -from volttron.platform.messaging import headers as headers_mod -from volttron.platform.vip.agent import Agent -from volttron.platform.keystore import KnownHostsStore - -# import types - -forwarder_uuid = None -forwarder_config = { - "destination-vip": "", - "custom_topic_list": [], - "topic_replace_list": [ - {"from": "PNNL/BUILDING_1", "to": "PNNL/BUILDING1_ANON"} - ] -} -sqlite_config = { - "connection": { - "type": "sqlite", - "params": { - "database": 'test.sqlite' - } - } -} - -volttron_instance1 = None -volttron_instance2 = None - -@pytest.fixture(scope="module") -def volttron_instances(request, get_volttron_instances): - global volttron_instance1, volttron_instance2 - - # if volttron_instance1 is None: - volttron_instance1, volttron_instance2 = get_volttron_instances(2) - - -# Fixture for setup and teardown of publish agent -@pytest.fixture(scope="module") -def publish_agent(request, volttron_instances, forwarder): - global volttron_instance1, volttron_instance2 - - # 1: Start a fake agent to publish to message bus - agent = volttron_instance1.build_agent(identity='test-agent') - - # 2: add a tear down method to stop sqlhistorian agent and the fake - # agent that published to message bus - def stop_agent(): - print("In teardown method of publish_agent") - if isinstance(agent, Agent): - agent.core.stop() - - request.addfinalizer(stop_agent) - return agent - - -@pytest.fixture(scope="module") -def query_agent(request, volttron_instances, sqlhistorian): - - # 1: Start a fake agent to query the sqlhistorian in volttron_instance2 - agent = volttron_instance2.build_agent() - - # 2: add a tear down method to stop sqlhistorian agent and the fake - # agent that published to message bus - def stop_agent(): - print("In teardown method of module") - agent.core.stop() - - request.addfinalizer(stop_agent) - return agent - - -@pytest.fixture(scope="module") -def sqlhistorian(request, volttron_instances): - global volttron_instance1, volttron_instance2 - global sqlite_config - - # 1: Install historian agent - # Install and start sqlhistorian agent in instance2 - agent_uuid = volttron_instance2.install_agent( - agent_dir=get_services_core("SQLHistorian"), - config_file=sqlite_config, - start=True, - vip_identity='platform.historian') - print("sqlite historian agent id: ", agent_uuid) - - -@pytest.fixture(scope="module") -def forwarder(request, volttron_instances): - #print "Fixture forwarder" - global volttron_instance1, volttron_instance2 - - global forwarder_uuid, forwarder_config - # 1. Update destination address in forwarder configuration - - volttron_instance1.allow_all_connections() - volttron_instance2.allow_all_connections() - - # setup destination address to include keys - known_hosts_file = os.path.join(volttron_instance1.volttron_home, 'known_hosts') - known_hosts = KnownHostsStore(known_hosts_file) - known_hosts.add(volttron_instance2.vip_address, volttron_instance2.serverkey) - - forwarder_config["destination-vip"] = volttron_instance2.vip_address - forwarder_config["destination-serverkey"] = volttron_instance2.serverkey - - # 1: Install historian agent - # Install and start sqlhistorian agent in instance2 - forwarder_uuid = volttron_instance1.install_agent( - agent_dir=get_services_core("ForwardHistorian"), - config_file=forwarder_config, - start=True) - print("forwarder agent id: ", forwarder_uuid) - - -def publish(publish_agent, topic, header, message): - if isinstance(publish_agent, Agent): - publish_agent.vip.pubsub.publish('pubsub', - topic, - headers=header, - message=message).get(timeout=10) - else: - publish_agent.publish_json(topic, header, message) - -@pytest.mark.historian -@pytest.mark.forwarder -def test_devices_topic(publish_agent, query_agent): - """ - Test if devices topic message is getting forwarded to historian running on - another instance. Test if topic name substitutions happened. - Publish to 'devices/PNNL/BUILDING_1/Device/all' in volttron_instance1 and query - for topic 'devices/PNNL/BUILDING1_ANON/Device/all' in volttron_instance - - @param publish_agent: Fake agent used to publish messages to bus in - volttron_instance1. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance1 and forwareder - agent and returns the instance of fake agent to publish - @param query_agent: Fake agent used to query sqlhistorian in - volttron_instance. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance and sqlhistorian - agent and returns the instance of a fake agent to query the historian - """ - print("\n** test_devices_topic **") - oat_reading = random.uniform(30, 100) - float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} - # Create a message for all points. - all_message = [{'OutsideAirTemperature': oat_reading}, - {'OutsideAirTemperature': float_meta}] - - # Publish messages twice - time1 = utils.format_timestamp(datetime.utcnow()) - headers = { - headers_mod.DATE: time1 - } - publish(publish_agent, 'devices/PNNL/BUILDING_1/Device/all', headers, all_message) - gevent.sleep(1) - - # Verify topic name replacement by querying the replaced topic name - # PNNL/BUILDING_1 should be replaced with PNNL/BUILDING1_ANON - result = query_agent.vip.rpc.call( - 'platform.historian', - 'query', - topic='PNNL/BUILDING1_ANON/Device/OutsideAirTemperature', - start=time1, - count=20, - order="LAST_TO_FIRST").get(timeout=10) - - assert (len(result['values']) == 1) - (time1_date, time1_time) = time1.split("T") - assert (result['values'][0][0] == time1_date + 'T' + time1_time + '+00:00') - assert (result['values'][0][1] == approx(oat_reading)) - assert set(result['metadata'].items()) == set(float_meta.items()) - - -@pytest.mark.historian -@pytest.mark.forwarder -def test_analysis_topic(publish_agent, query_agent): - """ - Test if devices topic message is getting forwarded to historian running on - another instance. Test if topic name substitutions happened. - Publish to topic - 'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and - query for topic - 'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance - - @param publish_agent: Fake agent used to publish messages to bus in - volttron_instance1. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance1 and forwareder - agent and returns the instance of fake agent to publish - @param query_agent: Fake agent used to query sqlhistorian in - volttron_instance. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance and sqlhistorian - agent and returns the instance of a fake agent to query the historian - """ - print("\n** test_analysis_topic **") - # Publish fake data. The format mimics the format used by VOLTTRON drivers. - # Make some random readings - oat_reading = random.uniform(30, 100) - mixed_reading = oat_reading + random.uniform(-5, 5) - damper_reading = random.uniform(0, 100) - - # Create a message for all points. - all_message = [{'OutsideAirTemperature': oat_reading, - 'MixedAirTemperature': mixed_reading, - 'DamperSignal': damper_reading}, - {'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}, - 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}, - 'DamperSignal': {'units': '%', 'tz': 'UTC', - 'type': 'float'} - }] - - # Create timestamp - now = utils.format_timestamp(datetime.utcnow()) - print("now is ", now) - headers = { - headers_mod.DATE: now, - headers_mod.TIMESTAMP: now - } - # Publish messages - publish(publish_agent, 'analysis/PNNL/BUILDING_1/Device', - headers, all_message) - gevent.sleep(0.5) - - # pytest.set_trace() - # Query the historian - result = query_agent.vip.rpc.call( - 'platform.historian', - 'query', - topic='PNNL/BUILDING1_ANON/Device/MixedAirTemperature', - start=now, - order="LAST_TO_FIRST").get(timeout=10) - print('Query Result', result) - assert (len(result['values']) == 1) - (now_date, now_time) = now.split("T") - if now_time[-1:] == 'Z': - now_time = now_time[:-1] - assert (result['values'][0][0] == now_date + 'T' + now_time + '+00:00') - assert (result['values'][0][1] == approx(mixed_reading)) - - -@pytest.mark.historian -@pytest.mark.forwarder -def test_analysis_topic_no_header(publish_agent, query_agent): - """ - Test if devices topic message is getting forwarded to historian running on - another instance. Test if topic name substitutions happened. - Publish to topic - 'analysis/PNNL/BUILDING_1/Device/MixedAirTemperature' in volttron_instance1 and - query for topic - 'PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in volttron_instance - - @param publish_agent: Fake agent used to publish messages to bus in - volttron_instance1. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance1 and forwareder - agent and returns the instance of fake agent to publish - @param query_agent: Fake agent used to query sqlhistorian in - volttron_instance. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance and sqlhistorian - agent and returns the instance of a fake agent to query the historian - """ - print("\n** test_analysis_topic **") - # Publish fake data. The format mimics the format used by VOLTTRON drivers. - # Make some random readings - oat_reading = random.uniform(30, 100) - mixed_reading = oat_reading + random.uniform(-5, 5) - damper_reading = random.uniform(0, 100) - - # Create a message for all points. - all_message = [{'OutsideAirTemperature': oat_reading, - 'MixedAirTemperature': mixed_reading, - 'DamperSignal': damper_reading}, - {'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}, - 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}, - 'DamperSignal': {'units': '%', 'tz': 'UTC', - 'type': 'float'} - }] - - # Create timestamp - now = datetime.utcnow().isoformat() + 'Z' - print("now is ", now) - - # Publish messages - publish(publish_agent, 'analysis/PNNL/BUILDING_1/Device', - None, all_message) - gevent.sleep(0.5) - - # pytest.set_trace() - # Query the historian - result = query_agent.vip.rpc.call( - 'platform.historian', - 'query', - topic='PNNL/BUILDING1_ANON/Device/MixedAirTemperature', - start=now, - order="LAST_TO_FIRST").get(timeout=10) - print('Query Result', result) - assert (len(result['values']) == 1) - assert (result['values'][0][1] == approx(mixed_reading)) - - -@pytest.mark.historian -@pytest.mark.forwarder -def test_log_topic(publish_agent, query_agent): - """ - Test if log topic message is getting forwarded to historian running on - another instance. Test if topic name substitutions happened. - Publish to topic - 'datalogger/PNNL/BUILDING_1/Device' in volttron_instance1 and - query for topic - 'datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in - volttron_instance - Expected result: - Record should get entered into database with current time at time of - insertion and should ignore timestamp in header. Topic name - substitution should have happened - - - @param publish_agent: Fake agent used to publish messages to bus in - volttron_instance1. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance1 and forwareder - agent and returns the instance of fake agent to publish - @param query_agent: Fake agent used to query sqlhistorian in - volttron_instance. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance and sqlhistorian - agent and returns the instance of a fake agent to query the historian - """ - - print("\n** test_log_topic **") - # Publish fake data. The format mimics the format used by VOLTTRON drivers. - # Make some random readings - oat_reading = random.uniform(30, 100) - mixed_reading = oat_reading + random.uniform(-5, 5) - - # Create a message for all points. - message = {'MixedAirTemperature': {'Readings': mixed_reading, - 'Units': 'F', - 'tz': 'UTC', - 'type': 'float'}} - # pytest.set_trace() - # Create timestamp - current_time = utils.format_timestamp(datetime.utcnow()) - print("current_time is ", current_time) - future_time = '2017-12-02T00:00:00' - headers = { - headers_mod.DATE: future_time, - headers_mod.TIMESTAMP: future_time - } - print("time in header is ", future_time) - - # Publish messages - publish(publish_agent, "datalogger/PNNL/BUILDING_1/Device", headers, message) - gevent.sleep(1) - - # Query the historian - result = query_agent.vip.rpc.call( - 'platform.historian', - 'query', - start=current_time, - topic="datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature", - order="LAST_TO_FIRST").get(timeout=10) - print('Query Result', result) - assert (len(result['values']) == 1) - assert (result['values'][0][1] == approx(mixed_reading)) - - -@pytest.mark.historian -@pytest.mark.forwarder -def test_log_topic_no_header(publish_agent, query_agent): - """ - Test if log topic message is getting forwarded to historian running on - another instance. Test if topic name substitutions happened. - Publish to topic - 'datalogger/PNNL/BUILDING_1/Device' in volttron_instance1 and - query for topic - 'datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature' in - volttron_instance - - @param publish_agent: Fake agent used to publish messages to bus in - volttron_instance1. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance1 and forwareder - agent and returns the instance of fake agent to publish - @param query_agent: Fake agent used to query sqlhistorian in - volttron_instance. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance and sqlhistorian - agent and returns the instance of a fake agent to query the historian - """ - - print("\n** test_log_topic **") - # Publish fake data. The format mimics the format used by VOLTTRON drivers. - # Make some random readings - oat_reading = random.uniform(30, 100) - mixed_reading = oat_reading + random.uniform(-5, 5) - current_time = datetime.utcnow().isoformat() - # Create a message for all points. - message = {'MixedAirTemperature': {'Readings': mixed_reading, - 'Units': 'F', - 'tz': 'UTC', - 'type': 'float'}} - gevent.sleep(1) # sleep so that there is no side effect from earlier test - # Publish messages - publish(publish_agent, "datalogger/PNNL/BUILDING_1/Device", None, message) - gevent.sleep(0.5) - - # Query the historian - result = query_agent.vip.rpc.call( - 'platform.historian', - 'query', - topic="datalogger/PNNL/BUILDING1_ANON/Device/MixedAirTemperature", - start=current_time, - order="LAST_TO_FIRST").get(timeout=10) - print('Query Result', result) - assert (len(result['values']) == 1) - assert (result['values'][0][1] == approx(mixed_reading)) - - -@pytest.mark.historian -@pytest.mark.forwarder -def test_old_config(volttron_instances, forwarder): - """ - Test adding 'agentid' and 'identity' to config. identity should be - supported with "deprecated warning" and "agentid" should get ignored with a - warning message - """ - - print("\n** test_old_config **") - - global forwarder_config - - forwarder_config['agentid'] = "test_forwarder_agent_id" - forwarder_config['identity'] = "second forwarder" - - # 1: Install historian agent - # Install and start sqlhistorian agent in instance2 - forwarder_uuid = volttron_instance1.install_agent( - agent_dir=get_services_core("ForwardHistorian"), - config_file=forwarder_config, start=True) - - print("forwarder agent id: ", forwarder_uuid) - - -# @pytest.mark.historian -# @pytest.mark.forwarder -# def test_actuator_topic(publish_agent, query_agent): -# print("\n** test_actuator_topic **") -# global volttron_instance1, volttron_instance2 -# -# # Create master driver config and 4 fake devices each with 6 points -# process = Popen(['python', 'config_builder.py', '--count=1', -# '--publish-only-depth-all', -# 'fake', 'fake_unit_testing.csv', 'null'], -# env=volttron_instance1.env, -# cwd='scripts/scalability-testing', -# stdout=subprocess.PIPE, stderr=subprocess.PIPE) -# result = process.wait() -# print(result) -# assert result == 0 -# -# # Start the master driver agent which would intern start the fake driver -# # using the configs created above -# master_uuid = volttron_instance1.install_agent( -# agent_dir="services/core/MasterDriverAgent", -# config_file="scripts/scalability-testing/configs/config", -# start=True) -# print("agent id: ", master_uuid) -# gevent.sleep(2) # wait for the agent to start and start the devices -# -# # Start the actuator agent through which publish agent should communicate -# # to fake device. Start the master driver agent which would intern start -# # the fake driver using the configs created above -# actuator_uuid = volttron_instance1.install_agent( -# agent_dir="services/core/ActuatorAgent", -# config_file="services/core/ActuatorAgent/tests/actuator.config", -# start=True) -# print("agent id: ", actuator_uuid) -# -# listener_uuid = volttron_instance2.install_agent( -# agent_dir="examples/ListenerAgent", -# config_file="examples/ListenerAgent/config", -# start=True) -# print("agent id: ", listener_uuid) -# -# try: -# # Make query agent running in instance two subscribe to -# # actuator_schedule_result topic -# # query_agent.callback = types.MethodType(callback, query_agent) -# query_agent.callback = MagicMock(name="callback") -# # subscribe to schedule response topic -# query_agent.vip.pubsub.subscribe( -# peer='pubsub', -# prefix=topics.ACTUATOR_SCHEDULE_RESULT, -# callback=query_agent.callback).get() -# -# # Now publish in volttron_instance1 -# -# start = str(datetime.now()) -# end = str(datetime.now() + timedelta(seconds=2)) -# header = { -# 'type': 'NEW_SCHEDULE', -# 'requesterID': 'test-agent', # The name of the requesting agent. -# 'taskID': 'task_schedule_response', -# 'priority': 'LOW' # ('HIGH, 'LOW', 'LOW_PREEMPT'). -# } -# msg = [ -# ['fakedriver0', start, end] -# ] -# # reset mock to ignore any previous callback -# publish(publish_agent, topics.ACTUATOR_SCHEDULE_REQUEST, header, msg) -# gevent.sleep(1) # wait for topic to be forwarded and callback to happen -# -# # assert query_agent.callback.call_count == 1 -# print ('call args ', query_agent.callback.call_args_list) -# # assert query_agent.callback.call_args[0][1] == 'platform.actuator' -# assert query_agent.callback.call_args[0][3] == \ -# topics.ACTUATOR_SCHEDULE_RESULT -# result_header = query_agent.callback.call_args[0][4] -# result_message = query_agent.callback.call_args[0][5] -# assert result_header['type'] == 'NEW_SCHEDULE' -# assert result_header['taskID'] == 'task_schedule_response' -# assert result_header['requesterID'] in ['test-agent', 'pubsub.compat'] -# assert result_message['result'] == 'SUCCESS' -# finally: -# volttron_instance1.stop_agent(master_uuid) -# volttron_instance1.remove_agent(master_uuid) -# volttron_instance1.stop_agent(actuator_uuid) -# volttron_instance1.remove_agent(actuator_uuid) -# volttron_instance2.stop_agent(listener_uuid) -# volttron_instance2.remove_agent(listener_uuid) - - -@pytest.mark.historian -@pytest.mark.forwarder -def test_nan_value(publish_agent, query_agent): - """ - Test if devices topic message is getting forwarded to historian running on - another instance. Test if topic name substitutions happened. - Publish to 'devices/PNNL/BUILDING_1/Device/all' in volttron_instance1 and query - for topic 'devices/PNNL/BUILDING1_ANON/Device/all' in volttron_instance2 - - @param publish_agent: Fake agent used to publish messages to bus in - volttron_instance1. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance1 and forwareder - agent and returns the instance of fake agent to publish - @param query_agent: Fake agent used to query sqlhistorian in - volttron_instance2. Calling this fixture makes sure all the dependant - fixtures are called to setup and start volttron_instance2 and sqlhistorian - agent and returns the instance of a fake agent to query the historian - """ - import math - print("\n** test_devices_topic **") - oat_reading = random.uniform(30, 100) - float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} - # Create a message for all points. - all_message = [{'nan_value': float("NaN")}, - {'nan_value': float_meta}] - - # Publish messages twice - time1 = utils.format_timestamp(datetime.utcnow()) - headers = { - headers_mod.DATE: time1, - headers_mod.TIMESTAMP: time1 - } - publish(publish_agent, 'devices/PNNL/BUILDING_1/Device/all', headers, all_message) - gevent.sleep(1) - - # Verify topic name replacement by querying the replaced topic name - # PNNL/BUILDING_1 should be replaced with PNNL/BUILDING1_ANON - result = query_agent.vip.rpc.call( - 'platform.historian', - 'query', - topic='PNNL/BUILDING1_ANON/Device/nan_value', - start=time1, - count=20, - order="LAST_TO_FIRST").get(timeout=10) - - assert (len(result['values']) == 1) - (time1_date, time1_time) = time1.split("T") - assert (result['values'][0][0] == time1_date + 'T' + time1_time + '+00:00') - assert (math.isnan(result['values'][0][1])) - assert set(result['metadata'].items()) == set(float_meta.items()) - diff --git a/services/core/ForwardHistorian/tests/test_multi_messagebus_forwarder.py b/services/core/ForwardHistorian/tests/test_multi_messagebus_forwarder.py index bc5ca5be46..1f8eb55d96 100644 --- a/services/core/ForwardHistorian/tests/test_multi_messagebus_forwarder.py +++ b/services/core/ForwardHistorian/tests/test_multi_messagebus_forwarder.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -104,8 +104,8 @@ def test_multi_messagebus_forwarder(multi_messagebus_forwarder): subscriber_agent.callback = MagicMock(name="callback") subscriber_agent.callback.reset_mock() subscriber_agent.vip.pubsub.subscribe(peer='pubsub', - prefix='devices', - callback=subscriber_agent.callback).get() + prefix='devices', + callback=subscriber_agent.callback).get() subscriber_agent.analysis_callback = MagicMock(name="analysis_callback") subscriber_agent.analysis_callback.reset_mock() @@ -165,7 +165,7 @@ def test_multi_messagebus_custom_topic_forwarder(multi_messagebus_forwarder): value = 78.5 + i publish(publish_agent, topic, headers, value) gevent.sleep(0.1) - gevent.sleep(1) + gevent.sleep(5) assert subscriber_agent.callback.call_count == 5 @@ -176,6 +176,7 @@ def test_multi_messagebus_forwarder_reconnection(multi_messagebus_forwarder): :return: """ from_instance, to_instance = multi_messagebus_forwarder + orig_to_instance_skip_cleanup = to_instance.skip_cleanup to_instance.skip_cleanup = True # Restart target platform @@ -184,6 +185,8 @@ def test_multi_messagebus_forwarder_reconnection(multi_messagebus_forwarder): to_instance.restart_platform() gevent.sleep(5) + to_instance.skip_cleanup = orig_to_instance_skip_cleanup + publish_agent = from_instance.dynamic_agent subscriber_agent = to_instance.dynamic_agent @@ -209,3 +212,4 @@ def test_multi_messagebus_forwarder_reconnection(multi_messagebus_forwarder): gevent.sleep(3) assert subscriber_agent.callback.call_count == 3 + diff --git a/services/core/IEEE2030_5Agent/IEEE2030_5/agent.py b/services/core/IEEE2030_5Agent/IEEE2030_5/agent.py index 42cfc8774c..332834b2e3 100644 --- a/services/core/IEEE2030_5Agent/IEEE2030_5/agent.py +++ b/services/core/IEEE2030_5Agent/IEEE2030_5/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -98,7 +98,7 @@ class IEEE2030_5Agent(Agent): End device configuration is outlined in the agent config file. IEEE 2030.5 data is exposed via get_point(), get_points() and set_point() calls. - A IEEE 2030.5 device driver (IEEE2030_5.py under MasterDriverAgent) can be configured, + A IEEE 2030.5 device driver (IEEE2030_5.py under PlatformDriverAgent) can be configured, which gets and sets data by sending RPCs to this agent. For further information about this subsystem, please see the VOLTTRON @@ -108,7 +108,7 @@ class IEEE2030_5Agent(Agent): This agent can be installed as follows: export IEEE2030_5_ROOT=$VOLTTRON_ROOT/services/core/IEEE2030_5Agent cd $VOLTTRON_ROOT - python scripts/install-agent.py -s $IEEE2030_5_ROOT -i IEEE2030_5agent -c $IEEE2030_5_ROOT/IEEE2030_5.config + python scripts/install-agent.py -s $IEEE2030_5_ROOT -i IEEE2030_5agent -c $IEEE2030_5_ROOT/config -t IEEE2030_5agent -f """ diff --git a/services/core/IEEE2030_5Agent/README.md b/services/core/IEEE2030_5Agent/README.md new file mode 100644 index 0000000000..62792c4b82 --- /dev/null +++ b/services/core/IEEE2030_5Agent/README.md @@ -0,0 +1,37 @@ +# IEEE2030_5 Agent +Agent that handles IEEE 2030.5 communication. +IEEE2030_5Agent uses the VOLTTRON web service to communicate with IEEE 2030.5 end devices. +End device configuration is outlined in the agent config file. + +IEEE 2030.5 data is exposed via get_point(), get_points() and set_point() calls. +A IEEE 2030.5 device driver (IEEE2030_5.py under PlatformDriverAgent) can be configured, +which gets and sets data by sending RPCs to this agent. + +For further information about this subsystem, please see the VOLTTRON +IEEE 2030.5 DER Support specification, which is located in VOLTTRON readthedocs +under specifications/IEEE2030_5_agent.html. + +## Configuration + +``` {.python} +{ + "devices": [ + { + "sfdi": "097935300833", + "lfdi": "247bd68e3378fe57ba604e3c8bdf9e3f78a3d743", + "load_shed_device_category": "0200", + "pin_code": "130178" + }, + { + "sfdi": "111576577659", + "lfdi": "2990c58a59935a7d5838c952b1a453c967341a07", + "load_shed_device_category": "0200", + "pin_code": "130178" + } + ], + "IEEE2030_5_server_sfdi": "413707194130", + "IEEE2030_5_server_lfdi": "29834592834729384728374562039847629", + "load_shed_device_category": "0020", + "timezone": "America/Los_Angeles" +} +``` diff --git a/services/core/IEEE2030_5Agent/ieee2030_5.config b/services/core/IEEE2030_5Agent/config similarity index 100% rename from services/core/IEEE2030_5Agent/ieee2030_5.config rename to services/core/IEEE2030_5Agent/config diff --git a/services/core/IEEE2030_5Agent/setup.py b/services/core/IEEE2030_5Agent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/core/IEEE2030_5Agent/setup.py +++ b/services/core/IEEE2030_5Agent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/IEEE2030_5Agent/tests/IEEE2030_5DriverTestAgent/test_agent/agent.py b/services/core/IEEE2030_5Agent/tests/IEEE2030_5DriverTestAgent/test_agent/agent.py index 7b9d97088d..6f23f157e7 100644 --- a/services/core/IEEE2030_5Agent/tests/IEEE2030_5DriverTestAgent/test_agent/agent.py +++ b/services/core/IEEE2030_5Agent/tests/IEEE2030_5DriverTestAgent/test_agent/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_agent.py b/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_agent.py index 239db33cff..f1d94b8ce3 100644 --- a/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_agent.py +++ b/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -105,7 +105,7 @@ def agent(request, volttron_instance_module_web): test_agent = volttron_instance_module_web.build_agent(identity="test_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance_module_web.add_capabilities(test_agent.core.publickey, capabilities) - # Configure a IEEE 2030.5 device in the Master Driver + # Configure a IEEE 2030.5 device in the Platform Driver test_agent.vip.rpc.call('config.store', 'manage_delete_store', PLATFORM_DRIVER).get(timeout=10) test_agent.vip.rpc.call('config.store', 'manage_store', PLATFORM_DRIVER, 'devices/{}'.format(DRIVER_NAME), @@ -136,11 +136,11 @@ def agent(request, volttron_instance_module_web): start=True) print('IEEE2030_5 agent id: ', IEEE2030_5_id) - # Install and start a MasterDriverAgent - md_id = volttron_instance_module_web.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + # Install and start a PlatformDriverAgent + md_id = volttron_instance_module_web.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print('master driver agent id: ', md_id) + print('platform driver agent id: ', md_id) global web_address web_address = volttron_instance_module_web.bind_web_address diff --git a/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_driver.py b/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_driver.py index e01f94e42f..40e76206ac 100644 --- a/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_driver.py +++ b/services/core/IEEE2030_5Agent/tests/test_IEEE2030_5_driver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -129,7 +129,7 @@ def agent(request, volttron_instance_module_web): test_agent = volttron_instance_module_web.build_agent() - # Configure a IEEE 2030.5 device in the Master Driver + # Configure a IEEE 2030.5 device in the Platform Driver test_agent.vip.rpc.call('config.store', 'manage_delete_store', 'platform.driver').get(timeout=10) test_agent.vip.rpc.call('config.store', 'manage_store', 'platform.driver', 'devices/{}'.format(DRIVER_NAME), @@ -153,11 +153,11 @@ def agent(request, volttron_instance_module_web): REGISTRY_CONFIG_STRING, 'csv').get(timeout=10) - # Install and start a MasterDriverAgent - md_id = volttron_instance_module_web.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + # Install and start a PlatformDriverAgent + md_id = volttron_instance_module_web.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print('master driver agent id: ', md_id) + print('platform driver agent id: ', md_id) # Install and start a IEEE2030_5Agent IEEE2030_5_id = volttron_instance_module_web.install_agent(agent_dir=get_services_core("IEEE2030_5Agent"), diff --git a/services/core/InfluxdbHistorian/README.md b/services/core/InfluxdbHistorian/README.md new file mode 100644 index 0000000000..4ded997dcc --- /dev/null +++ b/services/core/InfluxdbHistorian/README.md @@ -0,0 +1,185 @@ +# Influxdb Historian + +InfluxDB is an open source time series database with a fast, scalable +engine and high availability. It\'s often used to build DevOps +Monitoring (Infrastructure Monitoring, Application Monitoring, Cloud +Monitoring), IoT Monitoring, and Real-Time Analytics solutions. + +More information about InfluxDB is available from +. + +## Prerequisites + +### InfluxDB Installation + +To install InfluxDB on an Ubuntu or Debian operating system, run the +script: + +> services/core/InfluxdbHistorian/scripts/install-influx.sh + +For installation on other operating systems, see +. + +### Authentication in InfluxDB + +By default, the InfluxDB *Authentication* option is disabled, and no +user authentication is required to access any InfluxDB database. You can +enable authentication by updating the InfluxDB configuration file. For +detailed information on enabling authentication, see: +. + +If *Authentication* is enabled, authorization privileges are enforced. +There must be at least one defined admin user with access to +administrative queries as outlined in the linked document above. +Additionally, you must pre-create the `user` and `database` that are +specified in the configuration file (the default configuration file for +InfluxDB is `services/core/InfluxdbHistorian/config`). If your `user` is +a non-admin user, they must be granted a full set of privileges on the +desired `database`. + +### InfluxDB Driver + +In order to connect to an InfluxDb client, the Python library for +InfluxDB must be installed in VOLTTRON\'s virtual environment. From the +command line, after enabling the virtual environment, install the +InfluxDB library as follows: + +> pip install influxdb + +## Configuration + +The default configuration file for VOLTTRON\'s InfluxDBHistorian agent +should be in the format: + +``` {.python} +{ + "connection": { + "params": { + "host": "localhost", + "port": 8086, # Don't change this unless default bind port + # in influxdb config is changed + "database": "historian", + "user": "historian", # user is optional if authentication is turned off + "passwd": "historian" # passwd is optional if authentication is turned off + } + }, + "aggregations": { + "use_calendar_time_periods": true + } +} +``` + +The InfluxDBHistorian agent can be packaged, installed and started +according to the standard VOLTTRON agent creation procedure. A sample +VOLTTRON configuration file has been provided: +`services/core/InfluxdbHistorian/config`. + + + +## Connection + +The `host`, `database`, `user` and `passwd` values in the VOLTTRON +configuration file can be modified. `user` and `passwd` are optional if +InfluxDB *Authentication* is disabled. + +> **_Note:_** +Be sure to initialize or pre-create the `database` and `user` that you +defined in the configuration file, and if `user` is a non-admin user, be +make sure to grant privileges for the user on the specified `database`. +For more information, see [Authentication in +InfluxDB](#authentication-in-influxdb). + + +## Aggregations + +In order to use aggregations, the VOLTTRON configuration file must also +specify a value, either `true` or `false`, for +`use_calendar_time_periods`, indicating whether the aggregation period +should align to calendar time periods. If this value is omitted from the +configuration file, aggregations cannot be used. + +For more information on historian aggregations, see: +`Aggregate Historian Agent Specification `{.interpreted-text +role="ref"}. + +Supported Influxdb aggregation functions: + +> Aggregations: COUNT(), DISTINCT(), INTEGRAL(), MEAN(), MEDIAN(), +> MODE(), SPREAD(), STDDEV(), SUM() +> +> Selectors: FIRST(), LAST(), MAX(), MIN() +> +> Transformations: CEILING(),CUMULATIVE_SUM(), DERIVATIVE(), +> DIFFERENCE(), ELAPSED(), NON_NEGATIVE_DERIVATIVE(), +> NON_NEGATIVE_DIFFERENCE() + +More information how to use those functions: + + +> **_Note:_** +Historian aggregations in InfluxDB are different from aggregations +employed by other historian agents in VOLTTRON. InfluxDB doesn\'t have a +separate agent for aggregations. Instead, aggregation is supported +through the `query_historian` function. Other agents can execute an +aggregation query directly in InfluxDB by calling the *RPC.export* +method `query`. For an example, see +[Aggregate Historian Agent Specification](https://volttron.readthedocs.io/en/develop/developing-volttron/developing-agents/specifications/aggregate.html) + + +# Database Schema + +Each InfluxDB database has a `meta` table as well as other tables for +different measurements, e.g. one table for \"power_kw\", one table for +\"energy\", one table for \"voltage\", etc. (An InfluxDB `measurement` +is similar to a relational table, so for easier understanding, InfluxDB +measurements will be referred to below as tables.) + +## Measurement Table + +Example: If a topic name is *\"CampusA/Building1/Device1/Power_KW\"*, +the `power_kw` table might look as follows: + + |time | building | campus | device | source | value | + |--------------------------------|-----------|---------|----------|---------|-------| + |2017-12-28T20:41:00.004260096Z | building1 | campusa | device1 | scrape | 123.4 | + |2017-12-30T01:05:00.004435616Z | building1 | campusa | device1 | scrape | 567.8 | + |2018-01-15T18:08:00.126345Z | building1 | campusa | device1 | scrape | 10 | + +`building`, `campus`, `device`, and `source` are InfluxDB *tags*. +`value` is an InfluxDB *field*. + +> **_Note:_** +The topic is converted to all lowercase before being stored in the +table. In other words, a set of *tag* names, as well as a table name, +are created by splitting `topic_id` into substrings ([Meta Table section below](#Meta-Table)). +::: + +So in this example, where the typical format of a topic name is +`///`, `campus`, `building` and +`device` are each stored as tags in the database. + +A topic name might not confirm to that convention: + +1. The topic name might contain additional substrings, e.g. *CampusA/Building1/LAB/Device/OutsideAirTemperature*. +In this case, `campus` will be *campusa/building*, `building` will be *lab*, and `device` will be *device*. +2. The topic name might contain fewer substrings, e.g. *LAB/Device/OutsideAirTemperature*. In this case, the `campus` +tag will be empty, `building` will be *lab*, and `device` will be *device*. + +## Meta Table + +The meta table will be structured as in the following example: + + + |time | last_updated | meta_dict | topic | topic_id | + |----------------------|----------------------------------|-----------------------------------------|--------------------------------------|--------------------------------------| + |1970-01-01T00:00:00Z | 2017-12-28T20:47:00.003051+00:00 | {u\'units\': u\'kw\',
u\'tz\': u\'US/Pacific\',
u\'type\': u\'float\'} | CampusA/Building1/Device1/Power_KW | campusa/building1/device1/power_kw | + |1970-01-01T00:00:00Z | 2017-12-28T20:47:00.003051+00:00 | {u\'units\': u\'kwh\',
u\'tz\': u\'US/Pacific\',
u\'type\': u\'float\'} | CampusA/Building1/Device1/Energy_KWH | campusa/building1/device1/energy_kwh | + + |----------------------|----------------------------------|-----------------------------------------|--------------------------------------|--------------------------------------| + +In the InfluxDB, `last_updated`, `meta_dict` and `topic` are *fields* and `topic_id` is a *tag*. + +Since InfluxDB is a time series database, the `time` column is required, and a dummy value +(`time=0`, which is 1970-01-01T00:00:00Z based on epoch unix time) is assigned to all topics for easier metadata +updating. Hence, if the contents of `meta_dict` change for a specific topic, both `last_updated` and `meta_dict` values +for that topic will be replaced in the table. diff --git a/services/core/InfluxdbHistorian/README.rst b/services/core/InfluxdbHistorian/README.rst deleted file mode 100644 index b54583a0b3..0000000000 --- a/services/core/InfluxdbHistorian/README.rst +++ /dev/null @@ -1,186 +0,0 @@ -.. _Influxdb-Historian: - -################## -Influxdb Historian -################## - -InfluxDB is an open source time series database with a fast, scalable engine and high availability. -It's often used to build DevOps Monitoring (Infrastructure Monitoring, Application Monitoring, -Cloud Monitoring), IoT Monitoring, and Real-Time Analytics solutions. - -More information about InfluxDB is available from ``_. - - -Prerequisites -############# - -InfluxDB Installation -===================== - -To install InfluxDB on an Ubuntu or Debian operating system, run the script: - - :: - - services/core/InfluxdbHistorian/scripts/install-influx.sh - -For installation on other operating systems, -see ``_. - -Authentication in InfluxDB -========================== - -By default, the InfluxDB *Authentication* option is disabled, and no user authentication is -required to access any InfluxDB database. You can enable authentication by updating the -InfluxDB configuration file. For detailed information on enabling authentication, see: -``_. - -If *Authentication* is enabled, authorization privileges are enforced. There must be at least -one defined admin user with access to administrative queries as outlined in the linked document -above. Additionally, you must pre-create the ``user`` and ``database`` that are specified in the -configuration file (the default configuration file for InfluxDB -is ``services/core/InfluxdbHistorian/config``). -If your ``user`` is a non-admin user, they must be granted a full set of privileges on the -desired ``database``. - -InfluxDB Driver -=============== - -In order to connect to an InfluxDb client, the Python library for InfluxDB must be installed -in VOLTTRON's virtual environment. From the command line, after enabling the virtual environment, -install the InfluxDB library as follows: - - :: - - pip install influxdb - - -Configuration -############# - -The default configuration file for VOLTTRON's InfluxDBHistorian agent should be in the format: - -.. code-block:: python - - { - "connection": { - "params": { - "host": "localhost", - "port": 8086, # Don't change this unless default bind port - # in influxdb config is changed - "database": "historian", - "user": "historian", # user is optional if authentication is turned off - "passwd": "historian" # passwd is optional if authentication is turned off - } - }, - "aggregations": { - "use_calendar_time_periods": true - } - } - - -The InfluxDBHistorian agent can be packaged, installed and started according to the standard -VOLTTRON agent creation procedure. A sample VOLTTRON configuration file has been -provided: ``services/core/InfluxdbHistorian/config``. - -.. seealso:: :ref:`Agent Development Walkthrough ` - -Connection -========== - -The ``host``, ``database``, ``user`` and ``passwd`` values in the VOLTTRON configuration file -can be modified. ``user`` and ``passwd`` are optional if InfluxDB *Authentication* is disabled. - -.. note:: Be sure to initialize or pre-create the ``database`` and ``user`` that you defined in - the configuration file, and if ``user`` is a non-admin user, be make sure to grant - privileges for the user on the specified ``database``. - For more information, see `Authentication in InfluxDB`_. - -Aggregations -============ - -In order to use aggregations, the VOLTTRON configuration file must also specify a value, -either ``true`` or ``false``, for ``use_calendar_time_periods``, indicating whether the -aggregation period should align to calendar time periods. If this value is omitted from the -configuration file, aggregations cannot be used. - -For more information on historian aggregations, -see: :ref:`Aggregate Historian Agent Specification `. - -Supported Influxdb aggregation functions: - - Aggregations: COUNT(), DISTINCT(), INTEGRAL(), MEAN(), MEDIAN(), MODE(), SPREAD(), STDDEV(), SUM() - - Selectors: FIRST(), LAST(), MAX(), MIN() - - Transformations: CEILING(),CUMULATIVE_SUM(), DERIVATIVE(), DIFFERENCE(), ELAPSED(), NON_NEGATIVE_DERIVATIVE(), NON_NEGATIVE_DIFFERENCE() - -More information how to use those functions: ``_ - -.. note:: Historian aggregations in InfluxDB are different from aggregations employed - by other historian agents in VOLTTRON. InfluxDB doesn't have a separate agent for aggregations. - Instead, aggregation is supported through the ``query_historian`` function. Other agents can - execute an aggregation query directly in InfluxDB by calling the *RPC.export* method ``query``. - For an example, see :ref:`Aggregate Historian Agent Specification ` - -Database Schema -############### - -Each InfluxDB database has a ``meta`` table as well as other tables for different measurements, -e.g. one table for "power_kw", one table for "energy", one table for "voltage", etc. -(An InfluxDB ``measurement`` is similar to a relational table, so for easier understanding, InfluxDB -measurements will be referred to below as tables.) - -Measurement Table -================= - -Example: If a topic name is *"CampusA/Building1/Device1/Power_KW"*, the ``power_kw`` table might look as follows: - -+-------------------------------+-----------+---------+----------+-------+------+ -|time |building |campus |device |source |value | -+-------------------------------+-----------+---------+----------+-------+------+ -|2017-12-28T20:41:00.004260096Z |building1 |campusa |device1 |scrape |123.4 | -+-------------------------------+-----------+---------+----------+-------+------+ -|2017-12-30T01:05:00.004435616Z |building1 |campusa |device1 |scrape |567.8 | -+-------------------------------+-----------+---------+----------+-------+------+ -|2018-01-15T18:08:00.126345Z |building1 |campusa |device1 |scrape |10 | -+-------------------------------+-----------+---------+----------+-------+------+ - -``building``, ``campus``, ``device``, and ``source`` are InfluxDB *tags*. ``value`` is an InfluxDB *field*. - -.. note:: The topic is converted to all lowercase before being stored in the table. - In other words, a set of *tag* names, as well as a table name, are created by - splitting ``topic_id`` into substrings (see `meta table`_ below). - - -So in this example, where the typical format of a topic name is ``///``, -``campus``, ``building`` and ``device`` are each stored as tags in the database. - -A topic name might not confirm to that convention: - - #. The topic name might contain additional substrings, e.g. - *CampusA/Building1/LAB/Device/OutsideAirTemperature*. In this case, - ``campus`` will be *campusa/building*, ``building`` will be *lab*, and ``device`` will be *device*. - - #. The topic name might contain fewer substrings, e.g. *LAB/Device/OutsideAirTemperature*. - In this case, the ``campus`` tag will be empty, ``building`` will be *lab*, - and ``device`` will be *device*. - -Meta Table -========== - -The meta table will be structured as in the following example: - -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ -|time |last_updated |meta_dict |topic |topic_id | -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ -|1970-01-01T00:00:00Z |2017-12-28T20:47:00.003051+00:00 |{u'units': u'kw', u'tz': u'US/Pacific', u'type': u'float'} |CampusA/Building1/Device1/Power_KW |campusa/building1/device1/power_kw | -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ -|1970-01-01T00:00:00Z |2017-12-28T20:47:00.003051+00:00 |{u'units': u'kwh', u'tz': u'US/Pacific', u'type': u'float'} |CampusA/Building1/Device1/Energy_KWH |campusa/building1/device1/energy_kwh | -+---------------------+---------------------------------+------------------------------------------------------------------+-------------------------------------+--------------------------------------+ - -In the InfluxDB, ``last_updated``, ``meta_dict`` and ``topic`` are *fields* and ``topic_id`` is a *tag*. - -Since InfluxDB is a time series database, the ``time`` column is required, and a dummy value (``time=0``, -which is 1970-01-01T00:00:00Z based on epoch unix time) is assigned to all topics for easier -metadata updating. Hence, if the contents of ``meta_dict`` change for a specific topic, both ``last_updated`` -and ``meta_dict`` values for that topic will be replaced in the table. diff --git a/services/core/InfluxdbHistorian/config b/services/core/InfluxdbHistorian/config index 341663aa0b..d5c253f84a 100644 --- a/services/core/InfluxdbHistorian/config +++ b/services/core/InfluxdbHistorian/config @@ -1,14 +1,14 @@ { - "connection": { - "params": { - "host": "localhost", - "port": 8086, - "database": "historian", - "user": "admin", - "passwd": "admin" + "connection": { + "params": { + "host": "localhost", + "port": 8086, + "database": "historian", + "user": "admin", + "passwd": "admin" + } + }, + "aggregations": { + "use_calendar_time_periods": true } - }, - "aggregations": { - "use_calendar_time_periods": true - } -} \ No newline at end of file +} diff --git a/services/core/InfluxdbHistorian/conftest.py b/services/core/InfluxdbHistorian/conftest.py index 8559470457..68e5e611b1 100644 --- a/services/core/InfluxdbHistorian/conftest.py +++ b/services/core/InfluxdbHistorian/conftest.py @@ -3,4 +3,4 @@ from volttrontesting.fixtures.volttron_platform_fixtures import * # Add system path of the agent's directory -sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) \ No newline at end of file +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/InfluxdbHistorian/influx/historian.py b/services/core/InfluxdbHistorian/influx/historian.py index 15a5c0b048..0cf382e304 100644 --- a/services/core/InfluxdbHistorian/influx/historian.py +++ b/services/core/InfluxdbHistorian/influx/historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -322,7 +322,7 @@ def query_historian(self, topic, start=None, end=None, agg_type=None, self._use_calendar_time_periods) values[topic_name] = value else: - _log.warn('No such topic {}'.format(topic_name)) + _log.warning('No such topic {}'.format(topic_name)) results = { "values": values, diff --git a/services/core/InfluxdbHistorian/setup.py b/services/core/InfluxdbHistorian/setup.py index f513a83c9a..203cd3945b 100644 --- a/services/core/InfluxdbHistorian/setup.py +++ b/services/core/InfluxdbHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/InfluxdbHistorian/tests/test_influxdb_historian.py b/services/core/InfluxdbHistorian/tests/test_influxdb_historian.py index cfd20463d1..af6f7f7391 100644 --- a/services/core/InfluxdbHistorian/tests/test_influxdb_historian.py +++ b/services/core/InfluxdbHistorian/tests/test_influxdb_historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,17 +41,16 @@ import pytest import gevent import pytz +import os +import json from pytest import approx from datetime import datetime, timedelta from dateutil import parser from volttron.platform import get_services_core, jsonapi -from volttron.platform.agent.utils import format_timestamp, \ - parse_timestamp_string, \ - get_aware_utc_now +from volttron.platform.agent.utils import format_timestamp, parse_timestamp_string, get_aware_utc_now from volttron.platform.messaging import headers as headers_mod - try: from influxdb import InfluxDBClient HAS_INFLUXDB = True @@ -63,7 +62,6 @@ from fixtures import * - def clean_database(client, clean_updated_database=False): db = influxdb_config['connection']['params']['database'] client.drop_database(db) @@ -82,10 +80,7 @@ def publish_some_fake_data(publish_agent, data_count, value_type='float'): """ Generate some random data for all query_topics and uses the passed publish_agent's vip pubsub to publish all messages. - - Timestamp of these data points will be in the range of today 12:00AM to 23:59PM - :param publish_agent: publish agent used to publish data :param data_count: number of data points generated E.g: if data_count = 10 and number of topics is 3, @@ -184,7 +179,6 @@ def publish_data_with_updated_meta(publish_agent): """ Publish a new data point containing an updated meta dictionary for some topics - :param publish_agent: publish agent used to publish data :return: updated meta dictionaries for all topics and updated_time @@ -291,12 +285,8 @@ def publish_data_with_updated_topic_case(publish_agent, data_count): @pytest.mark.skipif(not HAS_INFLUXDB, reason='No influxdb library. Please run \'pip install influxdb\'') def test_installation_and_connection(volttron_instance, influxdb_client): """ - Test installing the InfluxdbHistorian agent and then - connect to influxdb client. - - - When it first connect to the client, there should be no - database yet. If database already existed, clean database. + Test installing the InfluxdbHistorian agent and then connect to influxdb client. + When it first connect to the client, there should be no database yet. If database already existed, clean database. """ clean_database(influxdb_client) @@ -325,7 +315,6 @@ def test_installation_and_connection(volttron_instance, influxdb_client): def test_publish_to_historian(volttron_instance, influxdb_client): """ Test basic functionality of publish_to_historian. - Inserts a specific number of data and checks if all of them got into the database. """ @@ -568,9 +557,7 @@ def test_publish_with_changed_value_type(volttron_instance, influxdb_client): def test_query_topic_list(volttron_instance, influxdb_client): """ Test basic functionality of query_topic_list method in InfluxdbHistorian. - - Inserts a specific number of data and call 'get_topic_list' method through - a new built agent. + Inserts a specific number of data and call 'get_topic_list' method through a new built agent. """ clean_database(influxdb_client) @@ -605,22 +592,22 @@ def test_query_topic_list(volttron_instance, influxdb_client): @pytest.mark.skipif(not HAS_INFLUXDB, reason='No influxdb library. Please run \'pip install influxdb\'') def test_query_historian_all_topics(volttron_instance, influxdb_client): """ - Test basic functionality of query_historian method in InfluxdbHistorian. + Test basic functionality of query_historian method in InfluxdbHistorian. - Inserts a specific number of data and call 'query' method through - a new built agent. The topic argument can be a single topic or a - list of topics. + Inserts a specific number of data and call 'query' method through + a new built agent. The topic argument can be a single topic or a + list of topics. - We test a list of topics in this case. + We test a list of topics in this case. - The method 'query' actually executes multiple times the following queries: + The method 'query' actually executes multiple times the following queries: - .. code-block:: python + .. code-block:: python - SELECT value FROM WHERE campus='' and building='' and device='' - LIMIT 30 + SELECT value FROM WHERE campus='' and building='' and device='' + LIMIT 30 - :note: , , and are parsed from topic_id + :note: , , and are parsed from topic_id """ clean_database(influxdb_client) @@ -1297,13 +1284,11 @@ def test_update_topic_case(volttron_instance, influxdb_client): publish_some_fake_data(publisher, 3) - old_topic_list = publisher.vip.rpc.call('influxdb.historian', - 'get_topic_list').get(timeout=5) + old_topic_list = publisher.vip.rpc.call('influxdb.historian', 'get_topic_list').get(timeout=5) publish_data_with_updated_topic_case(publisher, 3) - new_topic_list = publisher.vip.rpc.call('influxdb.historian', - 'get_topic_list').get(timeout=5) + new_topic_list = publisher.vip.rpc.call('influxdb.historian', 'get_topic_list').get(timeout=5) assert old_topic_list != new_topic_list @@ -1316,17 +1301,13 @@ def test_update_topic_case(volttron_instance, influxdb_client): # Any test run after this one has to switch to 'historian' database again. -# Hence, for convenience, put this test at last in the file -# because it will drop 'test' database at last. +# Hence, for convenience, put this test at last in the file because it will drop 'test' database at last. @pytest.mark.historian @pytest.mark.skipif(not HAS_INFLUXDB, reason='No influxdb library. Please run \'pip install influxdb\'') def test_update_config_store(volttron_instance, influxdb_client): """ - Test the case when user updates config store while an - InfluxdbHistorian Agent is running. - - In this test, database name is updated and data should be - stored in the updated one. + Test the case when user updates config store while an InfluxdbHistorian Agent is running. + In this test, database name is updated and data should be stored in the updated one. """ clean_database(influxdb_client) db = influxdb_config['connection']['params']['database'] @@ -1349,8 +1330,7 @@ def test_update_config_store(volttron_instance, influxdb_client): publish_some_fake_data(publisher, 5) # Update config store - publisher.vip.rpc.call('config.store', 'manage_store', - 'influxdb.historian','config', + publisher.vip.rpc.call('config.store', 'manage_store', 'influxdb.historian', 'config', jsonapi.dumps(updated_influxdb_config), config_type="json").get(timeout=10) publish_some_fake_data(publisher, 5) @@ -1377,3 +1357,54 @@ def test_update_config_store(volttron_instance, influxdb_client): volttron_instance.remove_agent(agent_uuid) clean_database(influxdb_client, clean_updated_database=True) + +@pytest.mark.historian +@pytest.mark.skipif(not HAS_INFLUXDB, reason='No influxdb library. Please run \'pip install influxdb\'') +def test_default_config(volttron_instance, influxdb_client): + """ + Test installing the InfluxdbHistorian agent and then connect to influxdb client. + When it first connect to the client, there should be no database yet. If database already existed, clean database. + """ + clean_database(influxdb_client) + + config_path = os.path.join(get_services_core("InfluxdbHistorian"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + clean_database(influxdb_client) + db = config_json['connection']['params']['database'] + influxdb_client.create_database(db) + + agent_uuid = start_influxdb_instance(volttron_instance, config_json) + assert agent_uuid is not None + assert volttron_instance.is_agent_running(agent_uuid) + + try: + # query the table to check publishes, do a minimal comparison + publisher = volttron_instance.build_agent() + assert publisher is not None + expected = publish_some_fake_data(publisher, 10) + + rs = influxdb_client.get_list_database() + + # the databases historian + assert {'name': 'historian'} in rs + + # Check for measurement OutsideAirTemperature + query = 'SELECT value FROM outsideairtemperature ' \ + 'WHERE campus=\'building\' and building=\'lab\' and device=\'device\'' + rs = influxdb_client.query(query) + rs = list(rs.get_points()) + topic = query_topics["oat_point"] + + assert len(rs) == 10 + + for point in rs: + ts = parser.parse(point['time']) + ts = format_timestamp(ts) + assert point["value"] == approx(expected['data'][ts][topic]) + finally: + volttron_instance.stop_agent(agent_uuid) + volttron_instance.remove_agent(agent_uuid) + clean_database(influxdb_client) diff --git a/services/core/MQTTHistorian/README.md b/services/core/MQTTHistorian/README.md new file mode 100644 index 0000000000..5279322a09 --- /dev/null +++ b/services/core/MQTTHistorian/README.md @@ -0,0 +1,78 @@ +# MQTT Historian + +## Overview + +The MQTT Historian agent publishes data to an MQTT broker. + +The mqttlistener.py script will connect to the broker and print all +messages. + +## Dependencies + +The Paho MQTT library from Eclipse is needed for the agent and can be +installed with: + + pip install paho-mqtt + +The Mosquitto MQTT broker may be useful for testing and can be installed +with + + apt-get install mosquitto + +## Configuration + +The following is an example configuration file: + + { + "connection: { + # Optional backup limit in gigabytes. Default is no backup limit. + # "backup_storage_limit_gb": null, + + # Quality of service level for MQTT publishes. Default is 0. + # "mqtt_qos": 0, + + # Set messages to be retained. Default is False + # "mqtt_retain": false, + + # Address of broker to connect to. Default is localhost. + "mqtt_hostname": "localhost", + + # Port on broker accepting connections. Default is 1883 + "mqtt_port": 1883 + + # If a client id is not provided one will be generated by paho-mqtt. + # Default is an empty string. + # "mqtt_client_id": "", + + # Keepalive timeout for the client. Default is 60 seconds + # "mqtt_keepalive": 60, + + # Optional will is published when the client disconnects. Default is None. + # If used then QOS defaults to 0 and retain defaults to False. + # "mqtt_will": { + # "topic": "", + # "payload":", + # "qos":, + # "retain": + # }, + + # MQTT authentication info. Defaults to None. + # "mqtt_auth": { + # "username": "", + # "password": "" + # }, + + # MQTT TLS parameters. If used then CA Certs is required. Otherwise the + # default is None. + # "mqtt_tls": { + # "ca_certs":"", + # "certfile":"", + # "keyfile":"", + # "tls_version":"", + # "ciphers":" + # } + + # Protocol versions MQTTv311 and MQTTv31 are supported. Default is MQTTv311. + # "mqtt_protocol": "MQTTv311" + } + } diff --git a/services/core/MQTTHistorian/README.rst b/services/core/MQTTHistorian/README.rst deleted file mode 100644 index 18f3b86186..0000000000 --- a/services/core/MQTTHistorian/README.rst +++ /dev/null @@ -1,24 +0,0 @@ -MQTT Historian -============== - -Overview --------- -The MQTT Historian agent publishes data to an MQTT broker. - -The mqttlistener.py script will connect to the broker and print -all messages. - -Dependencies ------------- -The Paho MQTT library from Eclipse is needed for the agent and can -be installed with: - -:: - - pip install paho-mqtt - -The Mosquitto MQTT broker may be useful for testing and can be installed with - -:: - - apt-get install mosquitto diff --git a/services/core/MQTTHistorian/Tests/test_mqtt_historian.py b/services/core/MQTTHistorian/Tests/test_mqtt_historian.py new file mode 100644 index 0000000000..a8e788535c --- /dev/null +++ b/services/core/MQTTHistorian/Tests/test_mqtt_historian.py @@ -0,0 +1,216 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json +import gevent +import pytest +import paho.mqtt.client as mqtt_client +from paho.mqtt.client import MQTTv311, MQTTv31 + +from volttron.platform import get_services_core +from volttron.platform.agent import utils +from volttron.platform.messaging import headers as headers_mod +from volttron.platform.messaging.health import STATUS_GOOD + +default_config_path = os.path.join(get_services_core("MQTTHistorian"), "config") +with open(default_config_path, "r") as config_file: + DEFAULT_CONFIG = json.load(config_file) +assert isinstance(DEFAULT_CONFIG, dict) +mqtt_connection = DEFAULT_CONFIG.get('connection') +assert mqtt_connection and isinstance(mqtt_connection, dict) +MQTT_PORT = int(mqtt_connection.get('mqtt_port')) if 'mqtt_port' in mqtt_connection else 1883 +MQTT_PROTOCOL = mqtt_connection.get('mqtt_protocol') if 'mqtt_protocol' in mqtt_connection else "MQTTv311" +if MQTT_PROTOCOL == "MQTTv311": + MQTT_PROTOCOL = MQTTv311 +elif MQTT_PROTOCOL == "MQTTv31": + MQTT_PROTOCOL = MQTTv31 +else: + raise ValueError('MQTT protocol expects MQTTv311 or MQTTv31') + +TEST_PUBLISH = [{'SampleLong1': 50, 'SampleBool1': True, 'SampleFloat1': 10.0, }, {'SampleFloat1': {'units': 'PPM', " \ + "'type': 'integer', 'tz': 'US/Pacific'}, 'SampleLong1': {'units': 'Enumeration', 'type': 'integer', " \ + "'tz': 'US/Pacific'}, 'SampleBool1': {'units': 'On / Off', 'type': 'integer', 'tz': 'US/Pacific'}}] +TEST_TOPIC = 'devices/campus/building/fake/all' + + +@pytest.fixture(scope="module") +def helper_agent(request, volttron_instance): + # 1: Start a fake agent to query the historian agent in volttron_instance + agent = volttron_instance.build_agent() + + # 2: add a tear down method to stop the fake agent that published to message bus + def stop_agent(): + print("In teardown method of helper_agent") + agent.core.stop() + + request.addfinalizer(stop_agent) + return agent + + +@pytest.fixture(scope='module') +def mqtt_broker_client(request): + client = mqtt_client.Client() + + def on_connect(client, userdata, flags, rc): + print('Starting Paho-MQTT client') + client.subscribe('#') + + client.on_connect = on_connect + + client.connect_async('localhost', port=MQTT_PORT) + client.loop_start() + + def stop_client(): + client.loop_stop() + client.disconnect() + + request.addfinalizer(stop_client) + return client + + +@pytest.mark.historian +def test_can_publish_individual_points(volttron_instance, helper_agent, mqtt_broker_client): + """ + Test the agent will by default subscribe to devices topics and publish "all" publish data as individual points + """ + # publish a device "all" publish with a few points, by default we should see each point from the all publish as + # a single publish on the MQTT broker + uuid = volttron_instance.install_agent( + agent_dir=get_services_core("MQTTHistorian"), + config_file=DEFAULT_CONFIG, + start=True, + vip_identity="mqtt_historian") + + assert helper_agent.vip.rpc.call("mqtt_historian", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + callback_messages = {} + + def on_message(client, userdata, message): + if message.topic not in callback_messages: + callback_messages[message.topic] = [] + callback_messages[message.topic].append(message.payload.decode('utf-8')) + + mqtt_broker_client.on_message = on_message + gevent.sleep(1) + + utcnow = utils.get_aware_utc_now() + utcnow_string = utils.format_timestamp(utcnow) + headers = { + headers_mod.DATE: utcnow_string, + headers_mod.TIMESTAMP: utcnow_string, + headers_mod.SYNC_TIMESTAMP: utcnow_string + } + + print('Sending device data to VOLTTRON messagebus') + helper_agent.vip.pubsub.publish('pubsub', + TEST_TOPIC, + headers=headers, + message=TEST_PUBLISH).get(timeout=10.0) + + gevent.sleep(3) + + assert len(callback_messages) == len(TEST_PUBLISH[0]) + for topic, value in callback_messages.items(): + point = topic.split("/")[-1] + assert point in TEST_PUBLISH[0] + for datapoint in value: + # account for True in Python dict being capitalized but lower in JSON + if datapoint.lower() == 'true': + assert datapoint.lower() == str(TEST_PUBLISH[0][point]).lower() + else: + assert datapoint == str(TEST_PUBLISH[0][point]) + + volttron_instance.stop_agent(uuid) + + +@pytest.mark.historian +def test_can_publish_all_topic(volttron_instance, helper_agent, mqtt_broker_client): + """ + Test that using custom topics we can output the entirety of a device "all" publish to a single topic on the broker + """ + # publish a device "all" publish with a few points, using a different config we should be able to get the entire + # "all" publish as a single topic on the broker + # prevent capturing device data normally, capture device topics using the record data capture method + config_additions = {"capture_device_data": False, + "custom_topics": { + "capture_record_data": ["devices"]}} + config = DEFAULT_CONFIG.copy() + config.update(config_additions) + + uuid = volttron_instance.install_agent( + agent_dir=get_services_core("MQTTHistorian"), + config_file=config, + start=True, + vip_identity="mqtt_all_publish") + + assert helper_agent.vip.rpc.call("mqtt_all_publish", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + callback_messages = {} + + def on_message(client, userdata, message): + if message.topic not in callback_messages: + callback_messages[message.topic] = [] + callback_messages[message.topic].append(message.payload.decode('utf-8')) + + mqtt_broker_client.on_message = on_message + + gevent.sleep(1) + + utcnow = utils.get_aware_utc_now() + utcnow_string = utils.format_timestamp(utcnow) + headers = { + headers_mod.DATE: utcnow_string, + headers_mod.TIMESTAMP: utcnow_string, + headers_mod.SYNC_TIMESTAMP: utcnow_string + } + + print('Sending device data to VOLTTRON messagebus') + helper_agent.vip.pubsub.publish('pubsub', + TEST_TOPIC, + headers=headers, + message=TEST_PUBLISH).get(timeout=10.0) + + gevent.sleep(3) + + assert len(callback_messages) == 1 + for topic, value in callback_messages.items(): + assert topic == TEST_TOPIC + assert json.loads(value[0]) == TEST_PUBLISH + + volttron_instance.stop_agent(uuid) diff --git a/services/core/MQTTHistorian/config b/services/core/MQTTHistorian/config index 4a084b0aa1..115ed3a0b4 100644 --- a/services/core/MQTTHistorian/config +++ b/services/core/MQTTHistorian/config @@ -1,51 +1,6 @@ { - # Optional backup limit in gigabytes. Default is no backup limit. - # "backup_storage_limit_gb": null, - - # Quality of service level for MQTT publishes. Default is 0. - # "mqtt_qos": 0, - - # Set messages to be retained. Default is False - # "mqtt_retain": false, - - # Address of broker to connect to. Default is localhost. - "mqtt_hostname": "localhost", - - # Port on broker accepting connections. Default is 1883 - "mqtt_port": 1883 - - # If a client id is not provided one will be generated by paho-mqtt. - # Default is an empty string. - # "mqtt_client_id": "", - - # Keepalive timeout for the client. Default is 60 seconds - # "mqtt_keepalive": 60, - - # Optional will is published when the client disconnects. Default is None. - # If used then QOS defaults to 0 and retain defaults to False. - # "mqtt_will": { - # "topic": "", - # "payload":", - # "qos":, - # "retain": - # }, - - # MQTT authentication info. Defaults to None. - # "mqtt_auth": { - # "username": "", - # "password": "" - # }, - - # MQTT TLS parameters. If used then CA Certs is required. Otherwise the - # default is None. - # "mqtt_tls": { - # "ca_certs":"", - # "certfile":"", - # "keyfile":"", - # "tls_version":"", - # "ciphers":" - # } - - # Protocol versions MQTTv311 and MQTTv31 are supported. Default is MQTTv311. - # "mqtt_protocol": "MQTTv311" + "connection": { + "mqtt_hostname": "localhost", + "mqtt_port": 1883 + } } diff --git a/services/core/MQTTHistorian/conftest.py b/services/core/MQTTHistorian/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/core/MQTTHistorian/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/MQTTHistorian/mqtt_historian/agent.py b/services/core/MQTTHistorian/mqtt_historian/agent.py index dee5342dc8..6ee394486b 100644 --- a/services/core/MQTTHistorian/mqtt_historian/agent.py +++ b/services/core/MQTTHistorian/mqtt_historian/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,9 +41,9 @@ import logging import sys import time -import gevent -from volttron.platform.agent.base_historian import BaseHistorian, add_timing_data_to_header +from volttron.platform import jsonapi +from volttron.platform.agent.base_historian import BaseHistorian from volttron.platform.agent import utils from paho.mqtt.client import MQTTv311, MQTTv31 @@ -55,28 +55,40 @@ __version__ = '0.2' -class MQTTHistorian(BaseHistorian): - """This historian publishes data to MQTT. - """ +def historian(config_path, **kwargs): + if isinstance(config_path, dict): + config_dict = config_path + else: + config_dict = utils.load_config(config_path) + + connection = config_dict.get('connection', None) + assert connection is not None + + utils.update_kwargs_with_config(kwargs, config_dict) + + return MQTTHistorian(**kwargs) - def __init__(self, config_path, **kwargs): - config = utils.load_config(config_path) +class MQTTHistorian(BaseHistorian): + """ + This historian publishes data to a MQTT Broker. + """ + def __init__(self, connection, **kwargs): # We pass every optional parameter to the MQTT library functions so they # default to the same values that paho uses as defaults. - self.mqtt_qos = config.get('mqtt_qos', 0) - self.mqtt_retain = config.get('mqtt_retain', False) - - self.mqtt_hostname = config.get('mqtt_hostname', 'localhost') - self.mqtt_port = config.get('mqtt_port', 1883) - self.mqtt_client_id = config.get('mqtt_client_id', '') - self.mqtt_keepalive = config.get('mqtt_keepalive', 60) - self.mqtt_will = config.get('mqtt_will', None) - self.mqtt_auth = config.get('mqtt_auth', None) - self.mqtt_tls = config.get('mqtt_tls', None) - - protocol = config.get('mqtt_protocol', MQTTv311) + self.mqtt_qos = connection.get('mqtt_qos', 0) + self.mqtt_retain = connection.get('mqtt_retain', False) + + self.mqtt_hostname = connection.get('mqtt_hostname', 'localhost') + self.mqtt_port = connection.get('mqtt_port', 1883) + self.mqtt_client_id = connection.get('mqtt_client_id', '') + self.mqtt_keepalive = connection.get('mqtt_keepalive', 60) + self.mqtt_will = connection.get('mqtt_will', None) + self.mqtt_auth = connection.get('mqtt_auth', None) + self.mqtt_tls = connection.get('mqtt_tls', None) + + protocol = connection.get('mqtt_protocol', MQTTv311) if protocol == "MQTTv311": protocol = MQTTv311 elif protocol == "MQTTv31": @@ -96,10 +108,7 @@ def timestamp(self): return time.mktime(datetime.datetime.now().timetuple()) def publish_to_historian(self, to_publish_list): - _log.debug("publish_to_historian number of items: {}" - .format(len(to_publish_list))) - current_time = self.timestamp() - + _log.debug("publish_to_historian number of items: {}".format(len(to_publish_list))) if self._last_error: # if we failed we need to wait 60 seconds before we go on. if self.timestamp() < self._last_error + 60: @@ -112,7 +121,8 @@ def publish_to_historian(self, to_publish_list): # Construct payload from data in the publish item. # Available fields: 'value', 'headers', and 'meta' - payload = x['value'] + payload = jsonapi.dumps(x['value']) + _log.debug(f'payload: {payload}, topic {topic}') to_send.append({'topic': topic, 'payload': payload, @@ -131,14 +141,16 @@ def publish_to_historian(self, to_publish_list): protocol=self.mqtt_protocol) self.report_all_handled() except Exception as e: - _log.warning("Exception ({}) raised by publish: {}".format( - e.__class__.__name__, - e)) + _log.warning("Exception ({}) raised by publish: {}".format(e.__class__.__name__, e)) self._last_error = self.timestamp() def main(argv=sys.argv): - utils.vip_main(MQTTHistorian) + try: + utils.vip_main(historian, version=__version__) + except Exception as e: + print(e) + _log.exception('unhandled exception') if __name__ == '__main__': diff --git a/services/core/MQTTHistorian/mqttlistener.py b/services/core/MQTTHistorian/mqttlistener.py index 07ca0e5f6e..368f5d5a2e 100644 --- a/services/core/MQTTHistorian/mqttlistener.py +++ b/services/core/MQTTHistorian/mqttlistener.py @@ -4,9 +4,11 @@ PORT = 5000 PROTOCOL = MQTTv311 + # Callback function to print out message topics and payloads def listen(client, userdata, message): print(message.topic, message.payload) + # Subscribe to all messages and loop forever callback(listen, '#', port=PORT, protocol=PROTOCOL) diff --git a/services/core/MQTTHistorian/requirements.txt b/services/core/MQTTHistorian/requirements.txt index 97b87664e3..8579e8b225 100644 --- a/services/core/MQTTHistorian/requirements.txt +++ b/services/core/MQTTHistorian/requirements.txt @@ -1 +1 @@ -paho +paho-mqtt diff --git a/services/core/MQTTHistorian/setup.py b/services/core/MQTTHistorian/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/core/MQTTHistorian/setup.py +++ b/services/core/MQTTHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MarketServiceAgent/README.md b/services/core/MarketServiceAgent/README.md new file mode 100644 index 0000000000..7255d7735f --- /dev/null +++ b/services/core/MarketServiceAgent/README.md @@ -0,0 +1,30 @@ +# Market Service Agent + +The Market Service Agent is used to allow agents to use transactive markets +to implement transactive control strategies. The Market Service Agent provides +an implementation of double blind auction markets that can be used by multiple agents. + +Agents that want to use the Market Service Agent inherit from the :ref:`base MarketAgent`. +The base MarketAgent handles all of the communication between the agent and the MarketServiceAgent. + +## Configuration + +1. "market_period" - The time allowed for a market cycle in seconds. After this amount of time the market starts again. + Defaults to 300. +2. "reservation_delay" - The time delay between the start of a market cycle and the start of gathering market + reservations in seconds. Defaults to 0. +3. "offer_delay" - The time delay between the start of gathering market reservations and the start of gathering market + bids/offers in seconds. Defaults to 120. +4. "verbose_logging" - If True this enables verbose logging. If False, there is little or no logging. Defaults to True. + + +## Sample configuration file + +``` {.python} + { + "market_period": 300, + "reservation_delay": 0, + "offer_delay": 120, + "verbose_logging": True + } +``` diff --git a/services/core/MarketServiceAgent/market_service/agent.py b/services/core/MarketServiceAgent/market_service/agent.py index 8524bd073a..5803a07cc3 100644 --- a/services/core/MarketServiceAgent/market_service/agent.py +++ b/services/core/MarketServiceAgent/market_service/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -50,14 +50,14 @@ ================================ "market_period" - The time allowed for a market cycle in seconds. After this amount of time the market starts again. - Defaults to 300. + The time allowed for a market cycle in seconds. After this amount of time the market starts again. + Defaults to 300. "reservation_delay" - The time delay between the start of a market cycle and the start of gathering market reservations -  in seconds. Defaults to 0. + The time delay between the start of a market cycle and the start of gathering market reservations + in seconds. Defaults to 0. "offer_delay" - The time delay between the start of gathering market reservations and the start of gathering market bids/offers -  in seconds. Defaults to 120. + The time delay between the start of gathering market reservations and the start of gathering market bids/offers + in seconds. Defaults to 120. "verbose_logging" If True this enables verbose logging. If False, there is little or no logging. Defaults to True. @@ -81,54 +81,30 @@ import logging import sys +import gevent from transitions import Machine from volttron.platform.agent.known_identities import PLATFORM_MARKET_SERVICE from volttron.platform.agent import utils from volttron.platform.messaging.topics import MARKET_RESERVE, MARKET_BID from volttron.platform.vip.agent import Agent, Core, RPC -from market_service.director import Director -from market_service.market_list import MarketList -from market_service.market_participant import MarketParticipant from volttron.platform.agent.base_market_agent.poly_line_factory import PolyLineFactory +from .market_list import MarketList +from .market_participant import MarketParticipant +from .director import Director + _tlog = logging.getLogger('transitions.core') _tlog.setLevel(logging.WARNING) _log = logging.getLogger(__name__) utils.setup_logging() -__version__ = "0.01" +__version__ = "1.0" INITIAL_WAIT = 'service_initial_wait' COLLECT_RESERVATIONS = 'service_collect_reservations' COLLECT_OFFERS = 'service_collect_offers' NO_MARKETS = 'service_has_no_markets' -def market_service_agent(config_path, **kwargs): - """Parses the Market Service Agent configuration and returns an instance of - the agent created using that configuation. - - :param config_path: Path to a configuation file. - - :type config_path: str - :returns: Market Service Agent - :rtype: MarketServiceAgent - """ - _log.debug("Starting MarketServiceAgent") - try: - config = utils.load_config(config_path) - except Exception: - config = {} - - if not config: - _log.info("Using Market Service Agent defaults for starting configuration.") - - market_period = int(config.get('market_period', 300)) - reservation_delay = int(config.get('reservation_delay', 0)) - offer_delay = int(config.get('offer_delay', 120)) - verbose_logging = int(config.get('verbose_logging', True)) - - return MarketServiceAgent(market_period, reservation_delay, offer_delay, verbose_logging, **kwargs) - class MarketServiceAgent(Agent): states = [INITIAL_WAIT, COLLECT_RESERVATIONS, COLLECT_OFFERS, NO_MARKETS] @@ -140,25 +116,43 @@ class MarketServiceAgent(Agent): {'trigger': 'start_reservations', 'source': NO_MARKETS, 'dest': COLLECT_RESERVATIONS}, ] - def __init__(self, market_period=300, reservation_delay=0, offer_delay=120, verbose_logging = True, **kwargs): + def __init__(self, config_path, **kwargs): super(MarketServiceAgent, self).__init__(**kwargs) - _log.debug("vip_identity: {}".format(self.core.identity)) - _log.debug("market_period: {}".format(market_period)) - _log.debug("reservation_delay: {}".format(reservation_delay)) - _log.debug("offer_delay: {}".format(offer_delay)) - _log.debug("verbose_logging: {}".format(verbose_logging)) + config = utils.load_config(config_path) + self.agent_name = config.get('agent_name', 'MixMarketService') + self.market_period = int(config.get('market_period', 300)) + self.reservation_delay = int(config.get('reservation_delay', 0)) + self.offer_delay = int(config.get('offer_delay', 120)) + self.verbose_logging = int(config.get('verbose_logging', True)) + self.director = None + # This can be periodic or event_driven + self.market_type = config.get("market_type", "event_driven") + if self.market_type not in ["periodic", "event_driven"]: + self.market_type = "event_driven" self.state_machine = Machine(model=self, states=MarketServiceAgent.states, transitions= MarketServiceAgent.transitions, initial=INITIAL_WAIT) - self.market_list = None - self.verbose_logging = verbose_logging - self.director = Director(market_period, reservation_delay, offer_delay) + self.market_list = MarketList(self.vip.pubsub.publish, self.verbose_logging) @Core.receiver("onstart") def onstart(self, sender, **kwargs): - self.market_list = MarketList(self.vip.pubsub.publish, self.verbose_logging) - self.director.start(self) + if self.market_type == "periodic": + self.director = Director(self.market_period, self.reservation_delay, self.offer_delay) + self.director.start(self) + else: + # Listen to the new_cycle signal + self.vip.pubsub.subscribe(peer='pubsub', + prefix='mixmarket/start_new_cycle', + callback=self.start_new_cycle) + + def start_new_cycle(self, peer, sender, bus, topic, headers, message): + _log.debug("Trigger market period for Market agent.") + gevent.sleep(self.reservation_delay) + self.send_collect_reservations_request(utils.get_aware_utc_now()) + + gevent.sleep(self.offer_delay) + self.send_collect_offers_request(utils.get_aware_utc_now()) def send_collect_reservations_request(self, timestamp): _log.debug("send_collect_reservations_request at {}".format(timestamp)) @@ -170,7 +164,7 @@ def send_collect_reservations_request(self, timestamp): message=utils.format_timestamp(timestamp)) def send_collect_offers_request(self, timestamp): - if (self.has_any_markets()): + if self.has_any_markets(): self.begin_collect_offers(timestamp) else: self.start_offers_no_markets() @@ -186,14 +180,20 @@ def begin_collect_offers(self, timestamp): @RPC.export def make_reservation(self, market_name, buyer_seller): - identity = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") + import time + start = time.time() + + identity = bytes(self.vip.rpc.context.vip_message.peer, "utf8") log_message = "Received {} reservation for market {} from agent {}".format(buyer_seller, market_name, identity) _log.debug(log_message) - if (self.state == COLLECT_RESERVATIONS): + if self.state == COLLECT_RESERVATIONS: self.accept_reservation(buyer_seller, identity, market_name) else: self.reject_reservation(buyer_seller, identity, market_name) + end = time.time() + print(end - start) + def accept_reservation(self, buyer_seller, identity, market_name): _log.info("Reservation on Market: {} {} made by {} was accepted.".format(market_name, buyer_seller, identity)) participant = MarketParticipant(buyer_seller, identity) @@ -205,10 +205,10 @@ def reject_reservation(self, buyer_seller, identity, market_name): @RPC.export def make_offer(self, market_name, buyer_seller, offer): - identity = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") + identity = bytes(self.vip.rpc.context.vip_message.peer, "utf8") log_message = "Received {} offer for market {} from agent {}".format(buyer_seller, market_name, identity) _log.debug(log_message) - if (self.state == COLLECT_OFFERS): + if self.state == COLLECT_OFFERS: self.accept_offer(buyer_seller, identity, market_name, offer) else: self.reject_offer(buyer_seller, identity, market_name, offer) @@ -227,9 +227,11 @@ def has_any_markets(self): unformed_markets = self.market_list.unformed_market_list() return len(unformed_markets) < self.market_list.market_count() + def main(): """Main method called to start the agent.""" - utils.vip_main(market_service_agent, identity=PLATFORM_MARKET_SERVICE, + utils.vip_main(MarketServiceAgent, + identity=PLATFORM_MARKET_SERVICE, version=__version__) diff --git a/services/core/MarketServiceAgent/market_service/director.py b/services/core/MarketServiceAgent/market_service/director.py index c96759fa72..55679d6890 100644 --- a/services/core/MarketServiceAgent/market_service/director.py +++ b/services/core/MarketServiceAgent/market_service/director.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MarketServiceAgent/market_service/market.py b/services/core/MarketServiceAgent/market_service/market.py index af2f181137..ed397bca80 100644 --- a/services/core/MarketServiceAgent/market_service/market.py +++ b/services/core/MarketServiceAgent/market_service/market.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,23 +46,29 @@ import logging from transitions import Machine from volttron.platform.agent import utils -from market_service.offer_manager import OfferManager -from market_service.reservation_manager import ReservationManager from volttron.platform.agent.base_market_agent.error_codes import NOT_FORMED, SHORT_OFFERS, BAD_STATE, NO_INTERSECT from volttron.platform.agent.base_market_agent.buy_sell import BUYER, SELLER from volttron.platform.messaging.topics import MARKET_AGGREGATE, MARKET_CLEAR, MARKET_ERROR, MARKET_RECORD +from .offer_manager import OfferManager +from .reservation_manager import ReservationManager + _tlog = logging.getLogger('transitions.core') _tlog.setLevel(logging.WARNING) _log = logging.getLogger(__name__) utils.setup_logging() + class MarketFailureError(Exception): """Base class for exceptions in this module.""" - def __init__(self, market_name, market_state, object_type): + def __init__(self, market_name, market_state, object_type, participant): + name = role = '' + if participant is not None: + name = participant.identity + role = participant.buyer_seller super(MarketFailureError, self).__init__('The market {} is not accepting {} ' - 'at this time. The state is {}.'.format(market_name, - object_type, market_state)) + 'at this time. The state is {}. Participant info: {} {}.'.format(market_name, + object_type, market_state, name, role)) class Market(object): @@ -120,7 +126,7 @@ def make_reservation(self, participant): self.receive_reservation() market_already_formed = self.has_market_formed() if self.state not in [ACCEPT_RESERVATIONS, ACCEPT_RESERVATIONS_HAS_FORMED]: - raise MarketFailureError(self.market_name, self.state, 'reservations') + raise MarketFailureError(self.market_name, self.state, 'reservations', participant) self.reservations.make_reservation(participant) if self.verbose_logging: if participant.buyer_seller == BUYER: @@ -136,7 +142,6 @@ def make_reservation(self, participant): participant.buyer_seller, self.state)) - def make_offer(self, participant, curve): if self.verbose_logging: _log.debug("Make offer Market: {} {} entered in state {}".format(self.market_name, @@ -147,7 +152,7 @@ def make_offer(self, participant, curve): else: self.receive_buy_offer() if self.state not in [ACCEPT_ALL_OFFERS, ACCEPT_BUY_OFFERS, ACCEPT_SELL_OFFERS]: - raise MarketFailureError(self.market_name, self.state, 'offers') + raise MarketFailureError(self.market_name, self.state, 'offers', participant) self.reservations.take_reservation(participant) if self.verbose_logging: if participant.buyer_seller == BUYER: @@ -162,10 +167,14 @@ def make_offer(self, participant, curve): self.last_sell_offer() else: self.last_buy_offer() + + # Aggregate curve aggregate_curve = self.offers.aggregate_curves(participant.buyer_seller) if self.verbose_logging: _log.debug("Report aggregate Market: {} {} Curve: {}".format(self.market_name, participant.buyer_seller, aggregate_curve.tuppleize())) + + # Publish message with clearing price & aggregate curve if aggregate_curve is not None: timestamp = self._get_time() timestamp_string = utils.format_timestamp(timestamp) @@ -175,6 +184,7 @@ def make_offer(self, participant, curve): participant.buyer_seller, aggregate_curve.tuppleize()]) if self.is_market_done(): self.clear_market() + if self.verbose_logging: _log.debug("Make offer Market: {} {} exited in state {}".format(self.market_name, participant.buyer_seller, @@ -191,17 +201,21 @@ def clear_market(self): aux = {} if (self.state in [ACCEPT_ALL_OFFERS, ACCEPT_BUY_OFFERS, ACCEPT_SELL_OFFERS]): error_code = SHORT_OFFERS - error_message = 'The market {} failed to recieve all the expected offers. The state is {}.'.format(self.market_name, self.state) + error_message = 'The market {} failed to receive all the expected offers. ' \ + 'The state is {}.'.format(self.market_name, self.state) elif (self.state != MARKET_DONE): error_code = BAD_STATE - error_message = 'Programming error in Market class. State of {} and clear market signal arrived. This represents a logic error.'.format(self.state) + error_message = 'Programming error in Market class. State of {} and clear market signal arrived. ' \ + 'This represents a logic error.'.format(self.state) else: if not self.has_market_formed(): error_code = NOT_FORMED error_message = 'The market {} has not received a buy and a sell reservation.'.format(self.market_name) else: quantity, price, aux = self.offers.settle() - if price is None: + _log.info("Clearing mixmarket: {} Price: {} Qty: {}".format(self.market_name, price, quantity)) + aux = {} + if price is None or quantity is None: error_code = NO_INTERSECT error_message = "Error: The supply and demand curves do not intersect. The market {} failed to clear.".format(self.market_name) _log.info("Clearing price for Market: {} Price: {} Qty: {}".format(self.market_name, price, quantity)) diff --git a/services/core/MarketServiceAgent/market_service/market_list.py b/services/core/MarketServiceAgent/market_service/market_list.py index 45934f0b13..994f544806 100644 --- a/services/core/MarketServiceAgent/market_service/market_list.py +++ b/services/core/MarketServiceAgent/market_service/market_list.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,15 +39,17 @@ import logging from volttron.platform.agent import utils -from market_service.market import Market +from .market import Market _log = logging.getLogger(__name__) utils.setup_logging() + class NoSuchMarketError(Exception): """Base class for exceptions in this module.""" pass + class MarketList(object): def __init__(self, publish = None, verbose_logging = True): self.markets = {} @@ -70,7 +72,7 @@ def clear_reservations(self): self.markets.clear() def collect_offers(self): - for market in self.markets.values(): + for market in list(self.markets.values()): market.collect_offers() def get_market(self, market_name): @@ -91,7 +93,7 @@ def has_market_formed(self, market_name): return market_has_formed def send_market_failure_errors(self): - for market in self.markets.values(): + for market in list(self.markets.values()): # We have already sent unformed market failures if market.has_market_formed(): # If the market has not cleared trying to clear it will send an error. @@ -102,9 +104,8 @@ def market_count(self): return len(self.markets) def unformed_market_list(self): - list = [] - for market in self.markets.values(): - if not market.has_market_formed(): - list.append(market.market_name) - return list - + _list = [] + for market in list(self.markets.values()): + if not market.has_market_formed(): + _list.append(market.market_name) + return _list diff --git a/services/core/MarketServiceAgent/market_service/market_participant.py b/services/core/MarketServiceAgent/market_service/market_participant.py index 3951b702f1..9365bf0cdc 100644 --- a/services/core/MarketServiceAgent/market_service/market_participant.py +++ b/services/core/MarketServiceAgent/market_service/market_participant.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -38,6 +38,7 @@ from volttron.platform.agent.base_market_agent.buy_sell import BUYER, SELLER + class MarketParticipant(object): def __init__(self, buyer_seller, identity): self.buyer_seller = buyer_seller diff --git a/services/core/MarketServiceAgent/market_service/market_state.py b/services/core/MarketServiceAgent/market_service/market_state.py index 1899200ffc..0724714941 100644 --- a/services/core/MarketServiceAgent/market_service/market_state.py +++ b/services/core/MarketServiceAgent/market_service/market_state.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -38,6 +38,7 @@ ACCEPT_RESERVATIONS = 0 + class MarketStateMachine(object): def __init__(self, market_name, reservation): diff --git a/services/core/MarketServiceAgent/market_service/offer_manager.py b/services/core/MarketServiceAgent/market_service/offer_manager.py index 40ab479e19..01aeec9136 100644 --- a/services/core/MarketServiceAgent/market_service/offer_manager.py +++ b/services/core/MarketServiceAgent/market_service/offer_manager.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,6 +46,7 @@ _log = logging.getLogger(__name__) utils.setup_logging() + class OfferManager(object): def __init__(self): @@ -67,7 +68,8 @@ def aggregate_curves(self, buyer_seller): return curve def _aggregate(self, collection): - curve = PolyLineFactory.combine(collection, self.increment) +# curve = PolyLineFactory.combine(collection, self.increment) + curve = PolyLineFactory.combine_withoutincrement(collection) return curve def settle(self): diff --git a/services/core/MarketServiceAgent/market_service/reservation_manager.py b/services/core/MarketServiceAgent/market_service/reservation_manager.py index 73d1e01d47..efa0096591 100644 --- a/services/core/MarketServiceAgent/market_service/reservation_manager.py +++ b/services/core/MarketServiceAgent/market_service/reservation_manager.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,10 +36,12 @@ # under Contract DE-AC05-76RL01830 # }}} + class MarketReservationError(Exception): """Base class for exceptions in this module.""" pass + class ReservationManager(object): def __init__(self): diff --git a/services/core/MarketServiceAgent/setup.py b/services/core/MarketServiceAgent/setup.py index 6783705b24..38bbd09f51 100644 --- a/services/core/MarketServiceAgent/setup.py +++ b/services/core/MarketServiceAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MarketServiceAgent/tests/test_market.py b/services/core/MarketServiceAgent/tests/test_market.py index 99709394c0..b0d240ea95 100644 --- a/services/core/MarketServiceAgent/tests/test_market.py +++ b/services/core/MarketServiceAgent/tests/test_market.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,27 +44,32 @@ except ImportError: pytest.skip("Market service requirements not installed.", allow_module_level=True) + @pytest.mark.market def test_market_state_create_name(): market_name = 'test_market' market = build_test_machine(market_name, BUYER) assert market_name == market.market_name -def build_test_machine(market_name = 'test_market', buyer_seller = BUYER): + +def build_test_machine(market_name='test_market', buyer_seller = BUYER): participant = MarketParticipant(buyer_seller, 'agent_id') publisher = Publisher() market = Market(market_name, participant, publisher.publish) return market + @pytest.mark.market def test_market_state_create_state(): market = build_test_machine() assert market.state == ACCEPT_RESERVATIONS + @pytest.mark.market def test_market_state_create_has_formed_false(): market = build_test_machine() - assert market.has_market_formed() == False + assert market.has_market_formed() is False + @pytest.mark.market def test_market_state_create_has_formed_true(): @@ -74,6 +79,7 @@ def test_market_state_create_has_formed_true(): market.make_reservation(participant) assert market.has_market_formed() + class Publisher(object): def __init__(self): pass diff --git a/services/core/MarketServiceAgent/tests/test_market_list.py b/services/core/MarketServiceAgent/tests/test_market_list.py index 212c1bef59..3255ef248d 100644 --- a/services/core/MarketServiceAgent/tests/test_market_list.py +++ b/services/core/MarketServiceAgent/tests/test_market_list.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -54,7 +54,7 @@ @pytest.mark.market def test_market_participants_no_market(): market_list = MarketList() - assert market_list.has_market('no_market') == False + assert market_list.has_market('no_market') is False @pytest.mark.market @@ -63,14 +63,14 @@ def test_market_participants_has_market(): market_name = 'test_market' seller_participant = MarketParticipant(SELLER, 'agent_id') market_list.make_reservation(market_name, seller_participant) - assert market_list.has_market(market_name) == True + assert market_list.has_market(market_name) is True @pytest.mark.market def test_market_participants_market_not_formed_no_market(): market_list = MarketList() market_name = 'test_market' - assert market_list.has_market_formed(market_name) == False + assert market_list.has_market_formed(market_name) is False @pytest.mark.market @@ -79,7 +79,7 @@ def test_market_participants_market_not_formed_one_seller(): market_name = 'test_market' seller_participant = MarketParticipant(SELLER, 'agent_id') market_list.make_reservation(market_name, seller_participant) - assert market_list.has_market_formed(market_name) == False + assert market_list.has_market_formed(market_name) is False @pytest.mark.market @@ -127,4 +127,3 @@ def test_market_unformed_market_list(): assert market_list.market_count() == 2 unformed_markets = market_list.unformed_market_list() assert len(unformed_markets) > 0 - diff --git a/services/core/MarketServiceAgent/tests/test_market_service_agent.py b/services/core/MarketServiceAgent/tests/test_market_service_agent.py new file mode 100644 index 0000000000..e97bbe10f4 --- /dev/null +++ b/services/core/MarketServiceAgent/tests/test_market_service_agent.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json +import gevent +import pytest + +from market_service.market_participant import MarketParticipant +from volttron.platform.agent.base_market_agent.buy_sell import BUYER +from volttron.platform.messaging.health import STATUS_GOOD +from volttron.platform import get_services_core + +""" +Integration tests for Market Service Agent +""" + + +@pytest.mark.market +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("MarketServiceAgent"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance.install_agent( + agent_dir=get_services_core("MarketServiceAgent"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + # perform basic sanity check + market_name = 'test_market' + buyer_participant = MarketParticipant(BUYER, 'agent_id1') + + publish_agent.vip.rpc.call("health_test", "make_reservation", market_name, buyer_participant.buyer_seller) diff --git a/services/core/MarketServiceAgent/tests/test_offer.py b/services/core/MarketServiceAgent/tests/test_offer.py index 8c064d7ce9..c51c29520d 100644 --- a/services/core/MarketServiceAgent/tests/test_offer.py +++ b/services/core/MarketServiceAgent/tests/test_offer.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -45,6 +45,7 @@ except ImportError: pytest.skip("Market service requirements not installed.", allow_module_level=True) + @pytest.mark.market def test_offer_settle_no_intersection(): demand1 = create_demand_curve() @@ -59,8 +60,8 @@ def create_demand_curve(): demand_curve = PolyLine() price = 0 quantity = 1000 - demand_curve.add(Point(price,quantity)) + demand_curve.add(Point(price, quantity)) price = 1000 quantity = 0 - demand_curve.add(Point(price,quantity)) + demand_curve.add(Point(price, quantity)) return demand_curve diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/universal.py b/services/core/MasterDriverAgent/master_driver/interfaces/universal.py deleted file mode 100644 index bb052aafdc..0000000000 --- a/services/core/MasterDriverAgent/master_driver/interfaces/universal.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- {{{ -# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: -# -# Copyright 2019, Battelle Memorial Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This material was prepared as an account of work sponsored by an agency of -# the United States Government. Neither the United States Government nor the -# United States Department of Energy, nor Battelle, nor any of their -# employees, nor any jurisdiction or organization that has cooperated in the -# development of these materials, makes any warranty, express or -# implied, or assumes any legal liability or responsibility for the accuracy, -# completeness, or usefulness or any information, apparatus, product, -# software, or process disclosed, or represents that its use would not infringe -# privately owned rights. Reference herein to any specific commercial product, -# process, or service by trade name, trademark, manufacturer, or otherwise -# does not necessarily constitute or imply its endorsement, recommendation, or -# favoring by the United States Government or any agency thereof, or -# Battelle Memorial Institute. The views and opinions of authors expressed -# herein do not necessarily state or reflect those of the -# United States Government or any agency thereof. -# -# PACIFIC NORTHWEST NATIONAL LABORATORY operated by -# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY -# under Contract DE-AC05-76RL01830 -# }}} - -''' - -------------------------------------------------------------------------------- - History - 03/30/16 - Initial. - 08/15/16 - Remove whitespace in config file. - 10/11/16 - Pass only device_id to VehicleDriver. - 03/01/17 - Call agent.GetPoint in get_point. - 04/17/17 - Updated for Volttron 4.0. -------------------------------------------------------------------------------- -''' -__author1__ = 'Carl Miller ' -__copyright__ = 'Copyright (c) 2019, Battelle Memorial Institute' -__license__ = 'Apache 2.0' -__version__ = '0.2.0' - -import random -from volttron.platform.agent import utils -try: - from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert -except: - from services.core.MasterDriverAgent.master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert - -from csv import DictReader -from StringIO import StringIO -import gevent -import logging -import sys -import os - -# set DRIVER_PATH to path to your specific driver agent -DRIVER_PATH = "/home/volttron/GridAgents/VolttronAgents/Drivers" -sys.path.insert( 0, DRIVER_PATH ) -from heaters.agent import HeaterDriver -from meters.agent import MeterDriver -from hvac.agent import ThermostatDriver -from blinds.agent import BlindsDriver -from vehicles.agent import VehicleDriver - -_log = logging.getLogger(__name__) - -# UDI - Universal Driver Interface -class Interface(BasicRevert, BaseInterface): - def __init__(self, **kwargs): - super(Interface, self).__init__(**kwargs) - # the following are new in bacnet 4.0 driver, do we need to do too? - #self.register_count = 10000 - #self.register_count_divisor = 1 - - self.agent = None - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('-v', '--verbose', action='count' , dest="verbosity", default=0) - args = parser.parse_args() - self._verboseness = args.verbosity - - if( self._verboseness == 0 ): - verbiage = logging.ERROR - if( self._verboseness == 1 ): - verbiage = logging.WARNING # '-v' - elif( self._verboseness == 2 ): - verbiage = logging.INFO # '-vv' - elif( self._verboseness >= 3 ): - verbiage = logging.DEBUG # '-vvv' - _log.setLevel(verbiage) - ''' - config_dict: 'filename'.config, specified in the 'master-driver.agent' file. - registry_config_str: points csv file - def configure(self, config_dict, registry_config_str): - when 4.0 master driver is started, class ConfigStore is instantiated: - volttron/platform/vip/agent/subsystems/configstore.py which exports initial_update() - which calls volttron/platform/store.py: def get_configs(self): - self.vip.rpc.call(identity, "config.initial_update" sets list of registry_configs - - scripts/install_master_driver_configs.py calls 'manage_store' rpc, which is in volttron/platform/store.py - which calls process_raw_config(), which stores it as a dict. - process_raw_config() is also called by process_store() in store.py - when the platform starts ( class ConfigStoreService): - processing_raw_config 'registry_configs/meter.csv' (config_type: csv) - process_store() is called by _setup using a 'PersistentDict', i.e.: - store_path '/home/carl/.volttron/configuration_store/platform.driver.store' - - install_master_driver_configs.py stores them as config_type="csv", it is useful for batch processing alot - of files at once, like when upgrading from 3.5 to 4.0 - - to add single config to store, activate and start platform then: - List current configs: - volttron-ctl config list platform.driver - config - devices/PNNL/LABHOME_B/METER1 - registry_configs/meter.csv - Delete current configs: - volttron-ctl config delete platform.driver registry_configs/meter.csv # note lack of prefix './GridAgents/configs/' - volttron-ctl config delete platform.driver devices/PNNL/LABHOME_B/METER1 - To store the driver configuration run the command: - delete any files from ../GridAgents/configs - volttron-ctl config store platform.driver devices/PNNL/LABHOME_B ../GridAgents/configs/devices/PNNL/LABHOME_B/METER1 - - To store the registry configuration run the command (note the **--raw option) - volttron-ctl config store platform.driver registry_configs/meter.csv ../GridAgents/configs/registry_configs/meter.csv --raw - - ***** NOTE: you MUST install the csv file in --raw mode for universal drivers. ***** - - ''' - - def configure(self, config_dict, registry_config_dict): # 4.0 passes in a reg DICT not string now - try: - device_type = config_dict['device_type'] - ''' see ./volttron/volttron/platform/vip/agent/__init__.py for Agent object definition - every agent has a .core and .vip: - vip.ping - vip.rpc - vip.hello - vip.pubsub - vip.health - vip.heartbeat - vip.config - ''' - if(device_type == "heater" ): - self.agent = HeaterDriver(None, config_dict['device_id'] ) - elif( device_type == "meter" ): - self.agent = MeterDriver( None, config_dict['device_id'], ) - elif( device_type == "thermostat" ): - self.agent = ThermostatDriver( None, config_dict['device_id'] ) - elif( device_type == "blinds" ): - self.agent = BlindsDriver( None, config_dict['device_id'] ) - elif( device_type == "vehicle" ): - self.agent = VehicleDriver( None, config_dict['device_id'] ) - else: - raise RuntimeError("Unsupported Device Type: '{}'".format(device_type)) - - self.parse_config(self.agent, device_type, config_dict, registry_config_dict) - - event = gevent.event.Event() - gevent.spawn(self.agent.core.run, event) - event.wait(timeout=5) - - except KeyError as e: - _log.fatal("configure Failed accessing Key({}) in configuration file: {}".format(e,config_dict)) - raise SystemExit - - except RuntimeError as e: - _log.fatal("configure Failed using configuration file: {}".format(config_dict)) - raise SystemExit(e) - - except Exception as e: - _log.fatal("configure Failed({}) using configuration file: {}".format(e,config_dict)) - raise SystemExit - - # get_point - def get_point(self, point_name): - register = self.get_register_by_name(point_name) - value = self.agent.GetPoint( register ) - #if( self._verboseness == 2 ): - # _log.debug( "Universal get_point called for '{}', value: {}.".format(point_name, value)) - return value - - # _set_point - def _set_point(self, point_name, value): - register = self.get_register_by_name(point_name) - if register.read_only: - raise IOError("Trying to write to a point configured read only: " + point_name) - - if( self.agent.SetPoint( register, value ) ): - register._value = register.reg_type(value) - self.point_map[point_name]._value = register._value - return register._value - - # this gets called periodically via DriverAgent::periodic_read() - # ( on behalf of MasterDriverAgent ) - def _scrape_all(self): - result = {} - read_registers = self.get_registers_by_type("byte", True) - write_registers = self.get_registers_by_type("byte", False) - for register in read_registers + write_registers: - if( self._verboseness == 2 ): - _log.info( "Universal Scraping Value for '{}': {}".format(register.point_name, register._value)) - result[register.point_name] = register._value - return result - - # this set each register to its default value (if it has one) - def _reset_all(self): - read_registers = self.get_registers_by_type("byte", True) - write_registers = self.get_registers_by_type("byte", False) - for register in read_registers + write_registers: - old_value = register._value - register._value = register._default_value - #_log.info( "point_map[register]._value = {}".format(self.point_map[register.point_name]._value)) - if( self._verboseness == 2 ): - _log.info( "Hardware not reachable, Resetting Value for '{}' from {} to {}".format(register.point_name, old_value, register._value)) - - ''' - We maybe could have used revert_point( register.point_name ), but that is more for reverting the hardware to its default - value (calls set_point, which complains for read_only points), _reset_all is used to set the registry values to a default - when the hardware is not reachable.... - - if register in self.defaults: - self.point_map[register]._value = self.defaults[register] - if( self._verboseness == 2 ): - _log.info( "Universal Resetting Value for '{}' from {} to {}".format(register.point_name, old_value, register._value)) - else: - if( self._verboseness == 2 ): - _log.info( "No Default Value Found while Resetting '{}'.".format(register.point_name)) - ''' - - ''' - parse_config - ***** NOTE: you MUST install the csv file in --raw mode for universal drivers. ***** - volttron-ctl config store platform.driver registry_configs/meter.csv - ../GridAgents/configs/registry_configs/meter.csv --raw - ''' - def parse_config(self, agent, device_type, config_dict, reg_config_str): - if reg_config_str is None: - return - - config_str = (utils.strip_comments(reg_config_str).lstrip()).rstrip() - - import re - # remove whitespace after delimiter, but not within delimited value: - config_str = re.sub(r',[\s]+', ',', config_str) - - # remove trailing whitespace within delimited value: - config_str = re.sub(r'[\s]+,', ',', config_str) - - # remove trailing whitespace at end of line: - # re.MULTILINE - When specified, '^' matches the beginning of the string andbeginning of each line (immediately following each newline) - # and '$' matches end of the string and end of each line (immediately preceding each newline). - config_str = re.sub(r'[\s]+$', '', config_str, flags=re.MULTILINE) - - _log.debug('Configuring {} Driver with {} and config_str {}'.format(device_type, config_dict, config_str)) - - f = StringIO(config_str) - regDict = DictReader(f) - - agent.ConfigureAgent(self, config_dict, regDict ) diff --git a/services/core/MongodbAggregateHistorian/README.md b/services/core/MongodbAggregateHistorian/README.md new file mode 100644 index 0000000000..19570a2f6a --- /dev/null +++ b/services/core/MongodbAggregateHistorian/README.md @@ -0,0 +1,114 @@ +# Mongo Aggregate Historian + +An aggregate historian computes aggregates of data stored in a given +volttron historian\'s data store. It runs periodically to compute +aggregate data and store it in new tables/collections in the +historian\'s data store. Each historian implementation would use a +corresponding aggregate historian to compute and store aggregates. + +Aggregates can be defined for a specific time interval and can be +calculated for one or more topics. For example, 15 minute average of +topic1 or 15 minute average of values of topic1 and topic2. Current +version of this agent only computes aggregates supported by underlying +data store. When aggregation is done over more than one topic a unique +aggregation topic name should be configured by user. This topic name can +be used in historian\'s query api to query the collected aggregate data. + +Note: This agent doesn\'t not compute dynamic aggregates. It is only +useful when you know what kind of aggregate you would need before hand +and have them be collected periodically so that retrieval of that data +at a later point would be faster + +## Data flow between historian and aggregate historian + +> 1. Historian collects data from devices and stores it in its data +> store +> 2. Aggregate historian periodically queries historian\'s data store +> for data within configured time period. +> 3. Aggregate historian computes aggregates and stores it in +> historian\'s data store +> 4. Historian\'s query api queries aggregate data when used with +> additional parameters - agg_type, agg_period + +## Configuration + +``` {.python} +{ + # configuration from mongo historian - START + "connection": { + "type": "mongodb", + "params": { + "host": "localhost", + "port": 27017, + "database": "test_historian", + "user": "historian", + "passwd": "historian" + } + }, + # configuration from mongo historian - END + # If you are using a differnt historian(mysql, sqlite etc.) replace the + # above with connection details from the corresponding historian. + # the rest of the configuration would be the same for all aggregate + # historians + + "aggregations":[ + # list of aggregation groups each with unique aggregation_period and + # list of points that needs to be collected. value of "aggregations" is + # a list. you can configure this agent to collect multiple aggregates. + # aggregation_time_periiod + aggregation topic(s) together uniquely + # identify an aggregation + + { + # can be minutes(m), hours(h), weeks(w), or months(M) + + "aggregation_period": "1m", + + # Should aggregation period align to calendar time periods. + # Default False + # Example, + # if "aggregation_period":"1h" and "use_calendar_time_periods": False + # example periods: 10.15-11.15, 11.15-12.15, 12.15-13.15 etc. + # if "aggregation_period":"1h" and "use_calendar_time_periods": True + # example periods: 10.00-11.00, 11.00-12.00, 12.00-13.00 etc. + + "use_calendar_time_periods": "true", + + # topics to be aggregated + + "points": [ + { + # here since aggregation is done over a single topic + # name same topic name is used for the aggregation topic + "topic_names": ["device1/out_temp"], + "aggregation_type": "sum", + #minimum required records in the aggregation time + #period for aggregate to be recorded + "min_count": 2 + }, + { + "topic_names": ["device1/in_temp"], + "aggregation_type": "sum", + "min_count": 2 + } + ] + }, + { + "aggregation_period": "2m", + "use_calendar_time_periods": "false", + "points": [ + { + # aggregation over more than one topic so + # aggregation_topic_name should be specified + "topic_names": ["Building/device/point1", "Building/device/point2"], + "aggregation_topic_name":"building/device/point1_2/month_sum", + "aggregation_type": "avg", + "min_count": 2 + } + ] + } + ] +} +``` + +## See Also +[Aggregate Historian Specification](https://volttron.readthedocs.io/en/develop/developing-volttron/developing-agents/specifications/aggregate.html) diff --git a/services/core/MongodbAggregateHistorian/README.rst b/services/core/MongodbAggregateHistorian/README.rst deleted file mode 100644 index f967de1e61..0000000000 --- a/services/core/MongodbAggregateHistorian/README.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. _Mongo_Aggregate_Historian: - -========================= -Mongo Aggregate Historian -========================= - -An aggregate historian computes aggregates of data stored in a given volttron -historian's data store. It runs periodically to compute aggregate data -and store it in new tables/collections in the historian's data store. Each -historian implementation would use a corresponding aggregate historian to -compute and store aggregates. - -Aggregates can be defined for a specific time interval and can be calculated -for one or more topics. For example, 15 minute average of topic1 or 15 minute -average of values of topic1 and topic2. Current version of this agent only -computes aggregates supported by underlying data store. When aggregation is -done over more than one topic a unique aggregation topic name should be -configured by user. This topic name can be used in historian's query api to -query the collected aggregate data. - -Note: This agent doesn't not compute dynamic aggregates. It is only useful when -you know what kind of aggregate you would need before hand and have them be -collected periodically so that retrieval of that data at a later point would be -faster - -Data flow between historian and aggregate historian ---------------------------------------------------- - - 1. Historian collects data from devices and stores it in its data store - 2. Aggregate historian periodically queries historian's data store for data - within configured time period. - 3. Aggregate historian computes aggregates and stores it in historian's - data store - 3. Historian's query api queries aggregate data when used with additional - parameters - agg_type, agg_period - -Configuration -------------- - -.. code-block:: python - - { - # configuration from mongo historian - START - "connection": { - "type": "mongodb", - "params": { - "host": "localhost", - "port": 27017, - "database": "test_historian", - "user": "historian", - "passwd": "historian" - } - }, - # configuration from mongo historian - END - # If you are using a differnt historian(mysql, sqlite etc.) replace the - # above with connection details from the corresponding historian. - # the rest of the configuration would be the same for all aggregate - # historians - - "aggregations":[ - # list of aggregation groups each with unique aggregation_period and - # list of points that needs to be collected. value of "aggregations" is - # a list. you can configure this agent to collect multiple aggregates. - # aggregation_time_periiod + aggregation topic(s) together uniquely - # identify an aggregation - - { - # can be minutes(m), hours(h), weeks(w), or months(M) - - "aggregation_period": "1m", - - # Should aggregation period align to calendar time periods. - # Default False - # Example, - # if "aggregation_period":"1h" and "use_calendar_time_periods": False - # example periods: 10.15-11.15, 11.15-12.15, 12.15-13.15 etc. - # if "aggregation_period":"1h" and "use_calendar_time_periods": True - # example periods: 10.00-11.00, 11.00-12.00, 12.00-13.00 etc. - - "use_calendar_time_periods": "true", - - # topics to be aggregated - - "points": [ - { - # here since aggregation is done over a single topic - # name same topic name is used for the aggregation topic - "topic_names": ["device1/out_temp"], - "aggregation_type": "sum", - #minimum required records in the aggregation time - #period for aggregate to be recorded - "min_count": 2 - }, - { - "topic_names": ["device1/in_temp"], - "aggregation_type": "sum", - "min_count": 2 - } - ] - }, - { - "aggregation_period": "2m", - "use_calendar_time_periods": "false", - "points": [ - { - # aggregation over more than one topic so - # aggregation_topic_name should be specified - "topic_names": ["Building/device/point1", "Building/device/point2"], - "aggregation_topic_name":"building/device/point1_2/month_sum", - "aggregation_type": "avg", - "min_count": 2 - } - ] - } - ] - } - - -See Also --------- - `AggregateHistorianSpec`_ diff --git a/services/core/MongodbAggregateHistorian/aggregator/aggregator.py b/services/core/MongodbAggregateHistorian/aggregator/aggregator.py index 9fae0b4785..30f3417203 100644 --- a/services/core/MongodbAggregateHistorian/aggregator/aggregator.py +++ b/services/core/MongodbAggregateHistorian/aggregator/aggregator.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MongodbAggregateHistorian/config b/services/core/MongodbAggregateHistorian/config index 02aa107d30..9a683c202d 100644 --- a/services/core/MongodbAggregateHistorian/config +++ b/services/core/MongodbAggregateHistorian/config @@ -1,5 +1,4 @@ { - # configuration from mongo historian - START "connection": { "type": "mongodb", "params": { @@ -10,24 +9,22 @@ "passwd": "historian" } }, - # configuration from mongo historian - END - "aggregations":[ { - "aggregation_period": "2m", - "use_calendar_time_periods": True, - "points": [ - { - "topic_name": "device1/out_temp", - "aggregation_type": "sum", - "min_count": 2 - }, - { - "topic_name": "device1/in_temp", - "aggregation_type": "sum", - "min_count": 2 - } - ] + "aggregation_period": "2m", + "use_calendar_time_periods": true, + "points": [ + { + "topic_name": "device1/out_temp", + "aggregation_type": "sum", + "min_count": 2 + }, + { + "topic_name": "device1/in_temp", + "aggregation_type": "sum", + "min_count": 2 + } + ] } ] -} \ No newline at end of file +} diff --git a/services/core/MongodbAggregateHistorian/conftest.py b/services/core/MongodbAggregateHistorian/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/core/MongodbAggregateHistorian/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/MongodbAggregateHistorian/setup.py b/services/core/MongodbAggregateHistorian/setup.py index 9ec0dbcd76..d2a4ab07cd 100644 --- a/services/core/MongodbAggregateHistorian/setup.py +++ b/services/core/MongodbAggregateHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MongodbHistorian/README.rst b/services/core/MongodbHistorian/README.md similarity index 61% rename from services/core/MongodbHistorian/README.rst rename to services/core/MongodbHistorian/README.md index 1165bc77ff..ce2fbd39de 100644 --- a/services/core/MongodbHistorian/README.rst +++ b/services/core/MongodbHistorian/README.md @@ -1,121 +1,105 @@ -.. _Mongo_Historian: +# Mongo Historian -=============== -Mongo Historian -=============== -This is a historian that stores its data in mongodb. Data is store in three -different collection as +This is a historian that stores its data in mongodb. Data is store in +three different collection as - 1. raw data - 2. hourly grouped/rolled up data - 3. daily grouped/rolled up data +1. raw data +2. hourly grouped/rolled up data +3. daily grouped/rolled up data -The hourly_data and daily_data collections store data grouped together by time -to allow for faster queries. It does not aggregate data (point values). Data -gets loaded into hourly and daily collections through a periodic batch process -and hence might be lagging behind when compared to raw data collection. The -lag time would depend on the load on the system and hence needs to be set in -the configuration. Query API of mongo historian is designed to handle this. It -will combine results from rollup data and raw data table as needed. +The hourly_data and daily_data collections store data grouped together +by time to allow for faster queries. It does not aggregate data (point +values). Data gets loaded into hourly and daily collections through a +periodic batch process and hence might be lagging behind when compared +to raw data collection. The lag time would depend on the load on the +system and hence needs to be set in the configuration. Query API of +mongo historian is designed to handle this. It will combine results from +rollup data and raw data table as needed. -Prerequisites -~~~~~~~~~~~~~ +## Prerequisites -1. Mongodb ----------- +### 1. Mongodb Setup mongodb based on using one of the three below scripts. -1. Install as root on Redhat or Cent OS - - :: - - sudo scripts/historian-scripts/root_install_mongo_rhel.sh - - The above script will prompt user for os version, db user name, password - and database name. Once installed you can start and stop the service - using the command: - - **sudo service mongod [start|stop|service]** - -2. Install as root on Ubuntu - - :: - - sudo scripts/historian-scripts/root_install_mongo_ubuntu.sh - - The above script will prompt user for os version, db user name, password - and database name. Once installed you can start and stop the service - using the command: - - **sudo service mongod [start|stop|service]** - -3. Install as non root user on any Linux machine - - :: - - scripts/historian-scripts/install_mongodb.sh - +1. Install as root on Redhat or Cent OS + ``` + sudo scripts/historian-scripts/root_install_mongo_rhel.sh + ``` + The above script will prompt user for os version, db user name, + password and database name. Once installed you can start and stop + the service using the command: + ``` + sudo service mongod \[start\|stop\|service\] + ``` + +2. Install as root on Ubuntu + ``` + sudo scripts/historian-scripts/root_install_mongo_ubuntu.sh + ``` + + The above script will prompt user for os version, db user name, + password and database name. Once installed you can start and stop + the service using the command: + ``` + sudo service mongod \[start\|stop\|service\] + ``` +3. Install as non root user on any Linux machine + ``` + scripts/historian-scripts/install_mongodb.sh + ``` Usage: - install_mongodb.sh [-h] [-d download_url] [-i install_dir] [-c config_file] [-s] + ``` + install_mongodb.sh \[-h\] \[-d download_url\] \[-i install_dir\] \[-c config_file\] \[-s\] + ``` + Optional arguments: - -s setup admin user and test collection after install and startup - - -d download url. defaults to https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.2.4.tgz - - -i install_dir. defaults to current_dir/mongo_install - - -c config file to be used for mongodb startup. Defaults to - default_mongodb.conf in the same directory as this script.Any datapath - mentioned in the config file should already exist and should have write - access to the current user - - -h print this help message - -2. Create database and user ---------------------------- + + **-s** setup admin user and test collection after install and startup + + **-d** download url. defaults to + + **-i** install_dir. defaults to current_dir/mongo_install + + **-c** config file to be used for mongodb startup. Defaults to default_mongodb.conf in the same directory as this + script. Any datapath mentioned in the config file should already exist and should have write access to the current user + + **-h** print this help message + +### 2. Create database and user You also need to pre-create your database in MongoDB before running this historian. For example, to create a local MongoDB, do the followings in Mongo shell: -- Switch to the new database "volttron_guide": - - :: - - use volttron_guide +- Switch to the new database \"volttron_guide\": + ``` + use volttron_guide + ``` -- Create a new user for "volttron_guide": +- Create a new user for \"volttron_guide\": + ``` + db.createUser({user: "admin", pwd: "admin", roles: ["readWrite"] }) + ``` + - :: +### 3. Mongodb connector - db.createUser({user: "admin", pwd: "admin", roles: ["readWrite"] }) - -3. Mongodb connector --------------------- This historian requires a mongodb connector installed in your activated volttron environment to talk to mongodb. Please execute the following from an activated shell in order to install it. -:: - pip install pymongo The Mongodb Historian also requires the following libraries: -:: - bson, ujson, dateutil And install with -:: - pip install -Configuration -~~~~~~~~~~~~~ -:: +## Configuration { #mandatory connection details @@ -188,5 +172,3 @@ Configuration "rollup_topic_pattern": "^Economizer_RCx|^Airside_RCx" } - - diff --git a/services/core/MongodbHistorian/config b/services/core/MongodbHistorian/config index 2966e6c756..0783d6cdd9 100644 --- a/services/core/MongodbHistorian/config +++ b/services/core/MongodbHistorian/config @@ -6,20 +6,13 @@ "port": 27017, "database": "mongo_test", "user": "test", - "passwd": "test", - "authSource": "admin" + "passwd": "test" } }, "readonly":false, - - ## configurations related to rollup data creation - "initial_rollup_start_time":"2018-01-21T00:00:00.000000", "periodic_rollup_initial_wait":0.1, "periodic_rollup_frequency":1, - - ## configuration related to using rolled up data for queries - "rollup_query_start":"2017-01-01T00:00:00.000000", "rollup_query_end":5, "rollup_topic_pattern": "^Economizer_RCx|^Airside_RCx" diff --git a/services/core/MongodbHistorian/mongodb/historian.py b/services/core/MongodbHistorian/mongodb/historian.py index a3d3537d10..b3f6880826 100644 --- a/services/core/MongodbHistorian/mongodb/historian.py +++ b/services/core/MongodbHistorian/mongodb/historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -65,13 +65,8 @@ from volttron.utils.docs import doc_inherit try: - import ujson + from ujson import dumps, loads - def dumps(data): - return ujson.dumps(data, double_precision=15) - - def loads(data_string): - return ujson.loads(data_string, precise_float=True) except ImportError: from volttron.platform.jsonapi import dumps, loads @@ -335,10 +330,10 @@ def periodic_rollup(self): if d_errors or h_errors: # something failed in bulk write. try from last err # row during the next periodic call - _log.warn("bulk publish errors. last_processed_data would " - "have got recorded in collection. returning from " - "periodic call to try again during next scheduled " - "call") + _log.warning("bulk publish errors. last_processed_data would " + "have got recorded in collection. returning from " + "periodic call to try again during next scheduled " + "call") return gevent.sleep(0.2) @@ -587,7 +582,7 @@ def query_historian(self, topic, start=None, end=None, agg_type=None, topic_ids.append(topic_id) id_name_map[ObjectId(topic_id)] = topic else: - _log.warn('No such topic {}'.format(topic)) + _log.warning('No such topic {}'.format(topic)) if not topic_ids: return {} diff --git a/services/core/MongodbHistorian/requirements.txt b/services/core/MongodbHistorian/requirements.txt index 8d723c0de4..be3baf0119 100644 --- a/services/core/MongodbHistorian/requirements.txt +++ b/services/core/MongodbHistorian/requirements.txt @@ -1,4 +1,3 @@ pymongo bson ujson -dateutil diff --git a/services/core/MongodbHistorian/setup.py b/services/core/MongodbHistorian/setup.py index f513a83c9a..203cd3945b 100644 --- a/services/core/MongodbHistorian/setup.py +++ b/services/core/MongodbHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MongodbHistorian/tests/fixtures.py b/services/core/MongodbHistorian/tests/fixtures.py index fa92e27d8a..2a73a165d3 100644 --- a/services/core/MongodbHistorian/tests/fixtures.py +++ b/services/core/MongodbHistorian/tests/fixtures.py @@ -1,5 +1,3 @@ -import pytest - # Module level variables BASE_DEVICE_TOPIC = "devices/Building/LAB/Device" BASE_ANALYSIS_TOPIC = "analysis/Economizer/Building/LAB/Device" @@ -13,27 +11,28 @@ "host": "localhost", "port": 27017, "database": "mongo_test", - "user": "test", - "passwd": "test" + "user": "historian", + "passwd": "historian", + "authSource": "test" } } } -# @pytest.fixture def mongo_connection_string(): mongo_conn_str = 'mongodb://{user}:{passwd}@{host}:{port}/{database}' + params = mongo_connection_params() + if params.get('authSource'): + mongo_conn_str = mongo_conn_str + '?authSource={authSource}' mongo_conn_str = mongo_conn_str.format(**params) return mongo_conn_str -# @pytest.fixture def mongo_agent_config(): return mongo_platform -# @pytest.fixture def mongo_connection_params(): global mongo_platform mongo_params = mongo_platform['connection']['params'] diff --git a/services/core/MongodbHistorian/tests/test_mongohistorian.py b/services/core/MongodbHistorian/tests/test_mongohistorian.py index 0ae007cf93..b82da3ee33 100644 --- a/services/core/MongodbHistorian/tests/test_mongohistorian.py +++ b/services/core/MongodbHistorian/tests/test_mongohistorian.py @@ -1,21 +1,53 @@ -# Example file using the weather agent. +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Requirements -# - A VOLTTRON instance must be started -# - A weatheragnet must be running prior to running this code. +# Copyright 2020, Battelle Memorial Institute. # -# Author: Craig Allwardt -import random -from datetime import datetime -from datetime import timedelta +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} +import os +import json +import random import gevent import pytest from dateutil.tz import tzutc +from datetime import datetime +from datetime import timedelta from volttron.platform import get_services_core from volttron.platform.agent import utils -from volttron.platform.agent.utils import (get_aware_utc_now, format_timestamp) +from volttron.platform.agent.utils import get_aware_utc_now, format_timestamp from volttron.platform.messaging import headers as headers_mod try: @@ -25,9 +57,9 @@ except: HAS_PYMONGO = False -from fixtures import (ALL_TOPIC, BASE_ANALYSIS_TOPIC, BASE_DEVICE_TOPIC, - mongo_connection_params, mongo_agent_config, - mongo_connection_string) +from services.core.MongodbHistorian.tests.fixtures import (ALL_TOPIC, BASE_ANALYSIS_TOPIC, BASE_DEVICE_TOPIC, + mongo_connection_params, mongo_agent_config, + mongo_connection_string) query_points = {"oat_point": "Building/LAB/Device/OutsideAirTemperature", "mixed_point": "Building/LAB/Device/MixedAirTemperature", @@ -41,8 +73,7 @@ def clean_db(client): # Create a mark for use within params of a fixture. -pymongo_mark = pytest.mark.skipif(not HAS_PYMONGO, - reason='No pymongo client available.') +pymongo_mark = pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo client available.') CLEANUP_CLIENT = True @@ -65,13 +96,11 @@ def close_client(): def install_historian_agent(volttron_instance, config_file): - agent_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MongodbHistorian"), config_file=config_file, - start=True, vip_identity="platform.historian") + agent_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MongodbHistorian"), + config_file=config_file, start=True, vip_identity="platform.historian") return agent_uuid -# # # Fixtures for setup and teardown # @pytest.fixture(scope="module", # params=[ @@ -112,11 +141,10 @@ def database_name(request): @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') def test_can_connect(database_client): - """ Tests whether we can connect to the mongo database at all. - - Test that we can read/write data on the database while we are at it. This - test assumes that the same information that is used in the mongodbhistorian - will be able to used in this test. + """ + Tests whether we can connect to the mongo database at all. Test that we can read/write data on the database while we + are at it. This test assumes that the same information that is used in the mongodbhistorian will be able to used in + this test. """ db = database_client[mongo_connection_params()['database']] result = db.test.insert_one({'x': 1}) @@ -172,33 +200,28 @@ def test_can_connect(database_client): # if agent_uuid is not None: # volttron_instance.remove_agent(agent_uuid) + @pytest.mark.historian @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') def test_two_hours_of_publishing(volttron_instance, database_client): clean_db(database_client) - # Install the historian agent (after this call the agent should be running - # on the platform). - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + # Install the historian agent (after this call the agent should be running on the platform). + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) assert agent_uuid is not None assert volttron_instance.is_agent_running(agent_uuid) try: - # Create a publisher and publish to the message bus some fake data. - # Keep - # track of the published data so that we can query the historian. + # Create a publisher and publish to the message bus some fake data. Keep track of the published data so that we + # can query the historian. publisher = volttron_instance.build_agent() assert publisher is not None expected = publish_minute_data_for_two_hours(publisher) - # The mongo historian now should have published 2 hours worth of data. - # Based upon the structure that we expect the database to be in we - # should - # now have 3 topics present in the database and 2 records for each - # of the + # The mongo historian now should have published 2 hours worth of data. Based upon the structure that we expect + # the database to be in we should now have 3 topics present in the database and 2 records for each of the # 3 data items. db = database_client.get_default_database() @@ -214,8 +237,7 @@ def test_two_hours_of_publishing(volttron_instance, database_client): assert db['data'].find({'ts': d}).count() == 3 for t, _id in topic_to_id.items(): - value = db['data'].find_one({'ts': d, 'topic_id': _id})[ - 'value'] + value = db['data'].find_one({'ts': d, 'topic_id': _id})['value'] assert value == v[t] finally: volttron_instance.stop_agent(agent_uuid) @@ -224,22 +246,19 @@ def test_two_hours_of_publishing(volttron_instance, database_client): def publish_minute_data_for_two_hours(agent): now = get_aware_utc_now() - # expection[datetime]={oat:b,mixed:c,damper:d} + # expectation[datetime]={oat:b,mixed:c,damper:d} expectation = {} for h in range(2): data_by_time = {} for m in range(60): - # Because timestamps in mongo are only concerned with the first - # three digits after the decimal we do this to give some - # randomness here. + # Because timestamps in mongo are only concerned with the first three digits after the decimal we do this to + # give some randomness here. micro = str(random.randint(0, 999999)) - now = datetime(now.year, now.month, now.day, h, m, - random.randint(0, 59), int(micro)) - # Make some random readings. round to 14 digit precision - # as mongo only store 14 digit precision + now = datetime(now.year, now.month, now.day, h, m, random.randint(0, 59), int(micro)) + # Make some random readings. round to 14 digit precision as mongo only store 14 digit precision oat_reading = round(random.uniform(30, 100), 14) mixed_reading = round(oat_reading + random.uniform(-5, 5), 14) damper_reading = round(random.uniform(0, 100), 14) @@ -270,8 +289,7 @@ def publish_minute_data_for_two_hours(agent): } # Publish messages - agent.vip.pubsub.publish( - 'pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) + agent.vip.pubsub.publish('pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) expectation[now] = { query_points['oat_point']: oat_reading, @@ -297,9 +315,6 @@ def publish_fake_data(agent, now=None, value=None): "damper_reading": number } """ - - except_all = ALL_TOPIC[:ALL_TOPIC.rindex('/')] - # Make some random readings if value: oat_reading = value @@ -314,19 +329,15 @@ def publish_fake_data(agent, now=None, value=None): all_message = [{'OutsideAirTemperature': oat_reading, 'MixedAirTemperature': mixed_reading, 'DamperSignal': damper_reading}, { - 'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}, - 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}, - 'DamperSignal': {'units': '%', 'tz': 'UTC', - 'type': 'float'}}] + 'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'}, + 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'}, + 'DamperSignal': {'units': '%', 'tz': 'UTC', 'type': 'float'}}] # Create timestamp (no parameter to isoformat so the result is a T # separator) The now value is a string after this function is called. # now = now.replace(microsecond=random.randint(0,100)) - # now = datetime(now.year, now.month, now.day, now.hour, - # now.minute, now.second) + # now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second) # now = now.isoformat() if not now: now = datetime.utcnow() @@ -339,13 +350,10 @@ def publish_fake_data(agent, now=None, value=None): } # Publish messages - agent.vip.pubsub.publish('pubsub', ALL_TOPIC, headers, all_message).get( - timeout=10) + agent.vip.pubsub.publish('pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) - # The keys for these should be the exact same that are in the query_points - # dictionary. - return {"datetime": now, "oat_point": oat_reading, - "mixed_point": mixed_reading, "damper_point": damper_reading} + # The keys for these should be the exact same that are in the query_points dictionary. + return {"datetime": now, "oat_point": oat_reading, "mixed_point": mixed_reading, "damper_point": damper_reading} @pytest.mark.historian @@ -354,15 +362,11 @@ def publish_fake_data(agent, now=None, value=None): def test_insert_duplicate(volttron_instance, database_client): clean_db(database_client) data_collection = database_client.get_default_database()['data'] - index_model = pymongo.IndexModel( - [("topic_id", pymongo.DESCENDING), ("ts", pymongo.DESCENDING)], - unique=True) + index_model = pymongo.IndexModel([("topic_id", pymongo.DESCENDING), ("ts", pymongo.DESCENDING)], unique=True) # make sure the data collection has the unique constraint. data_collection.create_indexes([index_model]) - # Install the historian agent (after this call the agent should be running - # on the platform). - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + # Install the historian agent (after this call the agent should be running on the platform). + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) assert agent_uuid is not None assert volttron_instance.is_agent_running(agent_uuid) @@ -370,33 +374,28 @@ def test_insert_duplicate(volttron_instance, database_client): oat_reading = round(random.uniform(30, 100), 14) all_message = [{'OutsideAirTemperature': oat_reading}, { - 'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}}] + 'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'}}] publisher = volttron_instance.build_agent() # Create timestamp (no parameter to isoformat so the result is a T # separator) The now value is a string after this function is called. now = get_aware_utc_now() # now = now.replace(microsecond=random.randint(0,100)) - # now = datetime(now.year, now.month, now.day, now.hour, - # now.minute, now.second) + # now = datetime(now.year, now.month, now.day, now.hour, now.minute, now.second) # now = now.isoformat() print('NOW IS: ', now) - # now = '2015-12-02T00:00:00' headers = { headers_mod.DATE: format_timestamp(now), headers_mod.TIMESTAMP: format_timestamp(now) } # Publish messages - publisher.vip.pubsub.publish( - 'pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) + publisher.vip.pubsub.publish('pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) gevent.sleep(0.5) - publisher.vip.pubsub.publish( - 'pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) + publisher.vip.pubsub.publish('pubsub', ALL_TOPIC, headers, all_message).get(timeout=10) finally: volttron_instance.stop_agent(agent_uuid) @@ -414,8 +413,7 @@ def publish_data(publisher, topic, message, now=None): } # Publish messages - publisher.vip.pubsub.publish('pubsub', topic, headers, message).get( - timeout=10) + publisher.vip.pubsub.publish('pubsub', topic, headers, message).get(timeout=10) gevent.sleep(0.5) return now @@ -425,8 +423,7 @@ def publish_data(publisher, topic, message, now=None): @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') def test_analysis_topic(volttron_instance, database_client): - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) try: publisher = volttron_instance.build_agent() @@ -438,15 +435,13 @@ def test_analysis_topic(volttron_instance, database_client): gevent.sleep(0.1) lister = volttron_instance.build_agent() - topic_list = lister.vip.rpc.call('platform.historian', - 'get_topic_list').get(timeout=5) + topic_list = lister.vip.rpc.call('platform.historian', 'get_topic_list').get(timeout=5) assert topic_list is not None assert len(topic_list) == 1 assert 'FluffyWidgets' in topic_list[0] result = lister.vip.rpc.call('platform.historian', 'query', - topic=BASE_ANALYSIS_TOPIC[9:] - + '/FluffyWidgets').get(timeout=5) + topic=BASE_ANALYSIS_TOPIC[9:] + '/FluffyWidgets').get(timeout=5) assert result is not None assert len(result['values']) == 1 assert isinstance(result['values'], list) @@ -461,46 +456,39 @@ def test_analysis_topic(volttron_instance, database_client): @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') def test_get_topic_map(volttron_instance, database_client): - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) try: oat_reading = round(random.uniform(30, 100), 14) all_message = [{'OutsideAirTemperature': oat_reading}, { - 'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}}] + 'OutsideAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'}}] publisher = volttron_instance.build_agent() - publisheddt = publish_data(publisher, ALL_TOPIC, all_message) + publish_data(publisher, ALL_TOPIC, all_message) db = database_client.get_default_database() assert db.topics.count() == 1 lister = volttron_instance.build_agent() - topic_list = lister.vip.rpc.call('platform.historian', - 'get_topic_list').get(timeout=5) + topic_list = lister.vip.rpc.call('platform.historian', 'get_topic_list').get(timeout=5) assert topic_list is not None assert len(topic_list) == 1 # Publish data again for the next point. - publisheddt = publish_data(publisher, ALL_TOPIC, all_message) - topic_list = lister.vip.rpc.call('platform.historian', - 'get_topic_list').get(timeout=5) + publish_data(publisher, ALL_TOPIC, all_message) + topic_list = lister.vip.rpc.call('platform.historian', 'get_topic_list').get(timeout=5) # Same topic shouldn't add anything else. assert topic_list is not None assert len(topic_list) == 1 - assert topic_list[0] == BASE_DEVICE_TOPIC[ - 8:] + '/OutsideAirTemperature' + assert topic_list[0] == BASE_DEVICE_TOPIC[8:] + '/OutsideAirTemperature' mixed_reading = round(random.uniform(30, 100), 14) all_message = [{'MixedAirTemperature': mixed_reading}, { - 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', - 'type': 'float'}}] + 'MixedAirTemperature': {'units': 'F', 'tz': 'UTC', 'type': 'float'}}] - publisheddt = publish_data(publisher, ALL_TOPIC, all_message) - topic_list = lister.vip.rpc.call('platform.historian', - 'get_topic_list').get(timeout=5) + publish_data(publisher, ALL_TOPIC, all_message) + topic_list = lister.vip.rpc.call('platform.historian', 'get_topic_list').get(timeout=5) assert topic_list is not None assert len(topic_list) == 2 @@ -522,18 +510,15 @@ def test_basic_function(volttron_instance, database_client): """ global query_points - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) try: - # print('HOME', volttron_instance.volttron_home) print("\n** test_basic_function **") publish_agent = volttron_instance.build_agent() - # Publish data to message bus that should be recorded in the mongo - # database. - expected = publish_fake_data(publish_agent) + # Publish data to message bus that should be recorded in the mongo database. + publish_fake_data(publish_agent) expected = publish_fake_data(publish_agent) gevent.sleep(0.5) @@ -541,35 +526,29 @@ def test_basic_function(volttron_instance, database_client): result = publish_agent.vip.rpc.call('platform.historian', 'query', topic=query_points['oat_point'], count=20, - order="LAST_TO_FIRST").get( - timeout=100) + order="LAST_TO_FIRST").get(timeout=100) assert expected['datetime'].isoformat()[:-3] + '000+00:00' == result['values'][0][0] assert result['values'][0][1] == expected['oat_point'] result = publish_agent.vip.rpc.call('platform.historian', 'query', topic=query_points['mixed_point'], count=20, - order="LAST_TO_FIRST").get( - timeout=100) + order="LAST_TO_FIRST").get(timeout=100) - assert expected['datetime'].isoformat()[:-3] + '000+00:00' == \ - result['values'][0][0] + assert expected['datetime'].isoformat()[:-3] + '000+00:00' == result['values'][0][0] assert result['values'][0][1] == expected['mixed_point'] result = publish_agent.vip.rpc.call('platform.historian', 'query', topic=query_points['damper_point'], count=20, - order="LAST_TO_FIRST").get( - timeout=100) + order="LAST_TO_FIRST").get(timeout=100) - assert expected['datetime'].isoformat()[:-3] + '000+00:00' == \ - result['values'][0][0] + assert expected['datetime'].isoformat()[:-3] + '000+00:00' == result['values'][0][0] assert result['values'][0][1] == expected['damper_point'] finally: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) - @pytest.mark.historian @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') @@ -579,8 +558,7 @@ def test_topic_name_case_change(volttron_instance, database_client): Expected result: query result should be cases insensitive """ clean_db(database_client) - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) try: publisher = volttron_instance.build_agent() oat_reading = round(random.uniform(30, 100), 14) @@ -591,16 +569,13 @@ def test_topic_name_case_change(volttron_instance, database_client): gevent.sleep(0.1) lister = volttron_instance.build_agent() - topic_list = lister.vip.rpc.call('platform.historian', - 'get_topic_list').get(timeout=5) + topic_list = lister.vip.rpc.call('platform.historian', 'get_topic_list').get(timeout=5) assert topic_list is not None assert len(topic_list) == 1 assert 'FluffyWidgets' in topic_list[0] result = lister.vip.rpc.call('platform.historian', 'query', - topic=BASE_ANALYSIS_TOPIC[ - 9:] + '/FluffyWidgets').get( - timeout=5) + topic=BASE_ANALYSIS_TOPIC[9:] + '/FluffyWidgets').get(timeout=5) assert result is not None assert len(result['values']) == 1 assert isinstance(result['values'], list) @@ -612,8 +587,7 @@ def test_topic_name_case_change(volttron_instance, database_client): 'Fluffywidgets': {'units': 'F', 'tz': 'UTC', 'type': 'float'}}] publisheddt = publish_data(publisher, BASE_ANALYSIS_TOPIC, message) gevent.sleep(0.1) - topic_list = lister.vip.rpc.call('platform.historian', - 'get_topic_list').get(timeout=5) + topic_list = lister.vip.rpc.call('platform.historian', 'get_topic_list').get(timeout=5) assert topic_list is not None assert len(topic_list) == 1 assert 'Fluffywidgets' in topic_list[0] @@ -641,15 +615,13 @@ def test_empty_result(volttron_instance, database_client): When case of a topic name changes check if they are saved as two topics Expected result: query result should be cases insensitive """ - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) try: lister = volttron_instance.build_agent() - result = lister.vip.rpc.call( - 'platform.historian', 'query', - topic=BASE_ANALYSIS_TOPIC[9:] + '/FluffyWidgets').get(timeout=5) + result = lister.vip.rpc.call('platform.historian', 'query', + topic=BASE_ANALYSIS_TOPIC[9:] + '/FluffyWidgets').get(timeout=5) print("query result:", result) assert result == {} finally: @@ -669,46 +641,35 @@ def test_multi_topic(volttron_instance, database_client): """ global query_points - agent_uuid = install_historian_agent(volttron_instance, - mongo_agent_config()) + agent_uuid = install_historian_agent(volttron_instance, mongo_agent_config()) try: - # print('HOME', volttron_instance.volttron_home) print("\n** test_basic_function **") publish_agent = volttron_instance.build_agent() - # Publish data to message bus that should be recorded in the mongo - # database. + # Publish data to message bus that should be recorded in the mongo database. expected_result = {} - values_dict = {query_points['oat_point']: [], - query_points['mixed_point']: []} + values_dict = {query_points['oat_point']: [], query_points['mixed_point']: []} for x in range(0, 5): expected = publish_fake_data(publish_agent) gevent.sleep(0.5) if x < 3: values_dict[query_points['oat_point']].append( - [expected["datetime"].isoformat()[:-3] + '000+00:00', - expected["oat_point"]]) + [expected["datetime"].isoformat()[:-3] + '000+00:00', expected["oat_point"]]) values_dict[query_points['mixed_point']].append( - [expected["datetime"].isoformat()[:-3] + '000+00:00', - expected["mixed_point"]]) + [expected["datetime"].isoformat()[:-3] + '000+00:00', expected["mixed_point"]]) expected_result["values"] = values_dict expected_result["metadata"] = {} # Query the historian - result = publish_agent.vip.rpc.call( - 'platform.historian', 'query', - topic=[query_points['mixed_point'], query_points['oat_point']], - count=3, order="FIRST_TO_LAST").get(timeout=100) + result = publish_agent.vip.rpc.call('platform.historian', 'query', + topic=[query_points['mixed_point'], query_points['oat_point']], + count=3, order="FIRST_TO_LAST").get(timeout=100) - # print("expected result {}".format(expected_result)) - # print("result {}".format(result)) assert result["metadata"] == expected_result["metadata"] - assert result["values"][query_points['mixed_point']] == \ - expected_result["values"][query_points['mixed_point']] - assert result["values"][query_points['oat_point']] == \ - expected_result["values"][query_points['oat_point']] + assert result["values"][query_points['mixed_point']] == expected_result["values"][query_points['mixed_point']] + assert result["values"][query_points['oat_point']] == expected_result["values"][query_points['oat_point']] finally: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) @@ -719,15 +680,14 @@ def test_multi_topic(volttron_instance, database_client): @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') def test_data_rollup_insert(volttron_instance, database_client): """ - Test the creation of rolled up data in hourly, daily and monthly data - tables when data is published for new or existing topics + Test the creation of rolled up data in hourly, daily and monthly data tables when data is published for new or + existing topics :param database_client: :param volttron_instance: The instance against which the test is run """ global query_points agent_uuid = None try: - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") # Clean data and roll up tables @@ -747,8 +707,7 @@ def test_data_rollup_insert(volttron_instance, database_client): publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -757,27 +716,20 @@ def test_data_rollup_insert(volttron_instance, database_client): # ################### # Initialization test # ################### - # Publish data to message bus that should be - # recorded in the mongo - # database. All topics are new - now = datetime(year=2016, month=3, day=1, hour=1, minute=1, - second=1, microsecond=123, tzinfo=tzutc()) + # Publish data to message bus that should be recorded in the mongo database. All topics are new + now = datetime(year=2016, month=3, day=1, hour=1, minute=1, second=1, microsecond=123, tzinfo=tzutc()) expected1 = publish_fake_data(publish_agent, now) - expected2 = publish_fake_data(publish_agent, now + timedelta( - minutes=1)) + expected2 = publish_fake_data(publish_agent, now + timedelta(minutes=1)) # publish again. this time topic is not new. rolled up data should # get append in the array initialized during last publish - expected3 = publish_fake_data(publish_agent, - now + timedelta(minutes=4)) + expected3 = publish_fake_data(publish_agent, now + timedelta(minutes=4)) gevent.sleep(0.5) - result = publish_agent.vip.rpc.call('platform.historian', 'query', - topic=query_points['oat_point'], count=20, + result = publish_agent.vip.rpc.call('platform.historian', 'query', topic=query_points['oat_point'], count=20, order="FIRST_TO_LAST").get(timeout=10) print(result) gevent.sleep(6) # allow for periodic rollup function to catchup - compare_query_results(db, expected1, expected2, expected3, - 'oat_point', result) + compare_query_results(db, expected1, expected2, expected3, 'oat_point', result) finally: if agent_uuid: @@ -806,8 +758,7 @@ def test_rollup_query_with_topic_pattern(volttron_instance, database_client): db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) publish_t2 = publish_t1 + timedelta(minutes=1) publish_t3 = publish_t2 + timedelta(minutes=3) query_end = publish_t3 + timedelta(seconds=2) @@ -820,21 +771,16 @@ def test_rollup_query_with_topic_pattern(volttron_instance, database_client): config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 0 config['periodic_rollup_frequency'] = 2 - config['rollup_query_start'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['rollup_topic_pattern'] = \ - ".*/OutsideAirTemperature|.*/MixedAirTemperature" + config['rollup_query_start'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['rollup_topic_pattern'] = ".*/OutsideAirTemperature|.*/MixedAirTemperature" agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -845,15 +791,13 @@ def test_rollup_query_with_topic_pattern(volttron_instance, database_client): expected3 = publish_fake_data(publish_agent, publish_t3) gevent.sleep(6) - # test query from data table for damper_point - point not in - # rollup_topic_pattern configured + # test query from data table for damper_point - point not in rollup_topic_pattern configured result = publish_agent.vip.rpc.call('platform.historian', 'query', topic=query_points['damper_point'], count=20, start=query_start.isoformat(), end=query_end.isoformat(), order="FIRST_TO_LAST").get(timeout=10) print(result) - compare_query_results(db, expected1, expected2, expected3, - 'damper_point', result) + compare_query_results(db, expected1, expected2, expected3, 'damper_point', result) # test query from hourly_data table # db['data'].drop() @@ -901,7 +845,6 @@ def test_rollup_query_with_topic_pattern(volttron_instance, database_client): # order="FIRST_TO_LAST").get(timeout=10) # assert result == {} - finally: if agent_uuid: volttron_instance.stop_agent(agent_uuid) @@ -929,8 +872,7 @@ def test_rollup_query(volttron_instance, database_client): db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) publish_t2 = publish_t1 + timedelta(minutes=1) publish_t3 = publish_t2 + timedelta(minutes=3) query_end = publish_t3 + timedelta(seconds=2) @@ -943,19 +885,15 @@ def test_rollup_query(volttron_instance, database_client): config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 0 config['periodic_rollup_frequency'] = 0.1 - config['rollup_query_start'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') + config['rollup_query_start'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -977,8 +915,7 @@ def test_rollup_query(volttron_instance, database_client): end=query_end.isoformat(), order="FIRST_TO_LAST").get(timeout=10) print(result) - compare_query_results(db, expected1, expected2, expected3, - 'oat_point', result) + compare_query_results(db, expected1, expected2, expected3, 'oat_point', result) verify_hourly_collection(db, expected1, expected2, expected3) # Check query from daily_data @@ -990,8 +927,7 @@ def test_rollup_query(volttron_instance, database_client): order="LAST_TO_FIRST").get(timeout=10) print(result) - compare_query_results(db, expected3, expected2, expected1, - 'oat_point', result) + compare_query_results(db, expected3, expected2, expected1, 'oat_point', result) verify_daily_collection(db, expected3, expected2, expected1) finally: @@ -1003,13 +939,11 @@ def test_rollup_query(volttron_instance, database_client): @pytest.mark.historian @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_combined_results_from_rollup_and_raw_data(volttron_instance, - database_client): +def test_combined_results_from_rollup_and_raw_data(volttron_instance, database_client): """ - Test querying data with start date earlier than available rollup data - and query end date greater than available rollup data. Historian should - query available data from rolled up collection and get the rest - from raw data collection + Test querying data with start date earlier than available rollup data and query end date greater than available + rollup data. Historian should query available data from rolled up collection and get the rest from raw data + collection :param database_client: :param volttron_instance: The instance against which the test is run """ @@ -1024,8 +958,7 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) publish_t2 = publish_t1 + timedelta(minutes=1) publish_t3 = utils.get_aware_utc_now() # query time period should be greater than 3 hours for historian to use @@ -1037,19 +970,15 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 1 config['periodic_rollup_frequency'] = 1 - config['rollup_query_start'] = publish_t2.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = publish_t2.strftime( - '%Y-%m-%dT%H:%M:%S.%f') + config['rollup_query_start'] = publish_t2.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = publish_t2.strftime('%Y-%m-%dT%H:%M:%S.%f') agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -1064,10 +993,7 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, # Remove publish_t2 entry from data collection print("removing {}".format(publish_t2)) db['data'].remove({'ts': publish_t2}) - db['daily_data'].remove({'ts': publish_t3.replace(hour=0, - minute=0, - second=0, - microsecond=0)}) + db['daily_data'].remove({'ts': publish_t3.replace(hour=0, minute=0, second=0, microsecond=0)}) # Check query result = publish_agent.vip.rpc.call('platform.historian', 'query', @@ -1076,8 +1002,7 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, end=query_end.isoformat(), order="LAST_TO_FIRST").get(timeout=10) print(result) - compare_query_results(db, expected3, expected2, expected1, - 'oat_point', result) + compare_query_results(db, expected3, expected2, expected1, 'oat_point', result) finally: if agent_uuid: @@ -1088,13 +1013,11 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, @pytest.mark.historian @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_combined_results_from_rollup_and_raw_data(volttron_instance, - database_client): +def test_combined_results_from_rollup_and_raw_data(volttron_instance, database_client): """ - Test querying data with start date earlier than available rollup data - and query end date greater than available rollup data. Historian should - query available data from rolled up collection and get the rest - from raw data collection + Test querying data with start date earlier than available rollup data and query end date greater than available + rollup data. Historian should query available data from rolled up collection and get the rest from raw data + collection :param database_client: :param volttron_instance: The instance against which the test is run """ @@ -1109,8 +1032,7 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) publish_t2 = publish_t1 + timedelta(minutes=1) publish_t3 = utils.get_aware_utc_now() # query time period should be greater than 3 hours for historian to use @@ -1122,19 +1044,15 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 1 config['periodic_rollup_frequency'] = 0.1 - config['rollup_query_start'] = publish_t2.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = publish_t2.strftime( - '%Y-%m-%dT%H:%M:%S.%f') + config['rollup_query_start'] = publish_t2.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = publish_t2.strftime('%Y-%m-%dT%H:%M:%S.%f') agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -1149,10 +1067,7 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, # Remove publish_t2 entry from data collection print("removing {}".format(publish_t2)) db['data'].remove({'ts': publish_t2}) - db['daily_data'].remove({'ts': publish_t3.replace(hour=0, - minute=0, - second=0, - microsecond=0)}) + db['daily_data'].remove({'ts': publish_t3.replace(hour=0, minute=0, second=0, microsecond=0)}) # Check query result = publish_agent.vip.rpc.call('platform.historian', 'query', @@ -1161,24 +1076,22 @@ def test_combined_results_from_rollup_and_raw_data(volttron_instance, end=query_end.isoformat(), order="LAST_TO_FIRST").get(timeout=10) print(result) - compare_query_results(db, expected3, expected2, expected1, - 'oat_point', result) + compare_query_results(db, expected3, expected2, expected1, 'oat_point', result) finally: if agent_uuid: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) + @pytest.mark.historian @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, - database_client): +def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, database_client): """ - Test querying data with start date earlier than available rollup data - and query end date greater than available rollup data. Historian should - query available data from rolled up collection and get the rest - from raw data collection + Test querying data with start date earlier than available rollup data and query end date greater than available + rollup data. Historian should query available data from rolled up collection and get the rest from raw data + collection :param database_client: :param volttron_instance: The instance against which the test is run """ @@ -1193,8 +1106,7 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) publish_t2 = publish_t1 + timedelta(minutes=1) publish_t3 = utils.get_aware_utc_now() - timedelta(minutes=1) publish_t4 = utils.get_aware_utc_now() @@ -1207,19 +1119,15 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 1 config['periodic_rollup_frequency'] = 0.1 - config['rollup_query_start'] = publish_t2.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = publish_t2.strftime( - '%Y-%m-%dT%H:%M:%S.%f') + config['rollup_query_start'] = publish_t2.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = publish_t2.strftime('%Y-%m-%dT%H:%M:%S.%f') agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -1236,10 +1144,7 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, # available in hourly and daily collections print("removing {}".format(publish_t2)) db['data'].remove({'ts': publish_t2}) - db['daily_data'].remove({'ts': publish_t3.replace(hour=0, - minute=0, - second=0, - microsecond=0)}) + db['daily_data'].remove({'ts': publish_t3.replace(hour=0, minute=0, second=0, microsecond=0)}) # result from data table alone result = publish_agent.vip.rpc.call( @@ -1251,8 +1156,7 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, end=query_end.isoformat(), order="LAST_TO_FIRST").get(timeout=10) print("Case 1: {}".format(result)) - compare_query_results(db, expected4, None, None, - 'oat_point', result) + compare_query_results(db, expected4, None, None, 'oat_point', result) # result from data table alone result = publish_agent.vip.rpc.call( @@ -1285,8 +1189,7 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, start=query_start_day.isoformat(), end=query_end.isoformat(), order="FIRST_TO_LAST").get(timeout=10) print("Case 4: {}".format(result)) - compare_query_results(db, expected1, expected2, None, 'oat_point', - result) + compare_query_results(db, expected1, expected2, None, 'oat_point', result) # combined result result = publish_agent.vip.rpc.call( @@ -1295,8 +1198,7 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, start=query_start_day.isoformat(), end=query_end.isoformat(), order="LAST_TO_FIRST").get(timeout=10) print("Case 5: {}".format(result)) - compare_query_results(db, expected4, expected3, expected2, 'oat_point', - result) + compare_query_results(db, expected4, expected3, expected2, 'oat_point', result) # results only from raw data result = publish_agent.vip.rpc.call( @@ -1307,8 +1209,7 @@ def test_combined_results_rollup_and_raw_data_with_count(volttron_instance, end=query_end.isoformat(), order="LAST_TO_FIRST").get(timeout=10) print("Case 6: {}".format(result)) - compare_query_results(db, expected4, expected3, None, 'oat_point', - result) + compare_query_results(db, expected4, expected3, None, 'oat_point', result) finally: if agent_uuid: @@ -1336,8 +1237,7 @@ def test_dict_insert_special_character(volttron_instance, database_client): db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) publish_t2 = publish_t1 + timedelta(minutes=1) query_end = publish_t2 + timedelta(seconds=2) @@ -1350,19 +1250,15 @@ def test_dict_insert_special_character(volttron_instance, database_client): config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 0 config['periodic_rollup_frequency'] = 0.2 - config['rollup_query_start'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') + config['rollup_query_start'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_dict_insert_special_character **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -1384,8 +1280,7 @@ def test_dict_insert_special_character(volttron_instance, database_client): end=query_end.isoformat(), order="FIRST_TO_LAST").get(timeout=10) print(result) - compare_query_results(db, expected1, expected2, None, - 'oat_point', result) + compare_query_results(db, expected1, expected2, None, 'oat_point', result) # Check query from daily_data db['hourly_data'].drop() @@ -1396,8 +1291,7 @@ def test_dict_insert_special_character(volttron_instance, database_client): order="LAST_TO_FIRST").get(timeout=10) print(result) - compare_query_results(db, expected2, expected1, None, - 'oat_point', result) + compare_query_results(db, expected2, expected1, None, 'oat_point', result) finally: if agent_uuid: volttron_instance.stop_agent(agent_uuid) @@ -1406,11 +1300,9 @@ def test_dict_insert_special_character(volttron_instance, database_client): @pytest.mark.mongodb @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_insert_multiple_data_per_minute(volttron_instance, - database_client): +def test_insert_multiple_data_per_minute(volttron_instance, database_client): """ - Test the query of rolled up data from hourly, daily and monthly data - tables + Test the query of rolled up data from hourly, daily and monthly data tables :param database_client: :param volttron_instance: The instance against which the test is run """ @@ -1425,10 +1317,8 @@ def test_insert_multiple_data_per_minute(volttron_instance, db['hourly_data'].drop() db['daily_data'].drop() - publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, - second=1, microsecond=0, tzinfo=tzutc()) - # test insert and query when there is more than 1 record in the - # same minute + publish_t1 = datetime(year=2016, month=3, day=1, hour=1, minute=10, second=1, microsecond=0, tzinfo=tzutc()) + # test insert and query when there is more than 1 record in the same minute publish_t2 = publish_t1 + timedelta(seconds=5) query_end = publish_t2 + timedelta(seconds=2) # query time period should be greater than 3 hours for historian to use @@ -1440,19 +1330,15 @@ def test_insert_multiple_data_per_minute(volttron_instance, config['periodic_rollup_initial_wait'] = 0.1 config['rollup_query_end'] = 0 config['periodic_rollup_frequency'] = 0.1 - config['rollup_query_start'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') - config['initial_rollup_start_time'] = query_start_day.strftime( - '%Y-%m-%dT%H:%M:%S.%f') + config['rollup_query_start'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') + config['initial_rollup_start_time'] = query_start_day.strftime('%Y-%m-%dT%H:%M:%S.%f') agent_uuid = install_historian_agent(volttron_instance, config) - # print('HOME', volttron_instance.volttron_home) print("\n** test_data_rollup_insert **") publish_agent = volttron_instance.build_agent() - version = publish_agent.vip.rpc.call('platform.historian', - 'get_version').get(timeout=5) + version = publish_agent.vip.rpc.call('platform.historian', 'get_version').get(timeout=5) version_nums = version.split(".") if int(version_nums[0]) < 2: @@ -1473,8 +1359,7 @@ def test_insert_multiple_data_per_minute(volttron_instance, end=query_end.isoformat(), order="FIRST_TO_LAST").get(timeout=10) print(result) - compare_query_results(db, expected1, expected2, None, - 'oat_point', result) + compare_query_results(db, expected1, expected2, None, 'oat_point', result) verify_hourly_collection(db, expected1, expected2) # Check query from daily_data @@ -1486,20 +1371,15 @@ def test_insert_multiple_data_per_minute(volttron_instance, order="FIRST_TO_LAST").get(timeout=10) print(result) - compare_query_results(db, expected1, expected2, None, - 'oat_point', result) + compare_query_results(db, expected1, expected2, None, 'oat_point', result) verify_daily_collection(db, expected1, expected2) - - finally: if agent_uuid: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) -def compare_query_results(db, expected1, expected2, - expected3, query_point, - result): +def compare_query_results(db, expected1, expected2, expected3, query_point, result): expected_t1 = format_expected_time(expected1) assert expected_t1 == result['values'][0][0] assert result['values'][0][1] == expected1[query_point] @@ -1519,6 +1399,7 @@ def verify_daily_collection(db, expected1, expected2, expected3=None): expected_t2 = format_expected_time(expected2) t1_hour_min = expected1['datetime'].replace(second=0, microsecond=0) t2_hour_min = expected2['datetime'].replace(second=0, microsecond=0) + expected_t3 = None if expected3: expected_t3 = format_expected_time(expected3) cursor = db['topics'].find({'topic_name': query_points['oat_point']}) @@ -1527,27 +1408,20 @@ def verify_daily_collection(db, expected1, expected2, expected3=None): cursor = db['daily_data'].find({'topic_id': id}) rows = list(cursor) assert len(rows[0]['data']) == 24 * 60 - # print(rows[0]['data']) # if it is same day and same minute if t1_hour_min == t2_hour_min: rolled_up_data1 = rows[0]['data'][ expected1['datetime'].hour * 60 + expected1['datetime'].minute][0] - rolled_up_data2 = rows[0]['data'][ - expected2['datetime'].hour * 60 + expected1['datetime'].minute][1] + rolled_up_data2 = rows[0]['data'][expected2['datetime'].hour * 60 + expected1['datetime'].minute][1] else: - rolled_up_data1 = rows[0]['data'][ - expected1['datetime'].hour * 60 + expected1['datetime'].minute][0] - rolled_up_data2 = rows[0]['data'][ - expected2['datetime'].hour * 60 + expected2['datetime'].minute][0] - compare_rolled_up_data(rolled_up_data1, expected_t1, - expected1['oat_point']) - compare_rolled_up_data(rolled_up_data2, expected_t2, - expected2['oat_point']) + rolled_up_data1 = rows[0]['data'][expected1['datetime'].hour * 60 + expected1['datetime'].minute][0] + rolled_up_data2 = rows[0]['data'][expected2['datetime'].hour * 60 + expected2['datetime'].minute][0] + compare_rolled_up_data(rolled_up_data1, expected_t1, expected1['oat_point']) + compare_rolled_up_data(rolled_up_data2, expected_t2, expected2['oat_point']) if expected3: rolled_up_data3 = rows[0]['data'][ expected3['datetime'].hour * 60 + expected3['datetime'].minute][0] - compare_rolled_up_data(rolled_up_data3, expected_t3, - expected3['oat_point']) + compare_rolled_up_data(rolled_up_data3, expected_t3, expected3['oat_point']) def verify_hourly_collection(db, expected1, expected2, expected3=None): @@ -1556,6 +1430,7 @@ def verify_hourly_collection(db, expected1, expected2, expected3=None): expected_t2 = format_expected_time(expected2) t1_hour = expected1['datetime'].replace(second=0, microsecond=0) t2_hour = expected2['datetime'].replace(second=0, microsecond=0) + expected_t3 = None if expected3: expected_t3 = format_expected_time(expected3) cursor = db['topics'].find({'topic_name': query_points['oat_point']}) @@ -1572,14 +1447,11 @@ def verify_hourly_collection(db, expected1, expected2, expected3=None): rolled_up_data1 = rows[0]['data'][expected1['datetime'].minute][0] rolled_up_data2 = rows[0]['data'][expected2['datetime'].minute][0] - compare_rolled_up_data(rolled_up_data1, expected_t1, - expected1['oat_point']) - compare_rolled_up_data(rolled_up_data2, expected_t2, - expected2['oat_point']) + compare_rolled_up_data(rolled_up_data1, expected_t1, expected1['oat_point']) + compare_rolled_up_data(rolled_up_data2, expected_t2, expected2['oat_point']) if expected3 and expected_t3: rolled_up_data3 = rows[0]['data'][expected3['datetime'].minute][0] - compare_rolled_up_data(rolled_up_data3, expected_t3, - expected3['oat_point']) + compare_rolled_up_data(rolled_up_data3, expected_t3, expected3['oat_point']) def format_expected_time(expected1): @@ -1589,8 +1461,7 @@ def format_expected_time(expected1): def compare_rolled_up_data(data_from_db_query, expected_time, expected_value): - assert utils.format_timestamp(data_from_db_query[0]) + '+00:00' == \ - expected_time + assert utils.format_timestamp(data_from_db_query[0]) + '+00:00' == expected_time assert data_from_db_query[1] == expected_value @@ -1604,9 +1475,8 @@ def test_manage_db_size(volttron_instance, database_client): config = dict(mongo_agent_config()) config["history_limit_days"] = 6 - # start up the angent - agent_uuid = install_historian_agent(volttron_instance, - config) + # start up the agent + agent_uuid = install_historian_agent(volttron_instance, config) assert agent_uuid is not None assert volttron_instance.is_agent_running(agent_uuid) diff --git a/services/core/MongodbHistorian/tests/test_prod_query_mongo.py b/services/core/MongodbHistorian/tests/test_prod_query_mongo.py index a49e24fc2c..4a78f0e5eb 100644 --- a/services/core/MongodbHistorian/tests/test_prod_query_mongo.py +++ b/services/core/MongodbHistorian/tests/test_prod_query_mongo.py @@ -96,7 +96,7 @@ @pytest.fixture(scope="function", params=[pymongo_mark(mongo_agent_config)]) -def database_client(request): +def database_client_prod(request): print('connecting to mongo database') client = pymongo.MongoClient(mongo_connection_string()) @@ -110,7 +110,7 @@ def close_client(): @pytest.mark.timeout(180) @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_basic_function_week_data(volttron_instance, database_client): +def test_basic_function_week_data(volttron_instance, database_client_prod): """ Test basic functionality of sql historian. Inserts three points as part of all topic and checks if all three got into the database @@ -177,7 +177,7 @@ def test_basic_function_week_data(volttron_instance, database_client): @pytest.mark.timeout(180) @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_basic_function_month_data(volttron_instance, database_client): +def test_basic_function_month_data(volttron_instance, database_client_prod): """ Test basic functionality of sql historian. Inserts three points as part of all topic and checks if all three got into the database @@ -277,7 +277,7 @@ def test_basic_function_month_data(volttron_instance, database_client): @pytest.mark.timeout(180) @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_basic_function_week_multi_topic(volttron_instance, database_client): +def test_basic_function_week_multi_topic(volttron_instance, database_client_prod): """ Test basic functionality of sql historian. Inserts three points as part of all topic and checks if all three got into the database @@ -317,7 +317,7 @@ def test_basic_function_week_multi_topic(volttron_instance, database_client): @pytest.mark.timeout(180) @pytest.mark.skipif(not HAS_PYMONGO, reason='No pymongo driver') -def test_basic_function_2week_multi_topic(volttron_instance, database_client): +def test_basic_function_2week_multi_topic(volttron_instance, database_client_prod): """ Test basic functionality of sql historian. Inserts three points as part of all topic and checks if all three got into the database diff --git a/services/core/MongodbTaggingService/README.md b/services/core/MongodbTaggingService/README.md new file mode 100644 index 0000000000..449dbe7488 --- /dev/null +++ b/services/core/MongodbTaggingService/README.md @@ -0,0 +1,74 @@ +# Mongodb Tagging Service + +Mongodb tagging service provide APIs to tag both topic names(device +points) and topic name prefixes (campus, building, unit/equipment, sub +unit) and then query for relevant topics based on saved tag names and +values. This agent stores the tags in a mongodb database. + +Tags used by this agent are not user defined. They have to be +pre-defined in a resource file at volttron_data/tagging_resources. The +agent validates against this predefined list of tags every time user add +tags to topics. Tags can be added to one topic at a time or multiple +topics by using a topic name pattern(regular expression). This agent +uses tags from [project haystack](https://project-haystack.org/). and +adds a few custom tags for campus and VOLTTRON point name. + +Each tag has an associated value and users can query for topic names +based tags and its values using a simplified sql-like query string. +Queries can specify tag names with values or tags without values for +boolean tags(markers). Queries can combine multiple conditions with +keyword AND and OR, and use the keyword NOT to negate a conditions. + +## Requirements + +This historian requires a mongodb connector installed in your activated +volttron environment to talk to mongodb. Please execute the following +from an activated shell in order to install it. + + pip install pymongo + +## Dependencies and Limitations + +1. When adding tags to topics, this agent calls the + platform.historian\'s get_topic_list and hence requires the + platform.historian to be running but it doesn\'t require the + historian to use any specific database. It does not require + platform.historian to be running for using its query APIs. +2. Resource files that provides the list of valid tags is mandatory and + should be in volttron_data/tagging_reosurces/tags.csv +3. Tagging service only provides APIs query for topic names based on + tags. Once the list of topic names is retrieved, users should use + the historian APIs to get the data corresponding to those topics. +4. Current version of tagging service does not support versioning of + tag/values. When tags values set using tagging service APIs + update/overwrite any existing tag entries in the database + +## Configuration Options + +The following JSON configuration file shows all the options currently +supported by this agent. + +``` {.python} +{ + "connection": { + "type": "mongodb", + "params": { + "host": "localhost", + "port": 27017, + "database": "test_historian", + "user": "username for this db. should have read write access", + "passwd": "password for this db" + } + }, + # optional. Specify if collections created for tagging should have names + # starting with a specific prefix _ + "table_prefix":"volttron", + + # optional. Specify if you want tagging service to query the historian + # with this vip identity. defaults to platform.historian + "historian_vip_identity": "mongo.historian" +} +``` + +## See Also +[TaggingServiceSpec](https://volttron.readthedocs.io/en/develop/developing-volttron/developing-agents/specifications/tagging-service.html) diff --git a/services/core/MongodbTaggingService/README.rst b/services/core/MongodbTaggingService/README.rst deleted file mode 100644 index 2d41be64ba..0000000000 --- a/services/core/MongodbTaggingService/README.rst +++ /dev/null @@ -1,85 +0,0 @@ -.. _Mongodb_Tagging_Service: - -======================= -Mongodb Tagging Service -======================= - -Mongodb tagging service provide APIs to tag both topic names(device points) and -topic name prefixes (campus, building, unit/equipment, sub unit) and then -query for relevant topics based on saved tag names and values. This agent -stores the tags in a mongodb database. - -Tags used by this agent are not user defined. They have to be pre-defined in a -resource file at volttron_data/tagging_resources. The agent validates against -this predefined list of tags every time user add tags to topics. Tags can be -added to one topic at a time or multiple topics by using a topic name -pattern(regular expression). This agent uses tags from -`project haystack `_. and adds a few custom -tags for campus and VOLTTRON point name. - -Each tag has an associated value and users can query for topic names based -tags and its values using a simplified sql-like query string. Queries can -specify tag names with values or tags without values for boolean tags(markers). -Queries can combine multiple conditions with keyword AND and OR, -and use the keyword NOT to negate a conditions. - -Requirements ------------- - -This historian requires a mongodb connector installed in your activated -volttron environment to talk to mongodb. Please execute the following -from an activated shell in order to install it. - -:: - - pip install pymongo - -Dependencies and Limitations ----------------------------- - -1. When adding tags to topics, this agent calls the platform.historian's - get_topic_list and hence requires the platform.historian to be running - but it doesn't require the historian to use any specific database. It - does not require platform.historian to be running for using its - query APIs. -2. Resource files that provides the list of valid tags is mandatory and should - be in volttron_data/tagging_reosurces/tags.csv -3. Tagging service only provides APIs query for topic names based on tags. - Once the list of topic names is retrieved, users should use the historian - APIs to get the data corresponding to those topics. -4. Current version of tagging service does not support versioning of - tag/values. When tags values set using tagging service APIs update/overwrite - any existing tag entries in the database - -Configuration Options ---------------------- - -The following JSON configuration file shows all the options currently supported -by this agent. - -.. code-block:: python - - { - "connection": { - "type": "mongodb", - "params": { - "host": "localhost", - "port": 27017, - "database": "test_historian", - "user": "username for this db. should have read write access", - "passwd": "password for this db" - } - }, - # optional. Specify if collections created for tagging should have names - # starting with a specific prefix _ - "table_prefix":"volttron", - - # optional. Specify if you want tagging service to query the historian - # with this vip identity. defaults to platform.historian - "historian_vip_identity": "crate.historian" - } - -See Also --------- - -`TaggingServiceSpec`_ diff --git a/services/core/MongodbTaggingService/Tests/test_mongodb_tagging.py b/services/core/MongodbTaggingService/Tests/test_mongodb_tagging.py new file mode 100644 index 0000000000..0ecc8c24bc --- /dev/null +++ b/services/core/MongodbTaggingService/Tests/test_mongodb_tagging.py @@ -0,0 +1,67 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json +import gevent +import pytest + +from volttron.platform import get_services_core +from volttron.platform.messaging.health import STATUS_GOOD + + +@pytest.mark.tagging +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("MongodbTaggingService"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + volttron_instance.install_agent( + agent_dir=get_services_core("MongodbTaggingService"), + config_file=config_json, + start=True, + vip_identity="health_test") + + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/MongodbTaggingService/config b/services/core/MongodbTaggingService/config index b375478249..7e00b0f0f3 100644 --- a/services/core/MongodbTaggingService/config +++ b/services/core/MongodbTaggingService/config @@ -4,11 +4,11 @@ "params": { "host": "localhost", "port": 27017, - "database": "test_historian", - "user": "historian", - "passwd": "historian" + "database": "mongo_test", + "user": "test", + "passwd": "test" } }, - "table_prefix":"volttron", #optional - "historian_vip_identity":"platform.historian" #optional + "table_prefix":"volttron", + "historian_vip_identity":"platform.historian" } diff --git a/services/core/MongodbTaggingService/conftest.py b/services/core/MongodbTaggingService/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/core/MongodbTaggingService/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/MongodbTaggingService/mongodb/__init__.py b/services/core/MongodbTaggingService/mongotagging/__init__.py similarity index 100% rename from services/core/MongodbTaggingService/mongodb/__init__.py rename to services/core/MongodbTaggingService/mongotagging/__init__.py diff --git a/services/core/MongodbTaggingService/mongodb/tagging.py b/services/core/MongodbTaggingService/mongotagging/tagging.py similarity index 96% rename from services/core/MongodbTaggingService/mongodb/tagging.py rename to services/core/MongodbTaggingService/mongotagging/tagging.py index ba61fc735d..ed0c787098 100644 --- a/services/core/MongodbTaggingService/mongodb/tagging.py +++ b/services/core/MongodbTaggingService/mongotagging/tagging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,7 +46,6 @@ import re from pkg_resources import resource_string, resource_exists from pymongo.errors import BulkWriteError - from volttron.platform.agent import utils from volttron.platform.agent.base_tagging import BaseTaggingService from volttron.platform.dbutils import mongoutils @@ -62,14 +61,14 @@ def tagging_service(config_path, **kwargs): """ - This method is called by the :py:func:`service.tagging.main` to + This method is called by the :py:func:`tagging.main` to parse the passed config file or configuration dictionary object, validate the configuration entries, and create an instance of MongodbTaggingService :param config_path: could be a path to a configuration file or can be a dictionary object :param kwargs: additional keyword arguments if any - :return: an instance of :py:class:`service.tagging.SQLTaggingService` + :return: an instance of :py:class:`tagging.MongodbTaggingService` """ if isinstance(config_path, dict): config_dict = config_path @@ -258,7 +257,7 @@ def _init_categories(self, db): "description": i['description']}) bulk.execute() else: - _log.warn("No categories to initialize. No such file "+ file_path) + _log.warning("No categories to initialize. No such file " + file_path) def _init_category_tags(self, db): file_path = self.resource_sub_dir + '/category_tags.txt' @@ -297,8 +296,7 @@ def _init_category_tags(self, db): [('categories', pymongo.ASCENDING)], background=True) else: - _log.warn("No category to tags mapping to initialize. No such " - "file " + file_path) + _log.warning("No category to tags mapping to initialize. No such file " + file_path) @doc_inherit def query_categories(self, include_description=False, skip=0, count=None, @@ -318,17 +316,17 @@ def query_categories(self, include_description=False, skip=0, count=None, else: cursor = db[self.categories_collection].find( projection=['_id', 'description'], skip=skip_count, - limit=count, sort=[('_id',order_by)]) + limit=count, sort=[('_id', order_by)]) result_dict = list(cursor) results = OrderedDict() for r in result_dict: - results[r['_id']] = r.get('description',"") + results[r['_id']] = r.get('description', "") if include_description: - return results.items() + return list(results.items()) else: - return results.keys() + return list(results.keys()) @doc_inherit def query_tags_by_category(self, category, include_kind=False, @@ -379,7 +377,7 @@ def insert_topic_tags(self, tags, update_version=False): execute = False for topic_pattern, topic_tags in tags.items(): for tag_name, tag_value in topic_tags.items(): - if not self.valid_tags.has_key(tag_name): + if tag_name not in self.valid_tags: raise ValueError( "Invalid tag name:{}".format(tag_name)) # TODO: Validate and convert values based on tag kind/type @@ -388,10 +386,10 @@ def insert_topic_tags(self, tags, update_version=False): # tag_value = get_tag_value(tag_value, # self.valid_tags[tag_name]) if tag_name == 'id' and tag_value is not None: - _log.warn("id tags are not explicitly stored. " - "topic prefix servers as unique identifier for" - "an entity. id value sent({}) will not be " - "stored".format(tag_value)) + _log.warning("id tags are not explicitly stored. " + "topic prefix servers as unique identifier for" + "an entity. id value sent({}) will not be " + "stored".format(tag_value)) prefixes = self.get_matching_topic_prefixes(topic_pattern) if not prefixes: result['error'][topic_pattern] = "No matching topic found" @@ -456,7 +454,7 @@ def query_tags_by_topic(self, topic_prefix, include_kind=False, meta = {} if include_description or include_kind: cursor = db[self.tags_collection].find( - {"_id":{"$in":d.keys()}}) + {"_id":{"$in":list(d.keys())}}) records = list(cursor) for r in records: meta[r['_id']] = (r['kind'], r['description']) diff --git a/services/core/MongodbTaggingService/setup.py b/services/core/MongodbTaggingService/setup.py index 7aabdc3931..38d236e876 100644 --- a/services/core/MongodbTaggingService/setup.py +++ b/services/core/MongodbTaggingService/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/ObixHistoryPublish/README.md b/services/core/ObixHistoryPublish/README.md new file mode 100644 index 0000000000..3800017c51 --- /dev/null +++ b/services/core/ObixHistoryPublish/README.md @@ -0,0 +1,17 @@ +Obix History Publisher +====================== + +The following is an example configuration for the Obix History Publisher. + + { + "url": "http://example.com/obix/histories/EXAMPLE/", + "username": "username", + "password": "password", + # Interval to query interface for updates in minutes. + # History points are only published if new data is available + # config points are gathered and published at this interval. + "check_interval": 15, + # Path prefix for all publishes + "path_prefix": "devices/obix/history/", + "register_config": "config://registry_config.csv" + } diff --git a/services/core/ObixHistoryPublish/config b/services/core/ObixHistoryPublish/config index bdede3d51f..ad80022d13 100644 --- a/services/core/ObixHistoryPublish/config +++ b/services/core/ObixHistoryPublish/config @@ -2,11 +2,7 @@ "url": "http://example.com/obix/histories/EXAMPLE/", "username": "username", "password": "password", - # Interval to query interface for updates in minutes. - # History points are only published if new data is available - # config points are gathered and published at this interval. "check_interval": 15, - # Path prefix for all publishes "path_prefix": "devices/obix/history/", "register_config": "config://registry_config.csv" -} \ No newline at end of file +} diff --git a/services/core/ObixHistoryPublish/obix_history/agent.py b/services/core/ObixHistoryPublish/obix_history/agent.py index 3550c0316b..091b7ca3f2 100644 --- a/services/core/ObixHistoryPublish/obix_history/agent.py +++ b/services/core/ObixHistoryPublish/obix_history/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,7 +41,7 @@ import logging import sys from volttron.platform.agent import utils -from volttron.platform.vip.agent import Agent, Core, RPC +from volttron.platform.vip.agent import Agent from volttron.platform.scheduling import periodic import grequests import gevent @@ -50,7 +50,7 @@ from collections import defaultdict from volttron.platform.messaging import headers as headers_mod -TOPIC_DELIM ='/' +TOPIC_DELIM = '/' _log = logging.getLogger(__name__) utils.setup_logging() @@ -58,11 +58,9 @@ def obix_history(config_path, **kwargs): - """Parses the Agent configuration and returns an instance of - the agent created using that configuration. - + """ + Parses the Agent configuration and returns an instance of the agent created using that configuration. :param config_path: Path to a configuration file. - :type config_path: str :returns: ObixHistory :rtype: ObixHistory @@ -133,7 +131,9 @@ def get_value_async_result(self, username=None, password=None, start_time=None, return grequests.get(url, auth=(username, password), params=payload_str) def time_format(self, dt): - """Format timestamp for becchp.com query""" + """ + Format timestamp for becchp.com query + """ _log.debug("time_format dt: {}".format(dt)) return "%s:%06.3f%s" % ( dt.strftime('%Y-%m-%dT%H:%M'), @@ -210,6 +210,9 @@ def __init__(self, url=None, self.last_read = None self.default_last_read = default_last_read + self.topics = None + self.historian_name = "" + self.scheduled_update = None self.registers = list() @@ -228,7 +231,6 @@ def configure(self, config_name, action, contents): """ Called after the Agent has connected to the message bus. If a configuration exists at startup this will be called before onstart. - Is called every time the configuration in the store changes. """ config = self.default_config.copy() @@ -263,7 +265,6 @@ def configure(self, config_name, action, contents): self.register_config = register_config self.historian_name = historian_name - self.configure_registers(register_config) if self.last_read is None: @@ -283,20 +284,17 @@ def set_last_read(self): _log.error("ERROR PROCESSING CONFIGURATION: last_read file does not contain dictionary") last_read = None - if last_read is None: last_read = {} - backup_last_read = utils.format_timestamp(utils.get_aware_utc_now() + datetime.timedelta(hours=-1*self.default_last_read)) + backup_last_read = utils.format_timestamp(utils.get_aware_utc_now() + datetime.timedelta( + hours=-1*self.default_last_read)) for r in self.registers: new_last_read = last_read.get(r.index, backup_last_read) last_read[r.index] = r.last_read = new_last_read self.last_read = last_read - # for r in self.registers: - # r.last_read = last_read[r.index] - def restart_greenlet(self): if self.scheduled_update is not None: @@ -313,12 +311,11 @@ def configure_registers(self, register_config): _log.warning("No registers configured.") return - self.topics = [] # used to index registers + self.topics = [] # used to index registers self.registers = [] for register_line in register_config: - if ("Device Name" not in register_line or - "Volttron Point Name" not in register_line or - "Obix Name" not in register_line): + if "Device Name" not in register_line or "Volttron Point Name" not in register_line or \ + "Obix Name" not in register_line: _log.warning("Column missing from configuration file line: {}".format(register_line)) continue device_topic = self.path_prefix + register_line['Device Name'] @@ -330,12 +327,12 @@ def configure_registers(self, register_config): self.topics.append((device_topic, point_name)) def collate_results(self, devices): - # devices[device_topic][point_name][time]: [value, units] -> result[time][device_topic][{point_name: value}, {point_name: {'units': units}] + # devices[device_topic][point_name][time]: [value, units] -> + # result[time][device_topic][{point_name: value}, {point_name: {'units': units}] result = defaultdict(lambda: defaultdict(lambda: [{}, {}])) for device_topic, points in devices.items(): for point_name, times in points.items(): for time, value in times.items(): - #result[time][device_topic] = [{}, {}] result[time][device_topic][0][point_name] = value[0] result[time][device_topic][1][point_name] = {'units': value[1]} return result @@ -420,8 +417,7 @@ def publish(self, topic_prefix, message, headers): def main(): """Main method called to start the agent.""" - utils.vip_main(obix_history, - version=__version__) + utils.vip_main(obix_history, version=__version__) if __name__ == '__main__': diff --git a/services/core/ObixHistoryPublish/setup.py b/services/core/ObixHistoryPublish/setup.py index ad8a426b18..f1e7b2c1c6 100644 --- a/services/core/ObixHistoryPublish/setup.py +++ b/services/core/ObixHistoryPublish/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -63,4 +63,4 @@ 'eggsecutable = ' + agent_module + ':main', ] } -) \ No newline at end of file +) diff --git a/services/core/OpenADRVenAgent/README.md b/services/core/OpenADRVenAgent/README.md new file mode 100644 index 0000000000..cf876e80a0 --- /dev/null +++ b/services/core/OpenADRVenAgent/README.md @@ -0,0 +1,120 @@ +# OpenADRVenAgent + +OpenADR (Automated Demand Response) is a standard for alerting and responding to the need to adjust electric power +consumption in response to fluctuations in grid demand. + +For further information about OpenADR and this agent, please see the OpenADR documentation in VOLTTRON ReadTheDocs. + +## Dependencies +The VEN agent depends on some third-party libraries that are not in the standard VOLTTRON installation. +They should be installed in the VOLTTRON virtual environment prior to building the agent. Use requirements.txt in the +agent directory to install the requirements + +``` +cd $VOLTTRON_ROOT/services/core/OpenADRVenAgent +pip install -r requirements.txt +``` + +## Configuration Parameters + +The VEN agent’s configuration file contains JSON that includes several parameters for configuring VTN server communications and other behavior. A sample configuration file, config, has been provided in the agent directory. + +The VEN agent supports the following configuration parameters: + +|Parameter|Example|Description| +|---------|-------|-----------| +|db\_path|“\$VOLTTRON\_HOME/data/|Pathname of the agent's sqlite database. Shell| +||openadr.sqlite”|variables will be expanded if they are present| +|||in the pathname.| +|ven\_id|“0”|The OpenADR ID of this virtual end node. Identifies| +|||this VEN to the VTN. If automated VEN registration| +|||is used, the ID is assigned by the VTN at that| +|||time. If the VEN is registered manually with the| +|||VTN (i.e., via configuration file settings), then| +|||a common VEN ID should be entered in this config| +|||file and in the VTN's site definition.| +|ven\_name|"ven01"|Name of this virtual end node. This name is used| +|||during automated registration only, identiying| +|||the VEN before its VEN ID is known.| +|vtn\_id|“vtn01”|OpenADR ID of the VTN with which this VEN| +|||communicates.| +|vtn\_address|“.|URL and port number of the VTN.| +||ki-evi.com:8000”|| +|send\_registration|“False”|(“True” or ”False”) If “True”, the VEN sends| +|||a one-time automated registration request to| +|||the VTN to obtain the VEN ID. If automated| +|||registration will be used, the VEN should be run| +|||in this mode initially, then shut down and run| +|||with this parameter set to “False” thereafter.| +|security\_level|“standard”|If 'high', the VTN and VEN use a third-party| +|||signing authority to sign and authenticate each| +|||request. The default setting is “standard”: the| +|||XML payloads do not contain Signature elements.| +|poll\_interval\_secs|30|(integer) How often the VEN should send an OadrPoll| +|||request to the VTN. The poll interval cannot be| +|||more frequent than the VEN’s 5-second process| +|||loop frequency.| +|log\_xml|“False”|(“True” or “False”) Whether to write each| +|||inbound/outbound request’s XML data to the| +|||agent's log.| +|opt\_in\_timeout\_secs|1800|(integer) How long to wait before making a| +|||default optIn/optOut decision.| +|opt\_in\_default\_decision|“optOut”|(“True” or “False”) Which optIn/optOut choice| +|||to make by default.| +|request\_events\_on\_startup|"False"|("True" or "False") Whether to ask the VTN for a| +|||list of current events during VEN startup.| +|report\_parameters|(see below)|A dictionary of definitions of reporting/telemetry| +|||parameters.| + +Reporting Configuration +======================= + +The VEN’s reporting configuration, specified as a dictionary in the agent configuration, defines each telemetry element (metric) that the VEN can report to the VTN, if requested. By default, it defines reports named “telemetry” and "telemetry\_status", with a report configuration dictionary containing the following parameters: + +|"telemetry" report: parameters|Example|Description| +|------------------------------|-------|-----------| +|report\_name|"TELEMETRY\_USAGE"|Friendly name of the report.| +|report\_name\_metadata|"METADATA\_TELEMETRY\_USAGE"|Friendly name of the report’s metadata, when sent| +|||by the VEN’s oadrRegisterReport request.| +|report\_specifier\_id|"telemetry"|Uniquely identifies the report’s data set.| +|report\_interval\_secs\_default|"300"|How often to send a reporting update to the VTN.| +|telemetry\_parameters (baseline\_power\_kw): r\_id|"baseline\_power"|(baseline\_power) Unique ID of the metric.| +|telemetry\_parameters (baseline\_power\_kw): report\_type|"baseline"|(baseline\_power) The type of metric being reported.| +|telemetry\_parameters (baseline\_power\_kw): reading\_type|"Direct Read"|(baseline\_power) How the metric was calculated.| +|telemetry\_parameters (baseline\_power\_kw): units|"powerReal"|(baseline\_power) The reading's data type.| +|telemetry\_parameters (baseline\_power\_kw): method\_name|"get\_baseline\_power"|(baseline\_power) The VEN method to use when| +|||extracting the data for reporting.| +|telemetry\_parameters (baseline\_power\_kw): min\_frequency|30|(baseline\_power) The metric’s minimum sampling| +|||frequency.| +|telemetry\_parameters (baseline\_power\_kw): max\_frequency|60|(baseline\_power) The metric’s maximum sampling| +|||frequency.| +|telemetry\_parameters (current\_power\_kw): r\_id|"actual\_power"|(current\_power) Unique ID of the metric.| +|telemetry\_parameters (current\_power\_kw): report\_type|"reading"|(current\_power) The type of metric being reported.| +|telemetry\_parameters (current\_power\_kw): reading\_type|"Direct Read"|(current\_power) How the metric was calculated.| +|telemetry\_parameters (current\_power\_kw): units|"powerReal"|(baseline\_power) The reading's data type.| +|telemetry\_parameters (current\_power\_kw): method\_name|"get\_current\_power"|(current\_power) The VEN method to use when| +|||extracting the data for reporting.| +|telemetry\_parameters (current\_power\_kw): min\_frequency|30|(current\_power) The metric’s minimum sampling| +|||frequency.| +|telemetry\_parameters (current\_power\_kw): max\_frequency|60|(current\_power) The metric’s maximum sampling| +|||frequency.| + +|"telemetry\_status" report: parameters|Example|Description| +|--------------------------------------|-------|-----------| +|report\_name|"TELEMETRY\_STATUS"|Friendly name of the report.| +|report\_name\_metadata|"METADATA\_TELEMETRY\_STATUS"|Friendly name of the report’s metadata, when sent| +|||by the VEN’s oadrRegisterReport request.| +|report\_specifier\_id|"telemetry\_status"|Uniquely identifies the report’s data set.| +|report\_interval\_secs\_default|"300"|How often to send a reporting update to the VTN.| +|telemetry\_parameters (Status): r\_id|"Status"|Unique ID of the metric.| +|telemetry\_parameters (Status): report\_type|"x-resourceStatus"|The type of metric being reported.| +|telemetry\_parameters (Status): reading\_type|"x-notApplicable"|How the metric was calculated.| +|telemetry\_parameters (Status): units|""|The reading's data type.| +|telemetry\_parameters (Status): method\_name|""|The VEN method to use when extracting the data| +|||for reporting.| +|telemetry\_parameters (Status): min\_frequency|60|The metric’s minimum sampling frequency.| +|telemetry\_parameters (Status): max\_frequency|120|The metric’s maximum sampling frequency.| + + + + diff --git a/services/core/OpenADRVenAgent/openadrven.config b/services/core/OpenADRVenAgent/config similarity index 100% rename from services/core/OpenADRVenAgent/openadrven.config rename to services/core/OpenADRVenAgent/config diff --git a/services/core/OpenADRVenAgent/install-ven-agent.sh b/services/core/OpenADRVenAgent/install-ven-agent.sh index c555d66c5e..bae585cdf7 100644 --- a/services/core/OpenADRVenAgent/install-ven-agent.sh +++ b/services/core/OpenADRVenAgent/install-ven-agent.sh @@ -3,6 +3,6 @@ export VIP_SOCKET="ipc://$VOLTTRON_HOME/run/vip.socket" python scripts/install-agent.py \ -s $VOLTTRON_ROOT/services/core/OpenADRVenAgent \ -i venagent \ - -c $VOLTTRON_ROOT/services/core/OpenADRVenAgent/openadrven.config \ + -c $VOLTTRON_ROOT/services/core/OpenADRVenAgent/config \ -t venagent \ - -f \ No newline at end of file + -f diff --git a/services/core/OpenADRVenAgent/openadrven/agent.py b/services/core/OpenADRVenAgent/openadrven/agent.py index c86a918f6f..84365818e7 100644 --- a/services/core/OpenADRVenAgent/openadrven/agent.py +++ b/services/core/OpenADRVenAgent/openadrven/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -272,7 +272,7 @@ def initialize_config(self, config): """ Initialize the agent's configuration. - Configuration parameters (see openadrven.config for a sample config file): + Configuration parameters (see config for a sample config file): db_path: Pathname of the agent's sqlite database. ~ and shell variables will be expanded if present. diff --git a/services/core/OpenADRVenAgent/openadrven/models.py b/services/core/OpenADRVenAgent/openadrven/models.py index bab70d634e..30a3bb4963 100644 --- a/services/core/OpenADRVenAgent/openadrven/models.py +++ b/services/core/OpenADRVenAgent/openadrven/models.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenADRVenAgent/openadrven/oadr_builder.py b/services/core/OpenADRVenAgent/openadrven/oadr_builder.py index 42d29a8809..435f75560a 100644 --- a/services/core/OpenADRVenAgent/openadrven/oadr_builder.py +++ b/services/core/OpenADRVenAgent/openadrven/oadr_builder.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenADRVenAgent/openadrven/oadr_extractor.py b/services/core/OpenADRVenAgent/openadrven/oadr_extractor.py index 14b848496c..7accce57c2 100644 --- a/services/core/OpenADRVenAgent/openadrven/oadr_extractor.py +++ b/services/core/OpenADRVenAgent/openadrven/oadr_extractor.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenADRVenAgent/setup.py b/services/core/OpenADRVenAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/core/OpenADRVenAgent/setup.py +++ b/services/core/OpenADRVenAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenADRVenAgent/test/ControlAgentSim/controlagentsim/agent.py b/services/core/OpenADRVenAgent/test/ControlAgentSim/controlagentsim/agent.py index 36a1ab431f..a73da34166 100644 --- a/services/core/OpenADRVenAgent/test/ControlAgentSim/controlagentsim/agent.py +++ b/services/core/OpenADRVenAgent/test/ControlAgentSim/controlagentsim/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenADRVenAgent/test/crypto_experiment.py b/services/core/OpenADRVenAgent/test/crypto_experiment.py index 485e4fe694..aa15cd0285 100644 --- a/services/core/OpenADRVenAgent/test/crypto_experiment.py +++ b/services/core/OpenADRVenAgent/test/crypto_experiment.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenADRVenAgent/test/test_ven.py b/services/core/OpenADRVenAgent/test/test_ven.py index 8a80ccdc92..2069e19c32 100644 --- a/services/core/OpenADRVenAgent/test/test_ven.py +++ b/services/core/OpenADRVenAgent/test/test_ven.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/OpenEISHistorian/README.md b/services/core/OpenEISHistorian/README.md new file mode 100644 index 0000000000..573b91498f --- /dev/null +++ b/services/core/OpenEISHistorian/README.md @@ -0,0 +1,75 @@ +OpenEIS historian +================= + +The following is an example configuration for the OpenEIS Historian. + + { + # The agent id is used for display in volttron central. + "agentid": "openeishistorian", + # The vip identity to use with this historian. + # should not be a platform.historian! + # + # Default value is un referenced because it listens specifically to the bus. + #"identity": "openeis.historian", + + # Require connection section for all historians. The openeis historian + # requires a url for the openis server and login credentials for publishing + # to the correct user's dataset. + "connection": { + "type": "openeis", + "params": { + # The server that is running openeis + # the rest path for the dataset is dataset/append/{id} + # and will be populated from the topic_dataset list below. + "uri": "http://localhost:8000", + + # Openeis requires a username/password combination in order to + # login to the site via rest or the ui. + # + "login": "volttron", + "password": "volttron" + } + }, + + # All datasets that are going to be recorded by this historian need to be + # defined here. + # + # A dataset definition consists of the following parts + # "ds1": { + # + # The dataset id that was created in openeis. + # "dataset_id": 1, + # + # Setting to 1 allows only the caching of data that actually meets + # the mapped point criteria for this dataset. + # Defaults to 0 + # "ignore_unmapped_points": 0, + # + # An ordered list of points that are to be posted to openeis. The + # points must contain a key specifying the incoming topic with the + # value an openeis schema point: + # [ + # {"rtu4/OutsideAirTemp": "campus1/building1/rtu4/OutdoorAirTemperature"} + # ] + # }, + "dataset_definitions": { + "ds1": { + "dataset_id": 1, + "ignore_unmapped_points": 0, + "points": [ + {"campus1/building1/OutsideAirTemp": "campus1/building1/OutdoorAirTemperature"}, + {"campus1/building1/HVACStatus": "campus1/building1/HVACStatus"}, + {"campus1/building1/CompressorStatus": "campus1/building1/LightingStatus"} + ] + } + #, + #"ds2": { + # "id": 2, + # "points": [ + # "rtu4/OutsideAirTemp", + # "rtu4/MixedAirTemp" + # ] + # } + } + } + diff --git a/services/core/OpenEISHistorian/config b/services/core/OpenEISHistorian/config new file mode 100644 index 0000000000..4d61968ddf --- /dev/null +++ b/services/core/OpenEISHistorian/config @@ -0,0 +1,21 @@ +{ + "connection": { + "type": "openeis", + "params": { + "uri": "http://localhost:8000", + "login": "volttron", + "password": "volttron" + } + }, + "dataset_definitions": { + "ds1": { + "dataset_id": 1, + "ignore_unmapped_points": 0, + "points": [ + {"campus1/building1/OutsideAirTemp": "campus1/building1/OutdoorAirTemperature"}, + {"campus1/building1/HVACStatus": "campus1/building1/HVACStatus"}, + {"campus1/building1/CompressorStatus": "campus1/building1/LightingStatus"} + ] + } + } +} diff --git a/services/core/OpenEISHistorian/openeis.historian.config b/services/core/OpenEISHistorian/openeis.historian.config deleted file mode 100644 index 2afdccf5c4..0000000000 --- a/services/core/OpenEISHistorian/openeis.historian.config +++ /dev/null @@ -1,69 +0,0 @@ -{ - # The agent id is used for display in volttron central. - "agentid": "openeishistorian", - # The vip identity to use with this historian. - # should not be a platform.historian! - # - # Default value is un referenced because it listens specifically to the bus. - #"identity": "openeis.historian", - - # Require connection section for all historians. The openeis historian - # requires a url for the openis server and login credentials for publishing - # to the correct user's dataset. - "connection": { - "type": "openeis", - "params": { - # The server that is running openeis - # the rest path for the dataset is dataset/append/{id} - # and will be populated from the topic_dataset list below. - "uri": "http://localhost:8000", - - # Openeis requires a username/password combination in order to - # login to the site via rest or the ui. - # - "login": "volttron", - "password": "volttron" - } - }, - - # All datasets that are going to be recorded by this historian need to be - # defined here. - # - # A dataset definition consists of the following parts - # "ds1": { - # - # The dataset id that was created in openeis. - # "dataset_id": 1, - # - # Setting to 1 allows only the caching of data that actually meets - # the mapped point criteria for this dataset. - # Defaults to 0 - # "ignore_unmapped_points": 0, - # - # An ordered list of points that are to be posted to openeis. The - # points must contain a key specifying the incoming topic with the - # value an openeis schema point: - # [ - # {"rtu4/OutsideAirTemp": "campus1/building1/rtu4/OutdoorAirTemperature"} - # ] - # }, - "dataset_definitions": { - "ds1": { - "dataset_id": 1, - "ignore_unmapped_points": 0, - "points": [ - {"campus1/building1/OutsideAirTemp": "campus1/building1/OutdoorAirTemperature"}, - {"campus1/building1/HVACStatus": "campus1/building1/HVACStatus"}, - {"campus1/building1/CompressorStatus": "campus1/building1/LightingStatus"} - ] - } -#, -#"ds2": { -# "id": 2, -# "points": [ -# "rtu4/OutsideAirTemp", -# "rtu4/MixedAirTemp" -# ] -# } - } -} diff --git a/services/core/OpenEISHistorian/openeis/historian.py b/services/core/OpenEISHistorian/openeis/historian.py index 714c708d16..585183d760 100644 --- a/services/core/OpenEISHistorian/openeis/historian.py +++ b/services/core/OpenEISHistorian/openeis/historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,12 +40,11 @@ import datetime import logging import sys - import requests from requests import ConnectionError + from volttron.utils.docs import doc_inherit from volttron.platform import jsonapi - from volttron.platform.agent.base_historian import BaseHistorian from volttron.platform.agent import utils @@ -53,10 +52,11 @@ _log = logging.getLogger(__name__) __version__ = '3.1' + def historian(config_path, **kwargs): config = utils.load_config(config_path) - connection = config.get('connection'); + connection = config.get('connection') assert connection assert connection.get('type') == 'openeis' @@ -80,7 +80,7 @@ def historian(config_path, **kwargs): assert len(datasets) > 0 headers = {'content-type': 'application/json'} - + class OpenEISHistorian(BaseHistorian): '''An OpenEIS historian which allows the publishing of dynamic. @@ -107,17 +107,15 @@ class OpenEISHistorian(BaseHistorian): @doc_inherit def publish_to_historian(self, to_publish_list): - _log.debug("publish_to_historian number of items: {}" - .format(len(to_publish_list))) + _log.debug("publish_to_historian number of items: {}".format(len(to_publish_list))) - #pprint(to_publish_list) + # print(to_publish_list) dataset_uri = uri + "/api/datasets/append" - # Build a paylooad for each of the points in each of the dataset - # definitions. + # Build a payload for each of the points in each of the dataset definitions. for dsk, dsv in datasets.items(): ds_id = dsv["dataset_id"] - ds_points = dsv['points'] #[unicode(p) for p in dsv['points']] + ds_points = dsv['points'] # [unicode(p) for p in dsv['points']] ignore_unmapped = dsv.get('ignore_unmapped_points', 0) point_map = {} @@ -131,34 +129,28 @@ def publish_to_historian(self, to_publish_list): if not openeis_sensor in point_map: point_map[openeis_sensor] = [] - point_map[openeis_sensor].append([to_pub['timestamp'], - to_pub['value']]) + point_map[openeis_sensor].append([to_pub['timestamp'], to_pub['value']]) else: if ignore_unmapped: self.report_handled(to_pub) else: - err = 'Point {topic} was not found in point map.' \ - .format(**to_pub) + err = 'Point {topic} was not found in point map.'.format(**to_pub) _log.error(err) - #pprint(point_map) + # pprint(point_map) if len(point_map) > 0: - payload = { 'dataset_id': ds_id, + payload = {'dataset_id': ds_id, 'point_map': point_map} - payload = jsonapi.dumps(payload, - default=datetime.datetime.isoformat) + payload = jsonapi.dumps(payload, default=datetime.datetime.isoformat) try: - #resp = requests.post(login_uri, auth=auth) - resp = requests.put(dataset_uri, verify=False, headers=headers, - data=payload) + # resp = requests.post(login_uri, auth=auth) + resp = requests.put(dataset_uri, verify=False, headers=headers, data=payload) if resp.status_code == requests.codes.ok: self.report_handled(try_publish) except ConnectionError: _log.error('Unable to connect to openeis at {}'.format(uri)) return - - ''' Transform the to_publish_list into a dictionary like the following @@ -182,7 +174,9 @@ def historian_setup(self): def main(argv=sys.argv): - '''Main method called by the eggsecutable.''' + """ + Main method called by the eggsecutable. + """ try: utils.vip_main(historian, version=__version__) except Exception as e: diff --git a/services/core/OpenEISHistorian/openeis/settings.py b/services/core/OpenEISHistorian/openeis/settings.py deleted file mode 100644 index 7c9d04d4ed..0000000000 --- a/services/core/OpenEISHistorian/openeis/settings.py +++ /dev/null @@ -1 +0,0 @@ -SCHEDULE_PERIOD = 180 diff --git a/services/core/OpenEISHistorian/setup.py b/services/core/OpenEISHistorian/setup.py index d0b881ddb9..98020d3341 100644 --- a/services/core/OpenEISHistorian/setup.py +++ b/services/core/OpenEISHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MasterDriverAgent/IDENTITY b/services/core/PlatformDriverAgent/IDENTITY similarity index 100% rename from services/core/MasterDriverAgent/IDENTITY rename to services/core/PlatformDriverAgent/IDENTITY diff --git a/services/core/PlatformDriverAgent/README.md b/services/core/PlatformDriverAgent/README.md new file mode 100644 index 0000000000..af075f6cfb --- /dev/null +++ b/services/core/PlatformDriverAgent/README.md @@ -0,0 +1,75 @@ +# Platform Driver Agent + +The Platform Driver agent is a special purpose agent a user can install on the platform to manage communication of the +platform with devices. The Platform driver features a number of endpoints for collecting data and sending control signals +using the message bus and automatically publishes data to the bus on a specified interval. + +## Dependencies + +VOLTTRON drivers operated by the platform driver may have additional requirements for installation. Required libraries: +1. BACnet driver - bacpypes +2. Modbus driver - pymodbus +3. Modbus_TK driver - modbus-tk +4. DNP3 and IEEE 2030.5 drivers - pydnp3 + +The easiest way to install the requirements for drivers included in the VOLTTRON repository is to use bootstrap.py +``` +python3 bootstrap.py --drivers +``` + +## Configuration + +### Agent Configuration + +The Platform Driver Agent configuration consists of general settings for all devices. The default values of the +Platform Driver should be sufficient for most users. The user may optionally change the interval between device scrapes +with the driver_scrape_interval. + +The following example sets the driver_scrape_interval to 0.05 seconds or 20 devices per second: +``` +{ + "driver_scrape_interval": 0.05, + "publish_breadth_first_all": false, + "publish_depth_first": false, + "publish_breadth_first": false, + "publish_depth_first_all": true, + "group_offset_interval": 0.0 +} +``` + +1. driver_scrape_interval - Sets the interval between devices scrapes. Defaults to 0.02 or 50 devices per second. +Useful for when the platform scrapes too many devices at once resulting in failed scrapes. +2. group_offset_interval - Sets the interval between when groups of devices are scraped. Has no effect if all devices +are in the same group. +In order to improve the scalability of the platform unneeded device state publishes for all devices can be turned off. +All of the following setting are optional and default to True. +3. publish_depth_first_all - Enable “depth first” publish of all points to a single topic for all devices. +4. publish_breadth_first_all - Enable “breadth first” publish of all points to a single topic for all devices. +5. publish_depth_first - Enable “depth first” device state publishes for each register on the device for all devices. +6. publish_breadth_first - Enable “breadth first” device state publishes for each register on the device for all devices. + +### Driver Configuration +Each device configuration has the following form: +``` +{ + "driver_config": {"device_address": "10.1.1.5", + "device_id": 500}, + "driver_type": "bacnet", + "registry_config":"config://registry_configs/vav.csv", + "interval": 60, + "heart_beat_point": "heartbeat", + "group": 0 +} +``` +The following settings are required for all device configurations: +1. driver_config - Driver specific setting go here. See below for driver specific settings. +2. driver_type - Type of driver to use for this device: bacnet, modbus, fake, etc. +3. registry_config - Reference to a configuration file in the configuration store for registers on the device. + +These settings are optional: + +1. interval - Period which to scrape the device and publish the results in seconds. Defaults to 60 seconds. +2. heart_beat_point - A Point which to toggle to indicate a heartbeat to the device. A point with this +Volttron Point Name must exist in the registry. If this setting is missing the driver will not send a heart beat signal +to the device. Heart beats are triggered by the Actuator Agent which must be running to use this feature. +3. group - Group this device belongs to. Defaults to 0 diff --git a/services/core/MasterDriverAgent/master-driver.agent b/services/core/PlatformDriverAgent/config similarity index 100% rename from services/core/MasterDriverAgent/master-driver.agent rename to services/core/PlatformDriverAgent/config diff --git a/services/core/MasterDriverAgent/conftest.py b/services/core/PlatformDriverAgent/conftest.py similarity index 100% rename from services/core/MasterDriverAgent/conftest.py rename to services/core/PlatformDriverAgent/conftest.py diff --git a/services/core/PlatformDriverAgent/platform-driver.agent b/services/core/PlatformDriverAgent/platform-driver.agent new file mode 100644 index 0000000000..d2b03d38c7 --- /dev/null +++ b/services/core/PlatformDriverAgent/platform-driver.agent @@ -0,0 +1,6 @@ +{ + "driver_scrape_interval": 0.05, + "publish_breadth_first_all": false, + "publish_depth_first": false, + "publish_breadth_first": false +} diff --git a/volttrontesting/fixtures/__init__.py b/services/core/PlatformDriverAgent/platform_driver/__init__.py similarity index 100% rename from volttrontesting/fixtures/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/__init__.py diff --git a/services/core/MasterDriverAgent/master_driver/agent.py b/services/core/PlatformDriverAgent/platform_driver/agent.py similarity index 96% rename from services/core/MasterDriverAgent/master_driver/agent.py rename to services/core/PlatformDriverAgent/platform_driver/agent.py index 8c5f8b6ea1..7a08f93298 100644 --- a/services/core/MasterDriverAgent/master_driver/agent.py +++ b/services/core/PlatformDriverAgent/platform_driver/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -63,7 +63,7 @@ class OverrideError(DriverInterfaceError): pass -def master_driver_agent(config_path, **kwargs): +def platform_driver_agent(config_path, **kwargs): config = utils.load_config(config_path) @@ -106,8 +106,8 @@ def get_config(name, default=None): driver_scrape_interval = get_config('driver_scrape_interval', 0.02) if config.get("driver_config_list") is not None: - _log.warning("Master driver configured with old setting. This is no longer supported.") - _log.warning('Use the script "scripts/update_master_driver_config.py" to convert the configuration.') + _log.warning("Platform driver configured with old setting. This is no longer supported.") + _log.warning('Use the script "scripts/update_platform_driver_config.py" to convert the configuration.') publish_depth_first_all = bool(get_config("publish_depth_first_all", True)) publish_breadth_first_all = bool(get_config("publish_breadth_first_all", False)) @@ -116,7 +116,7 @@ def get_config(name, default=None): group_offset_interval = get_config("group_offset_interval", 0.0) - return MasterDriverAgent(driver_config_list, scalability_test, + return PlatformDriverAgent(driver_config_list, scalability_test, scalability_test_iterations, driver_scrape_interval, group_offset_interval, @@ -130,7 +130,7 @@ def get_config(name, default=None): heartbeat_autostart=True, **kwargs) -class MasterDriverAgent(Agent): +class PlatformDriverAgent(Agent): def __init__(self, driver_config_list, scalability_test = False, scalability_test_iterations=3, driver_scrape_interval=0.02, @@ -143,7 +143,7 @@ def __init__(self, driver_config_list, scalability_test = False, publish_depth_first=False, publish_breadth_first=False, **kwargs): - super(MasterDriverAgent, self).__init__(**kwargs) + super(PlatformDriverAgent, self).__init__(**kwargs) self.instances = {} self.scalability_test = scalability_test self.scalability_test_iterations = scalability_test_iterations @@ -235,22 +235,22 @@ def configure_main(self, config_name, action, contents): except ValueError as e: _log.error("ERROR PROCESSING STARTUP CRITICAL CONFIGURATION SETTINGS: {}".format(e)) - _log.error("MASTER DRIVER SHUTTING DOWN") + _log.error("Platform driver SHUTTING DOWN") sys.exit(1) else: if self.max_open_sockets != config["max_open_sockets"]: - _log.info("The master driver must be restarted for changes to the max_open_sockets setting to take " + _log.info("The platform driver must be restarted for changes to the max_open_sockets setting to take " "effect") if self.max_concurrent_publishes != config["max_concurrent_publishes"]: - _log.info("The master driver must be restarted for changes to the max_concurrent_publishes setting to " + _log.info("The platform driver must be restarted for changes to the max_concurrent_publishes setting to " "take effect") if self.scalability_test != bool(config["scalability_test"]): if not self.scalability_test: _log.info( - "The master driver must be restarted with scalability_test set to true in order to run a test.") + "The platform driver must be restarted with scalability_test set to true in order to run a test.") if self.scalability_test: _log.info("A scalability test may not be interrupted. Restarting the driver is required to stop " "the test.") @@ -293,14 +293,14 @@ def configure_main(self, config_name, action, contents): driver_scrape_interval = float(config["driver_scrape_interval"]) except ValueError as e: _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) - _log.error("Master driver scrape interval settings unchanged") + _log.error("Platform driver scrape interval settings unchanged") # TODO: set a health status for the agent try: group_offset_interval = float(config["group_offset_interval"]) except ValueError as e: _log.error("ERROR PROCESSING CONFIGURATION: {}".format(e)) - _log.error("Master driver group interval settings unchanged") + _log.error("Platform driver group interval settings unchanged") # TODO: set a health status for the agent if self.scalability_test and action == "UPDATE": @@ -778,7 +778,7 @@ def forward_bacnet_cov_value(self, source_address, point_name, point_values): def main(argv=sys.argv): """Main method called to start the agent.""" - utils.vip_main(master_driver_agent, identity=PLATFORM_DRIVER, + utils.vip_main(platform_driver_agent, identity=PLATFORM_DRIVER, version=__version__) diff --git a/services/core/MasterDriverAgent/master_driver/driver.py b/services/core/PlatformDriverAgent/platform_driver/driver.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/driver.py rename to services/core/PlatformDriverAgent/platform_driver/driver.py index 3253ebd50e..6f9c479a90 100644 --- a/services/core/MasterDriverAgent/master_driver/driver.py +++ b/services/core/PlatformDriverAgent/platform_driver/driver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -149,7 +149,7 @@ def find_starting_datetime(self, now): def get_interface(self, driver_type, config_dict, config_string): """Returns an instance of the interface""" - module_name = "master_driver.interfaces." + driver_type + module_name = "platform_driver.interfaces." + driver_type module = __import__(module_name,globals(),locals(),[], 0) interfaces = module.interfaces sub_module = getattr(interfaces, driver_type) @@ -364,7 +364,7 @@ def revert_all(self, **kwargs): def publish_cov_value(self, point_name, point_values): """ - Called in the master driver agent to publish a cov from a point + Called in the platform driver agent to publish a cov from a point :param point_name: point which sent COV notifications :param point_values: COV point values """ diff --git a/services/core/MasterDriverAgent/master_driver/driver_exceptions.py b/services/core/PlatformDriverAgent/platform_driver/driver_exceptions.py similarity index 97% rename from services/core/MasterDriverAgent/master_driver/driver_exceptions.py rename to services/core/PlatformDriverAgent/platform_driver/driver_exceptions.py index ca231f8632..5a20709dd5 100644 --- a/services/core/MasterDriverAgent/master_driver/driver_exceptions.py +++ b/services/core/PlatformDriverAgent/platform_driver/driver_exceptions.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MasterDriverAgent/master_driver/driver_locks.py b/services/core/PlatformDriverAgent/platform_driver/driver_locks.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/driver_locks.py rename to services/core/PlatformDriverAgent/platform_driver/driver_locks.py index 310d66cba1..5ef6340171 100644 --- a/services/core/MasterDriverAgent/master_driver/driver_locks.py +++ b/services/core/PlatformDriverAgent/platform_driver/driver_locks.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -82,4 +82,3 @@ def publish_lock(): yield finally: _publish_lock.release() - \ No newline at end of file diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/IEEE2030_5.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/IEEE2030_5.py similarity index 97% rename from services/core/MasterDriverAgent/master_driver/interfaces/IEEE2030_5.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/IEEE2030_5.py index 804e05041a..3c24056045 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/IEEE2030_5.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/IEEE2030_5.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ from datetime import datetime, timedelta import logging -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert _log = logging.getLogger(__name__) type_mapping = {"string": str, @@ -119,11 +119,11 @@ class Interface(BasicRevert, BaseInterface): Test drivers for the IEEE 2030.5 interface can be configured as follows: cd $VOLTTRON_ROOT - export DRIVER_ROOT=$VOLTTRON_ROOT/services/core/MasterDriverAgent/master_driver + export DRIVER_ROOT=$VOLTTRON_ROOT/services/core/PlatformDriverAgent/platform_driver volttron-ctl config store platform.driver IEEE2030_5.csv $DRIVER_ROOT/IEEE2030_5.csv --csv volttron-ctl config store platform.driver devices/IEEE2030_5_1 $DRIVER_ROOT/test_IEEE2030_5_1.config volttron-ctl config store platform.driver devices/IEEE2030_5_2 $DRIVER_ROOT/test_IEEE2030_5_2.config - echo IEEE2030_5 drivers configured for MasterDriver: + echo IEEE2030_5 drivers configured for PlatformDriver: volttron-ctl config list platform.driver """ diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/__init__.py similarity index 95% rename from services/core/MasterDriverAgent/master_driver/interfaces/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/__init__.py index 1a008e2067..12c0181a1f 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/__init__.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -45,7 +45,7 @@ While it is possible to create an Agent which handles communication with a new device it will miss out on the benefits of creating a proper interface for the -Master Driver Agent. +Platform Driver Agent. Creating an Interface for a device allows users of the device to automatically benefit from the following platform features: @@ -65,9 +65,9 @@ ------------------------ To create a new device driver create a new module in the -:py:mod:`MasterDriverAgent.master_driver.interfaces` package. The name of +:py:mod:`PlatformDriverAgent.platform_driver.interfaces` package. The name of this module will be the name to use in the "driver_type" setting in -a :ref:`driver configuration file ` in order to +a :ref:`driver configuration file ` in order to load the new driver. In the new module create a subclass of :py:class:`BaseInterface` called `Interface`. @@ -93,13 +93,13 @@ Interface Configuration and Startup ----------------------------------- -When processing a :ref:`driver configuration file ` -the Master Driver Agent will use the "driver_type" setting to automatically find and load the +When processing a :ref:`driver configuration file ` +the Platform Driver Agent will use the "driver_type" setting to automatically find and load the appropriate ``Interface`` class for the desired driver. -After loading the class the Master Driver Agent will call :py:meth:`BaseInterface.configure` +After loading the class the Platform Driver Agent will call :py:meth:`BaseInterface.configure` with the contents of the "driver_config" section of the -:ref:`driver configuration file ` +:ref:`driver configuration file ` parsed into a python dictionary and the contents of the file referenced in "registry_config" entry. @@ -107,14 +107,14 @@ on a device by creating instances of :py:class:`BaseRegister` (or a subclass) and adding them to the Interface with :py:meth:`BaseInterface.insert_register`. -After calling :py:meth:`BaseInterface.configure` the Master Driver Agent +After calling :py:meth:`BaseInterface.configure` the Platform Driver Agent will use the created registers to create meta data for each point on the device. Device Scraping --------------- The work scheduling and publish periodic device scrapes is handled by -the Master Driver Agent. When a scrape starts the Master Driver Agent calls the +the Platform Driver Agent. When a scrape starts the Platform Driver Agent calls the :py:meth:`BaseInterface.scrape_all`. It will take the results of the call and attach meta data and and publish as needed. @@ -122,11 +122,11 @@ ------------------ Requests to interact with the device via any method supported by the platform -are routed to the correct Interface instance by the Master Driver Agent. +are routed to the correct Interface instance by the Platform Driver Agent. Most commands originate from RPC calls to the :py:class:`Actuator Agent` and are forwarded -to the Master Driver Agent. +to the Platform Driver Agent. - A command to set the value of a point on a device results in a call to :py:meth:`BaseInterface.set_point`. @@ -144,7 +144,7 @@ Registers --------- -The Master Driver Agent uses the :py:meth:`BaseInterface.get_register_names` and +The Platform Driver Agent uses the :py:meth:`BaseInterface.get_register_names` and :py:meth:`BaseInterface.get_register_by_name` methods to get registers to setup meta data. This means that its a requirement to use the BaseRegister class to store @@ -199,7 +199,7 @@ class BaseRegister(object): :type units: str :type description: str - The Master Driver Agent will use :py:meth:`BaseRegister.get_units` to populate metadata for + The Platform Driver Agent will use :py:meth:`BaseRegister.get_units` to populate metadata for publishing. When instantiating register instances be sure to provide a useful string for the units argument. """ @@ -246,7 +246,7 @@ class BaseInterface(object, metaclass=abc.ABCMeta): All interfaces *must* subclass this. - :param vip: A reference to the MasterDriverAgent vip subsystem. + :param vip: A reference to the PlatformDriverAgent vip subsystem. :param core: A reference to the parent driver agent's core subsystem. """ @@ -373,7 +373,7 @@ def set_point(self, point_name, value, **kwargs): @abc.abstractmethod def scrape_all(self): """ - Method the Master Driver Agent calls to get the current state + Method the Platform Driver Agent calls to get the current state of a device for publication. :return: Point names to values for device. @@ -635,7 +635,7 @@ def _set_point(self, point_name, value): @abc.abstractmethod def _scrape_all(self): """ - Method the Master Driver Agent calls to get the current state + Method the Platform Driver Agent calls to get the current state of a device for publication. If using this mixin you must override this method diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/bacnet.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/bacnet.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/bacnet.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/bacnet.py index 08e5148a78..d1cde02bbf 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/bacnet.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/bacnet.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,8 +40,8 @@ import logging from datetime import datetime, timedelta -from master_driver.driver_exceptions import DriverConfigError -from master_driver.interfaces import BaseInterface, BaseRegister +from platform_driver.driver_exceptions import DriverConfigError +from platform_driver.interfaces import BaseInterface, BaseRegister from volttron.platform.vip.agent import errors from volttron.platform.jsonrpc import RemoteError diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/README.rst b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/README.rst similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/README.rst rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/README.rst diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/__init__.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/__init__.py index be5f8a12e2..d9a90e0482 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/__init__.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,10 +42,10 @@ import abc import sys -import service as cps -import async_service as async +from . import service as cps +from . import async_service as async -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert, DriverInterfaceError +from .. import BaseInterface, BaseRegister, BasicRevert, DriverInterfaceError from suds.sudsobject import asdict _log = logging.getLogger(__name__) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/async_service.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/async_service.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/async_service.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/async_service.py index 8d1a87a3dc..35375f457e 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/async_service.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/async_service.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,7 +64,7 @@ import logging import suds from gevent import monkey -from service import CPAPIException +from .service import CPAPIException from datetime import datetime, timedelta monkey.patch_all() diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/credential_check.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/credential_check.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/credential_check.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/credential_check.py index 59d3c78ea0..9687d08d63 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/credential_check.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/credential_check.py @@ -1,4 +1,4 @@ -import service as cps +from . import service as cps import suds import io diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/requirements.txt b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/requirements.txt similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/requirements.txt rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/requirements.txt diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/service.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/service.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/service.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/service.py index b50cfee0df..a53ba5ff30 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/service.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/service.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/install-test-agent.sh b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/install-test-agent.sh similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/install-test-agent.sh rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/install-test-agent.sh diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/setup.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/setup.py similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/setup.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/setup.py diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/testagent.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/testagent.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/testagent.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/testagent.config diff --git a/volttrontesting/platform/security/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/tester/__init__.py similarity index 100% rename from volttrontesting/platform/security/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/tester/__init__.py diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/tester/agent.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/tester/agent.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/tester/agent.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/tester/agent.py index 8c62f22466..f9322e88fd 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/TestAgent/tester/agent.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/TestAgent/tester/agent.py @@ -1,7 +1,7 @@ ## -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/test_chargepoint_driver.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/test_chargepoint_driver.py similarity index 97% rename from services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/test_chargepoint_driver.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/test_chargepoint_driver.py index a2b1b41ac9..61c7e70cc8 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/chargepoint/tests/test_chargepoint_driver.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/chargepoint/tests/test_chargepoint_driver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -153,7 +153,7 @@ @pytest.fixture(scope='module') def agent(request, volttron_instance): md_agent = volttron_instance.build_agent() - # Clean out master driver configurations. + # Clean out platform driver configurations. md_agent.vip.rpc.call('config.store', 'manage_delete_store', 'platform.driver').get(timeout=10) @@ -184,14 +184,14 @@ def agent(request, volttron_instance): REGISTRY_CONFIG_STRING, 'csv').get(timeout=10) - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print('agent id: ', master_uuid) + print('agent id: ', platform_uuid) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/dnp3.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/dnp3.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/dnp3.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/dnp3.py index 3b5ce58504..93e1690465 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/dnp3.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/dnp3.py @@ -105,7 +105,7 @@ class Interface(BasicRevert, BaseInterface): Test drivers for the DNP3 interface can be configured as follows: export VOLTTRON_ROOT= - export DRIVER_ROOT=$VOLTTRON_ROOT/services/core/MasterDriverAgent + export DRIVER_ROOT=$VOLTTRON_ROOT/services/core/PlatformDriverAgent cd $VOLTTRON_ROOT volttron-ctl config store platform.driver dnp3.csv $DRIVER_ROOT/example_configurations/dnp3.csv --csv volttron-ctl config store platform.driver devices/dnp3 $DRIVER_ROOT/example_configurations/test_dnp3.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/ecobee.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/ecobee.py index d63908f6b9..6b2ceb6901 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/ecobee.py @@ -48,7 +48,7 @@ from volttron.platform.agent import utils from volttron.platform.agent.known_identities import CONFIGURATION_STORE, PLATFORM_DRIVER from volttron.utils.persistance import PersistentDict -from services.core.MasterDriverAgent.master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert AUTH_CONFIG_PATH = "drivers/auth/ecobee_{}" THERMOSTAT_URL = 'https://api.ecobee.com/1/thermostat' @@ -273,7 +273,7 @@ def refresh_tokens(self): def update_auth_config(self): """ - Update the master driver configuration for this device with new values from auth functions + Update the platform driver configuration for this device with new values from auth functions """ auth_config = {"AUTH_CODE": self.authorization_code, "ACCESS_TOKEN": self.access_token, diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/fakedriver.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/fakedriver.py similarity index 97% rename from services/core/MasterDriverAgent/master_driver/interfaces/fakedriver.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/fakedriver.py index 3fba40ec1d..f4fd1ca0cc 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/fakedriver.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/fakedriver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,7 +42,7 @@ import math from math import pi -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert from csv import DictReader from io import StringIO import logging @@ -86,7 +86,7 @@ def __init__(self, read_only, pointName, units, reg_type, self.math_func = getattr(math, default_value) else: _log.error('Invalid default_value in EKGregister.') - _log.warn('Defaulting to sin(x)') + _log.warning('Defaulting to sin(x)') self.math_func = math.sin @property diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus.py index 0b5f16809a..e6f5868c9c 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,8 +49,8 @@ from contextlib import contextmanager, closing -from master_driver.driver_locks import socket_lock -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert, DriverInterfaceError +from platform_driver.driver_locks import socket_lock +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert, DriverInterfaceError from volttron.platform.agent import utils @contextmanager diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/__init__.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/__init__.py index 67a5620bd9..83bcff4ab3 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/__init__.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -38,9 +38,9 @@ from gevent import monkey from volttron.platform.agent import utils -from master_driver.interfaces import BaseRegister, BaseInterface, BasicRevert -from master_driver.interfaces.modbus_tk import helpers -from master_driver.interfaces.modbus_tk.maps import Map +from platform_driver.interfaces import BaseRegister, BaseInterface, BasicRevert +from platform_driver.interfaces.modbus_tk import helpers +from platform_driver.interfaces.modbus_tk.maps import Map import logging import struct diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/client.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/client.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/client.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/client.py index 8aa1760262..b16a262f22 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/client.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/client.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/config_cmd.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/config_cmd.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/config_cmd.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/config_cmd.py index 54c4e6d1cc..ec58de4b96 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/config_cmd.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/config_cmd.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,7 +37,7 @@ # }}} -from master_driver.interfaces.modbus_tk.helpers import str2bool +from .helpers import str2bool import cmd import yaml @@ -879,4 +879,4 @@ def do_quit(self, line): "Type or to list all commands.".format(**commander._directories) commander.prompt = "\nModbusTK > " commander.cmdloop() - exit() \ No newline at end of file + exit() diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/helpers.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/helpers.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/helpers.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/helpers.py index 866b25ab77..f96dff5e43 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/helpers.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/helpers.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -136,7 +136,7 @@ def parse_transform_arg(func, arg): """ parse_arg = arg if func in (scale, scale_int, scale_decimal_int_signed): - if type(arg) not in (int, long, float): + if type(arg) not in (int, float): try: parse_arg = int(arg, 10) except ValueError: diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/__init__.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/__init__.py index bc89e8c689..dd95f4a63e 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/__init__.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,8 +36,8 @@ # under Contract DE-AC05-76RL01830 # }}} -from master_driver.interfaces.modbus_tk.client import Field, Client -from master_driver.interfaces.modbus_tk import helpers +from platform_driver.interfaces.modbus_tk.client import Field, Client +from platform_driver.interfaces.modbus_tk import helpers from collections import Mapping import csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/battery_meter.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/battery_meter.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/battery_meter.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/battery_meter.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion6200.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion6200.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion6200.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion6200.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion6200.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion6200.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion6200.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion6200.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion6200_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion6200_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion6200_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion6200_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion8600.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion8600.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion8600.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion8600.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion8600_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion8600_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/ion8600_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/ion8600_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/maps.yaml b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/maps.yaml similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/maps.yaml rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/maps.yaml diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/mixed_endian_reg_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/mixed_endian_reg_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/mixed_endian_reg_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/mixed_endian_reg_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/modbus_tk_test.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/modbus_tk_test.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/modbus_tk_test.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/modbus_tk_test.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/modbus_tk_test.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/modbus_tk_test.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/modbus_tk_test.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/modbus_tk_test.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/modbus_tk_test_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/modbus_tk_test_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/modbus_tk_test_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/modbus_tk_test_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/scale_reg_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/scale_reg_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/scale_reg_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/scale_reg_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/scale_reg_pow_10_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/scale_reg_pow_10_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/scale_reg_pow_10_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/scale_reg_pow_10_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/watts_on.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/watts_on.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/watts_on.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/watts_on.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/watts_on.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/watts_on.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/watts_on.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/watts_on.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/watts_on_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/watts_on_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/watts_on_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/watts_on_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/write_single_registers.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/write_single_registers.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/write_single_registers.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/write_single_registers.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/write_single_registers.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/write_single_registers.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/write_single_registers.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/write_single_registers.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/write_single_registers_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/write_single_registers_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps/write_single_registers_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps/write_single_registers_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/requirements.txt b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/requirements.txt similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/requirements.txt rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/requirements.txt diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/server.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/server.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/server.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/server.py index 27333a9b56..780d40f852 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/server.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/server.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -292,4 +292,4 @@ def handler(signum, frame): modbus_server = ServerProcess(host=args.host, port=args.port) modbus_server.start() - modbus_server.join() \ No newline at end of file + modbus_server.join() diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/README.rst b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/README.rst similarity index 84% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/README.rst rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/README.rst index 46855f880b..6e21757922 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/README.rst +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/README.rst @@ -4,9 +4,9 @@ MODBUS_TK REGRESSION TEST README modbus_listener_agent.py ------------------------ -Set-up listener agent to listen to master driver agent with TCP and RTU transport: +Set-up listener agent to listen to platform driver agent with TCP and RTU transport: -- Use "volttron-cfg" to set-up master driver agent with default fake_driver, add the int register "count" +- Use "volttron-cfg" to set-up platform driver agent with default fake_driver, add the int register "count" to its csv config using this script: $ vctl config edit platform.driver fake.csv - Add modbus_test driver config and csv using this script for tcp transport testing: @@ -28,7 +28,7 @@ test_modbus_tk_driver.py ------------------------ Regression test for modbus_tk interface with tcp transport: -- Build master driver agent and define two different modbus driver config and two different register csv set, +- Build platform driver agent and define two different modbus driver config and two different register csv set, one followed the original modbus structure, and another followed the new modbus_tk structure - With the set-up server running, do regression test on set_point, get_point, scrape_all, revert_point, and revert_device for both drivers @@ -38,7 +38,7 @@ test_scrape_all.py ------------------ Regression test for modbus_tk interface with tcp transport: -- Build master driver agent and define two different modbus driver config and two different register csv set, +- Build platform driver agent and define two different modbus driver config and two different register csv set, one followed the original modbus structure, and another followed the new modbus_tk structure - With the set-up server running, run scrape_all for both drivers in two different threads with some set-up time interval @@ -66,7 +66,7 @@ test_write_single_registers.py ------------------------------ Regression test for modbus_tk interface with tcp transport: -- Build master driver agent and define write_single_registers driver config and register csv set with the additional +- Build platform driver agent and define write_single_registers driver config and register csv set with the additional feature write_multiple_registers = false (it means write single register with modbus function code 06) - With the set-up server running, do regression test on set_point, get_point, scrape_all, revert_point, and revert_device for the driver diff --git a/services/core/MongodbHistorian/tests/test_mongo_with_data.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/__init__.py similarity index 100% rename from services/core/MongodbHistorian/tests/test_mongo_with_data.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/__init__.py diff --git a/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/fake.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/fake.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/fake.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/fake.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/fake.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/fake.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/fake.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/fake.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/modbus_test.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/modbus_test.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/modbus_test.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/modbus_test.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/modbus_test.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/modbus_test.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/modbus_test.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/modbus_test.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on_1.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on_1.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on_1.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on_1.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on_2.config b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on_2.config similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on_2.config rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on_2.config diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on_map.csv b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on_map.csv similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/example_config/watts_on_map.csv rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/example_config/watts_on_map.csv diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/modbus_listener_agent.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_listener_agent.py similarity index 93% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/modbus_listener_agent.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_listener_agent.py index e4fbd39c35..9bf393b27c 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/modbus_listener_agent.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_listener_agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -68,13 +68,13 @@ def __init__(self, config_path, **kwargs): try: self._heartbeat_period = int(self._heartbeat_period) except: - _log.warn('Invalid heartbeat period specified setting to default') + _log.warning('Invalid heartbeat period specified setting to default') self._heartbeat_period = DEFAULT_HEARTBEAT_PERIOD log_level = self.config.get('log-level', 'INFO') if log_level == 'ERROR': self._logfn = _log.error elif log_level == 'WARN': - self._logfn = _log.warn + self._logfn = _log.warning elif log_level == 'DEBUG': self._logfn = _log.debug else: @@ -101,7 +101,7 @@ def on_match(self, peer, sender, bus, topic, headers, message): update_val = self.test('fake-campus/fake-building/fake-device', 'Count') print(update_val) - # Define the modbus_test driver for master_driver agent, do set point for all registers + # Define the modbus_test driver for platform_driver agent, do set point for all registers self.set_point('modbus_test', 'BigUShort', 1234) self.set_point('modbus_test', 'BigUInt', 141141) self.set_point('modbus_test', 'BigULong', 9999999) @@ -118,11 +118,11 @@ def on_match(self, peer, sender, bus, topic, headers, message): self.set_point('modbus_test', 'LittleLong', -898989) print(('MODBUS TEST', self.scrape_all('modbus_test'))) - # Define watts_on_1 (slave id 1) and watts_on_2 (slave id 2) for master_driver agent, do scrape_all + # Define watts_on_1 (slave id 1) and watts_on_2 (slave id 2) for platform_driver agent, do scrape_all print(('SLAVE ID 1', self.scrape_all('watts_on_1'))) print(('SLAVE ID 2', self.scrape_all('watts_on_2'))) - # Define modbus_tk_test driver for master_driver agent, do set point, get point, and scrape_all + # Define modbus_tk_test driver for platform_driver agent, do set point, get point, and scrape_all print(self.set_point('modbus_tk_test', 'unsigned short', 1234)) print(self.get_point('modbus_tk_test', 'unsigned short')) print(self.scrape_all('modbus_tk_test')) @@ -156,4 +156,4 @@ def main(argv=sys.argv): if __name__ == '__main__': # Entry point for script - sys.exit(main()) \ No newline at end of file + sys.exit(main()) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/modbus_server.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_server.py similarity index 94% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/modbus_server.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_server.py index 57aea7007f..ea959b528e 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/modbus_server.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/modbus_server.py @@ -1,7 +1,7 @@ -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk import helpers -from master_driver.interfaces.modbus_tk.client import Client, Field -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk import helpers +from platform_driver.interfaces.modbus_tk.client import Client, Field +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog import serial from struct import pack, unpack @@ -84,7 +84,7 @@ def watts_on_server(): # Can define ModbusClient2 by Map or defined the class as ModbusClient1 or ModbusClient2 # modbus_map = Map( - # map_dir='/Users/anhnguyen/repos/kisensum-volttron/volttron/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/maps', + # map_dir='/Users/anhnguyen/repos/kisensum-volttron/volttron/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/maps', # addressing='offset', name='watts_on', file='watts_on.csv', endian='big') # ModbusClient2 = modbus_map.get_class() @@ -114,4 +114,4 @@ def watts_on_server(): server_process.start() # For rtu transport - # watts_on_server() \ No newline at end of file + # watts_on_server() diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_battery_meter.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_battery_meter.py similarity index 97% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_battery_meter.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_battery_meter.py index 7db23b6075..43313652e2 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_battery_meter.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_battery_meter.py @@ -5,8 +5,8 @@ from volttron.platform import get_services_core, jsonapi from volttrontesting.utils.utils import get_rand_ip_and_port -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -271,15 +271,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): """ - Build MasterDriverAgent, add modbus driver & csv configurations + Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -308,8 +308,8 @@ def agent(request, volttron_instance): REGISTER_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) @@ -317,9 +317,9 @@ def agent(request, volttron_instance): def stop(): """ - Stop master driver agent + Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_ion6200.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_ion6200.py similarity index 94% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_ion6200.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_ion6200.py index f59e8c951d..e4c91934f2 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_ion6200.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_ion6200.py @@ -5,8 +5,8 @@ import os.path from volttron.platform import get_services_core -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -77,15 +77,15 @@ @pytest.fixture(scope="module") def ion_driver_agent(request, volttron_instance): - """Build MasterDriverAgent, add modbus driver & csv configurations + """Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -114,16 +114,16 @@ def ion_driver_agent(request, volttron_instance): ION6200_CSV_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) @@ -246,4 +246,4 @@ def test_revert_all_new(self, ion_driver_agent): assert type(default_values) is dict for key in default_values.keys(): - assert default_values[key] == 0 or 0.0 \ No newline at end of file + assert default_values[key] == 0 or 0.0 diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_mixed_endian.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_mixed_endian.py similarity index 95% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_mixed_endian.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_mixed_endian.py index 27834461dc..b4201a6189 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_mixed_endian.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_mixed_endian.py @@ -4,8 +4,8 @@ import time from volttron.platform import get_services_core -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -76,15 +76,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): - """Build MasterDriverAgent, add modbus driver & csv configurations + """Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -127,16 +127,16 @@ def agent(request, volttron_instance): NEW_REGISTER_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_modbus_tk_driver.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_modbus_tk_driver.py similarity index 95% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_modbus_tk_driver.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_modbus_tk_driver.py index 40b5ec1411..b422962f3f 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_modbus_tk_driver.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_modbus_tk_driver.py @@ -6,8 +6,8 @@ from random import randint from volttrontesting.utils.utils import get_rand_ip_and_port from volttron.platform import get_services_core, jsonapi -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -100,15 +100,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): """ - Build MasterDriverAgent, add modbus driver & csv configurations + Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -151,8 +151,8 @@ def agent(request, volttron_instance): OLD_VOLTTRON_REGISTRY_CONFIG, config_type='csv') - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) @@ -160,9 +160,9 @@ def agent(request, volttron_instance): def stop(): """ - Stop master driver agent + Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scale_reg.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scale_reg.py similarity index 93% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scale_reg.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scale_reg.py index 176026c86f..17d9f3fe92 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scale_reg.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scale_reg.py @@ -4,8 +4,8 @@ import time from volttron.platform import get_services_core -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -47,15 +47,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): - """Build MasterDriverAgent, add modbus driver & csv configurations + """Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -84,16 +84,16 @@ def agent(request, volttron_instance): REGISTER_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scale_reg_pow_10.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scale_reg_pow_10.py similarity index 93% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scale_reg_pow_10.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scale_reg_pow_10.py index a111827ae7..bfa4e8b282 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scale_reg_pow_10.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scale_reg_pow_10.py @@ -4,8 +4,8 @@ import time from volttron.platform import get_services_core -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -47,15 +47,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): - """Build MasterDriverAgent, add modbus driver & csv configurations + """Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -84,16 +84,16 @@ def agent(request, volttron_instance): REGISTER_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scrape_all.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scrape_all.py similarity index 94% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scrape_all.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scrape_all.py index 8a31874f1b..9c8dcb318d 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_scrape_all.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_scrape_all.py @@ -7,8 +7,8 @@ from random import randint from volttrontesting.utils.utils import get_rand_ip_and_port from volttron.platform import get_services_core, jsonapi -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -90,15 +90,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): """ - Build MasterDriverAgent, add modbus driver & csv configurations + Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -141,8 +141,8 @@ def agent(request, volttron_instance): OLD_VOLTTRON_REGISTRY_CONFIG, config_type='csv') - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) @@ -150,9 +150,9 @@ def agent(request, volttron_instance): def stop(): """ - Stop master driver agent + Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_watts_on.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_watts_on.py similarity index 93% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_watts_on.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_watts_on.py index 2c7014c639..efb8f15515 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_watts_on.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_watts_on.py @@ -55,15 +55,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): - """Build MasterDriverAgent, add modbus driver & csv configurations + """Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -92,16 +92,16 @@ def agent(request, volttron_instance): REGISTRY_CONFIG_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_write_single_registers.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_write_single_registers.py similarity index 93% rename from services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_write_single_registers.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_write_single_registers.py index 718797300e..b6027ee010 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/modbus_tk/tests/test_write_single_registers.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/modbus_tk/tests/test_write_single_registers.py @@ -4,8 +4,8 @@ import time from volttron.platform import get_services_core -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.maps import Map, Catalog +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.maps import Map, Catalog from volttron.platform.agent.known_identities import PLATFORM_DRIVER logger = logging.getLogger(__name__) @@ -45,15 +45,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): - """Build MasterDriverAgent, add modbus driver & csv configurations + """Build PlatformDriverAgent, add modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -82,16 +82,16 @@ def agent(request, volttron_instance): REGISTRY_CONFIG_MAP, config_type='csv') - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) @@ -212,4 +212,4 @@ def test_revert_all(self, agent): assert type(default_values) is dict for key in default_values.keys(): - assert default_values[key] == 0 \ No newline at end of file + assert default_values[key] == 0 diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/obix.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/obix.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/obix.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/obix.py index 54f686298c..117c56d7bd 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/obix.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/obix.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,7 +42,7 @@ import grequests from xml.dom.minidom import parseString -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert #Logging is completely configured by now. _log = logging.getLogger(__name__) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/radiothermostat.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/radiothermostat.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/radiothermostat.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/radiothermostat.py index 028f4e4b45..1c0791360c 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/radiothermostat.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/radiothermostat.py @@ -37,7 +37,7 @@ """ -from master_driver.interfaces import BaseInterface, BaseRegister, DriverInterfaceError +from platform_driver.interfaces import BaseInterface, BaseRegister, DriverInterfaceError from . import thermostat_api from volttron.platform import jsonapi diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/rainforesteagle.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/rainforesteagle.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/rainforesteagle.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/rainforesteagle.py index b9891208f5..4f88f3eeb7 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/rainforesteagle.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/rainforesteagle.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -40,7 +40,7 @@ import logging import requests -from master_driver.interfaces import (BaseInterface, +from platform_driver.interfaces import (BaseInterface, BaseRegister, BasicRevert, DriverInterfaceError) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/rainforestemu2.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/rainforestemu2.py similarity index 98% rename from services/core/MasterDriverAgent/master_driver/interfaces/rainforestemu2.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/rainforestemu2.py index 10288d25d0..df69140498 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/rainforestemu2.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/rainforestemu2.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,7 +42,7 @@ import time import sys -from master_driver.interfaces import (BaseInterface, +from platform_driver.interfaces import (BaseInterface, BaseRegister, BasicRevert, DriverInterfaceError) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/restful.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/restful.py similarity index 97% rename from services/core/MasterDriverAgent/master_driver/interfaces/restful.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/restful.py index fed5cf99b8..27ab28ba86 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/restful.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/restful.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -39,7 +39,7 @@ import logging import requests -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert +from platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert _log = logging.getLogger(__name__) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/ted_meter/__init__.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/ted_meter/__init__.py similarity index 99% rename from services/core/MasterDriverAgent/master_driver/interfaces/ted_meter/__init__.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/ted_meter/__init__.py index 92431acecf..ab7f739184 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/ted_meter/__init__.py +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/ted_meter/__init__.py @@ -37,7 +37,7 @@ import grequests from volttron.platform.agent import utils -from master_driver.interfaces import BaseRegister, BaseInterface, BasicRevert +from ...interfaces import BaseRegister, BaseInterface, BasicRevert TED_METER_LOGGER = logging.getLogger("ted_meter") @@ -161,7 +161,7 @@ def __init__(self, **kwargs): self.device_path = kwargs.get("device_path") def configure(self, config_dict, registry_config_str): - """Configure method called by the master driver with configuration + """Configure method called by the platform driver with configuration stanza and registry config file, we ignore the registry config, as we build the registers based on the configuration collected from TED Pro Device diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/thermostat_api.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/thermostat_api.py similarity index 100% rename from services/core/MasterDriverAgent/master_driver/interfaces/thermostat_api.py rename to services/core/PlatformDriverAgent/platform_driver/interfaces/thermostat_api.py diff --git a/services/core/PlatformDriverAgent/platform_driver/interfaces/universal.py b/services/core/PlatformDriverAgent/platform_driver/interfaces/universal.py new file mode 100644 index 0000000000..3ee2b70350 --- /dev/null +++ b/services/core/PlatformDriverAgent/platform_driver/interfaces/universal.py @@ -0,0 +1,278 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +''' + +------------------------------------------------------------------------------- + History + 03/30/16 - Initial. + 08/15/16 - Remove whitespace in config file. + 10/11/16 - Pass only device_id to VehicleDriver. + 03/01/17 - Call agent.GetPoint in get_point. + 04/17/17 - Updated for Volttron 4.0. +------------------------------------------------------------------------------- +''' +__author1__ = 'Carl Miller ' +__copyright__ = 'Copyright (c) 2019, Battelle Memorial Institute' +__license__ = 'Apache 2.0' +__version__ = '0.2.0' + +from volttron.platform.agent import utils + +try: + from ..interfaces import BaseRegister, BaseInterface, BasicRevert +except: + from services.core.PlatformDriverAgent.platform_driver.interfaces import BaseInterface, BaseRegister, BasicRevert + +from csv import DictReader +# from StringIO import StringIO - python 2 +from io import StringIO # python 3 +import gevent +import logging +import sys + +# set DRIVER_PATH to path to your specific driver agent +DRIVER_PATH = "/home/volttron/GridAgents/VolttronAgents/Drivers" +sys.path.insert(0, DRIVER_PATH) +from heaters.agent import HeaterDriver +from meters.agent import MeterDriver +from hvac.agent import ThermostatDriver +from blinds.agent import BlindsDriver +from vehicles.agent import VehicleDriver + +_log = logging.getLogger(__name__) + + +# UDI - Universal Driver Interface +class Interface(BasicRevert, BaseInterface): + def __init__(self, **kwargs): + super(Interface, self).__init__(**kwargs) + # the following are new in bacnet 4.0 driver, do we need to do too? + # self.register_count = 10000 + # self.register_count_divisor = 1 + + self.agent = None + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('-v', '--verbose', action='count', dest="verbosity", default=0) + args = parser.parse_args() + self._verboseness = args.verbosity + + if (self._verboseness == 0): + verbiage = logging.ERROR + if (self._verboseness == 1): + verbiage = logging.WARNING # '-v' + elif (self._verboseness == 2): + verbiage = logging.INFO # '-vv' + elif (self._verboseness >= 3): + verbiage = logging.DEBUG # '-vvv' + _log.setLevel(verbiage) + + ''' + config_dict: 'filename'.config, specified in the 'platform-driver.agent' file. + registry_config_str: points csv file + def configure(self, config_dict, registry_config_str): + when 4.0 platform driver is started, class ConfigStore is instantiated: + volttron/platform/vip/agent/subsystems/configstore.py which exports initial_update() + which calls volttron/platform/store.py: def get_configs(self): + self.vip.rpc.call(identity, "config.initial_update" sets list of registry_configs + + scripts/install_platform_driver_configs.py calls 'manage_store' rpc, which is in volttron/platform/store.py + which calls process_raw_config(), which stores it as a dict. + process_raw_config() is also called by process_store() in store.py + when the platform starts ( class ConfigStoreService): + processing_raw_config 'registry_configs/meter.csv' (config_type: csv) + process_store() is called by _setup using a 'PersistentDict', i.e.: + store_path '/home/carl/.volttron/configuration_store/platform.driver.store' + + install_platform_driver_configs.py stores them as config_type="csv", it is useful for batch processing alot + of files at once, like when upgrading from 3.5 to 4.0 + + to add single config to store, activate and start platform then: + List current configs: + volttron-ctl config list platform.driver + config + devices/PNNL/LABHOME_B/METER1 + registry_configs/meter.csv + Delete current configs: + volttron-ctl config delete platform.driver registry_configs/meter.csv # note lack of prefix './GridAgents/configs/' + volttron-ctl config delete platform.driver devices/PNNL/LABHOME_B/METER1 + To store the driver configuration run the command: + delete any files from ../GridAgents/configs + volttron-ctl config store platform.driver devices/PNNL/LABHOME_B ../GridAgents/configs/devices/PNNL/LABHOME_B/METER1 + + To store the registry configuration run the command (note the **--raw option) + volttron-ctl config store platform.driver registry_configs/meter.csv ../GridAgents/configs/registry_configs/meter.csv --raw + + ***** NOTE: you MUST install the csv file in --raw mode for universal drivers. ***** + + ''' + + def configure(self, config_dict, registry_config_dict): # 4.0 passes in a reg DICT not string now + try: + device_type = config_dict['device_type'] + ''' see ./volttron/volttron/platform/vip/agent/__init__.py for Agent object definition + every agent has a .core and .vip: + vip.ping + vip.rpc + vip.hello + vip.pubsub + vip.health + vip.heartbeat + vip.config + ''' + if (device_type == "heater"): + self.agent = HeaterDriver(None, config_dict['device_id']) + elif (device_type == "meter"): + self.agent = MeterDriver(None, config_dict['device_id'], ) + elif (device_type == "thermostat"): + self.agent = ThermostatDriver(None, config_dict['device_id']) + elif (device_type == "blinds"): + self.agent = BlindsDriver(None, config_dict['device_id']) + elif (device_type == "vehicle"): + self.agent = VehicleDriver(None, config_dict['device_id']) + else: + raise RuntimeError("Unsupported Device Type: '{}'".format(device_type)) + + self.parse_config(self.agent, device_type, config_dict, registry_config_dict) + + event = gevent.event.Event() + gevent.spawn(self.agent.core.run, event) + event.wait(timeout=5) + + except KeyError as e: + _log.fatal("configure Failed accessing Key({}) in configuration file: {}".format(e, config_dict)) + raise SystemExit + + except RuntimeError as e: + _log.fatal("configure Failed using configuration file: {}".format(config_dict)) + raise SystemExit(e) + + except Exception as e: + _log.fatal("configure Failed({}) using configuration file: {}".format(e, config_dict)) + raise SystemExit + + # get_point + def get_point(self, point_name): + register = self.get_register_by_name(point_name) + value = self.agent.GetPoint(register) + # if( self._verboseness == 2 ): + # _log.debug( "Universal get_point called for '{}', value: {}.".format(point_name, value)) + return value + + # _set_point + def _set_point(self, point_name, value): + register = self.get_register_by_name(point_name) + if register.read_only: + raise IOError("Trying to write to a point configured read only: " + point_name) + + if (self.agent.SetPoint(register, value)): + register._value = register.reg_type(value) + self.point_map[point_name]._value = register._value + return register._value + + # this gets called periodically via DriverAgent::periodic_read() + # ( on behalf of PlatformDriverAgent ) + def _scrape_all(self): + result = {} + read_registers = self.get_registers_by_type("byte", True) + write_registers = self.get_registers_by_type("byte", False) + for register in read_registers + write_registers: + if (self._verboseness == 2): + _log.info("Universal Scraping Value for '{}': {}".format(register.point_name, register._value)) + result[register.point_name] = register._value + return result + + # this set each register to its default value (if it has one) + def _reset_all(self): + read_registers = self.get_registers_by_type("byte", True) + write_registers = self.get_registers_by_type("byte", False) + for register in read_registers + write_registers: + old_value = register._value + register._value = register._default_value + # _log.info( "point_map[register]._value = {}".format(self.point_map[register.point_name]._value)) + if (self._verboseness == 2): + _log.info("Hardware not reachable, Resetting Value for '{}' from {} to {}".format(register.point_name, + old_value, + register._value)) + + ''' + We maybe could have used revert_point( register.point_name ), but that is more for reverting the hardware to its default + value (calls set_point, which complains for read_only points), _reset_all is used to set the registry values to a default + when the hardware is not reachable.... + + if register in self.defaults: + self.point_map[register]._value = self.defaults[register] + if( self._verboseness == 2 ): + _log.info( "Universal Resetting Value for '{}' from {} to {}".format(register.point_name, old_value, register._value)) + else: + if( self._verboseness == 2 ): + _log.info( "No Default Value Found while Resetting '{}'.".format(register.point_name)) + ''' + + ''' + parse_config + ***** NOTE: you MUST install the csv file in --raw mode for universal drivers. ***** + volttron-ctl config store platform.driver registry_configs/meter.csv + ../GridAgents/configs/registry_configs/meter.csv --raw + ''' + + def parse_config(self, agent, device_type, config_dict, reg_config_str): + if reg_config_str is None: + return + + config_str = (utils.strip_comments(reg_config_str).lstrip()).rstrip() + + import re + # remove whitespace after delimiter, but not within delimited value: + config_str = re.sub(r',[\s]+', ',', config_str) + + # remove trailing whitespace within delimited value: + config_str = re.sub(r'[\s]+,', ',', config_str) + + # remove trailing whitespace at end of line: + # re.MULTILINE - When specified, '^' matches the beginning of the string andbeginning of each line (immediately following each newline) + # and '$' matches end of the string and end of each line (immediately preceding each newline). + config_str = re.sub(r'[\s]+$', '', config_str, flags=re.MULTILINE) + + _log.debug('Configuring {} Driver with {} and config_str {}'.format(device_type, config_dict, config_str)) + + f = StringIO(config_str) + regDict = DictReader(f) + + agent.ConfigureAgent(self, config_dict, regDict) diff --git a/services/core/MasterDriverAgent/requirements.txt b/services/core/PlatformDriverAgent/requirements.txt similarity index 100% rename from services/core/MasterDriverAgent/requirements.txt rename to services/core/PlatformDriverAgent/requirements.txt diff --git a/services/core/MasterDriverAgent/setup.py b/services/core/PlatformDriverAgent/setup.py similarity index 98% rename from services/core/MasterDriverAgent/setup.py rename to services/core/PlatformDriverAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/core/MasterDriverAgent/setup.py +++ b/services/core/PlatformDriverAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/MasterDriverAgent/tests/restful.csv b/services/core/PlatformDriverAgent/tests/restful.csv similarity index 100% rename from services/core/MasterDriverAgent/tests/restful.csv rename to services/core/PlatformDriverAgent/tests/restful.csv diff --git a/services/core/MasterDriverAgent/tests/test_device_groups.py b/services/core/PlatformDriverAgent/tests/test_device_groups.py similarity index 88% rename from services/core/MasterDriverAgent/tests/test_device_groups.py rename to services/core/PlatformDriverAgent/tests/test_device_groups.py index e718c87c8c..b7e14b313d 100644 --- a/services/core/MasterDriverAgent/tests/test_device_groups.py +++ b/services/core/PlatformDriverAgent/tests/test_device_groups.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,16 +37,15 @@ # }}} """ -py.test cases for global master driver settings. +py.test cases for global platform driver settings. """ import pytest +import gevent from volttron.platform import get_services_core -from volttrontesting.utils.platformwrapper import start_wrapper_platform from volttron.platform.agent.known_identities import CONFIGURATION_STORE, PLATFORM_DRIVER -import gevent -from volttron.platform.vip.agent import Agent, PubSub +from volttron.platform.vip.agent import Agent from volttron.platform.messaging import topics from volttron.platform.agent.utils import parse_timestamp_string @@ -78,12 +77,9 @@ def add_result(self, peer, sender, bus, topic, headers, message): @pytest.fixture(scope="module") def subscriber_agent(request, volttron_instance): - agent = volttron_instance.build_agent(identity='subscriber_agent', - agent_class=_subscriber_agent) + agent = volttron_instance.build_agent(identity='subscriber_agent', agent_class=_subscriber_agent) - agent.vip.pubsub.subscribe(peer='pubsub', - prefix=topics.DRIVER_TOPIC_BASE, - callback=agent.add_result).get() + agent.vip.pubsub.subscribe(peer='pubsub', prefix=topics.DRIVER_TOPIC_BASE, callback=agent.add_result).get() yield agent @@ -103,7 +99,7 @@ def subscriber_agent(request, volttron_instance): }} """ -master_driver_config = """ +platform_driver_config = """ {{ "driver_scrape_interval": 0.1, "group_offset_interval": {interval}, @@ -125,22 +121,22 @@ def subscriber_agent(request, volttron_instance): def config_store_connection(request, volttron_instance): capabilities = [{'edit_config_store': {'identity': PLATFORM_DRIVER}}] connection = volttron_instance.build_connection(peer=CONFIGURATION_STORE, capabilities=capabilities) - # Reset master driver config store + # Reset platform driver config store connection.call("manage_delete_store", PLATFORM_DRIVER) - # Start the master driver agent which would in turn start the fake driver + # Start the platform driver agent which would in turn start the fake driver # using the configs created above - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices yield connection - volttron_instance.stop_agent(master_uuid) - volttron_instance.remove_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) + volttron_instance.remove_agent(platform_uuid) connection.kill() @@ -151,7 +147,7 @@ def config_store(request, config_store_connection): config_store_connection.call("manage_store", PLATFORM_DRIVER, "fake.csv", registry_config_string, config_type="csv") yield config_store_connection - # Reset master driver config store + # Reset platform driver config store print("Wiping out store.") config_store_connection.call("manage_delete_store", PLATFORM_DRIVER) gevent.sleep(0.1) @@ -170,7 +166,7 @@ def remove_config(config_store, config_name): @pytest.mark.driver def test_no_groups(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, interval=0) + setup_config(config_store, "config", platform_driver_config, interval=0) setup_config(config_store, "devices/fake0", fake_device_config, group=0) setup_config(config_store, "devices/fake1", fake_device_config, group=0) setup_config(config_store, "devices/fake2", fake_device_config, group=0) @@ -189,7 +185,7 @@ def test_no_groups(config_store, subscriber_agent): @pytest.mark.driver def test_groups_no_interval(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, interval=0) + setup_config(config_store, "config", platform_driver_config, interval=0) setup_config(config_store, "devices/fake0", fake_device_config, group=0) setup_config(config_store, "devices/fake1", fake_device_config, group=1) setup_config(config_store, "devices/fake2", fake_device_config, group=2) @@ -208,7 +204,7 @@ def test_groups_no_interval(config_store, subscriber_agent): @pytest.mark.driver def test_groups_interval(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, interval=0.5) + setup_config(config_store, "config", platform_driver_config, interval=0.5) setup_config(config_store, "devices/fake0", fake_device_config, group=0) setup_config(config_store, "devices/fake1", fake_device_config, group=1) setup_config(config_store, "devices/fake2", fake_device_config, group=1) @@ -227,7 +223,7 @@ def test_groups_interval(config_store, subscriber_agent): @pytest.mark.driver def test_add_remove_drivers(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, interval=0.5) + setup_config(config_store, "config", platform_driver_config, interval=0.5) setup_config(config_store, "devices/fake0_0", fake_device_config, group=0) setup_config(config_store, "devices/fake0_1", fake_device_config, group=0) setup_config(config_store, "devices/fake0_2", fake_device_config, group=0) diff --git a/services/core/MasterDriverAgent/tests/test_dnp3_driver.py b/services/core/PlatformDriverAgent/tests/test_dnp3_driver.py similarity index 92% rename from services/core/MasterDriverAgent/tests/test_dnp3_driver.py rename to services/core/PlatformDriverAgent/tests/test_dnp3_driver.py index 67a8db0c46..64b013b60f 100644 --- a/services/core/MasterDriverAgent/tests/test_dnp3_driver.py +++ b/services/core/PlatformDriverAgent/tests/test_dnp3_driver.py @@ -65,7 +65,7 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): - """Build MasterDriverAgent and add DNP3 driver config to it.""" + """Build PlatformDriverAgent and add DNP3 driver config to it.""" test_agent = volttron_instance.build_agent() @@ -83,11 +83,11 @@ def update_config(agent_id, name, value, cfg_type): vip_identity=DNP3_AGENT_ID, start=True) - # Build and start MasterDriverAgent + # Build and start PlatformDriverAgent test_agent.vip.rpc.call('config.store', 'manage_delete_store', PLATFORM_DRIVER) - master_uuid = volttron_instance.install_agent(agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent(agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) @@ -98,7 +98,7 @@ def update_config(agent_id, name, value, cfg_type): gevent.sleep(3) def stop(): - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) volttron_instance.stop_agent(dnp3_agent_uuid) test_agent.core.stop() diff --git a/services/core/PlatformDriverAgent/tests/test_driver.py b/services/core/PlatformDriverAgent/tests/test_driver.py new file mode 100644 index 0000000000..62f9854875 --- /dev/null +++ b/services/core/PlatformDriverAgent/tests/test_driver.py @@ -0,0 +1,404 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import logging +import contextlib +from datetime import datetime, date, time +from mock import create_autospec + +import pytest +import pytz + +from platform_driver import agent +from platform_driver.agent import DriverAgent +from platform_driver.interfaces import BaseInterface +from platform_driver.interfaces.fakedriver import Interface as FakeInterface +from volttrontesting.utils.utils import AgentMock +from volttron.platform.vip.agent import Agent +from volttron.platform.messaging.utils import Topic +from volttron.platform.vip.agent.core import ScheduledEvent + + +agent._log = logging.getLogger("test_logger") +DriverAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),) + + +@pytest.mark.driver_unit +def test_update_publish_types_should_only_set_depth_first_to_true(): + publish_depth_first_all = True + publish_breadth_first_all = True + publish_depth_first = True + publish_breadth_first = True + + with get_driver_agent() as driver_agent: + driver_agent.update_publish_types(publish_depth_first_all, publish_breadth_first_all, + publish_depth_first, publish_breadth_first) + + assert not driver_agent.publish_depth_first_all + assert not driver_agent.publish_breadth_first_all + assert driver_agent.publish_depth_first + assert not driver_agent.publish_breadth_first + + +@pytest.mark.driver_unit +@pytest.mark.parametrize("time_slot, driver_scrape_interval, group, group_offset_interval, " + "expected_time_slot_offset, expected_group", + [(60, 2, 0, 3, 0, 0), + (1, 4, 2, 3, 10, 2)]) +def test_update_scrape_schedule_should_set_periodic_event(time_slot, driver_scrape_interval, group, group_offset_interval, + expected_time_slot_offset, expected_group): + with get_driver_agent(has_periodic_read_event=True, has_core_schedule=True) as driver_agent: + driver_agent.update_scrape_schedule(time_slot, driver_scrape_interval, group, group_offset_interval) + + assert driver_agent.group == expected_group + assert driver_agent.time_slot_offset == expected_time_slot_offset + assert isinstance(driver_agent.periodic_read_event, ScheduledEvent) + + +@pytest.mark.driver_unit +def test_update_scrape_schedule_should_return_none_when_no_periodic_read_event(): + time_slot = 1 + driver_scrape_interval = 4 + group = 2 + group_offset = 3 + expected_time_slot_offset = 10 + + with get_driver_agent() as driver_agent: + result = driver_agent.update_scrape_schedule(time_slot, driver_scrape_interval, group, group_offset) + + assert result is None + assert driver_agent.time_slot_offset == expected_time_slot_offset + + +@pytest.mark.driver_unit +@pytest.mark.parametrize("seconds, expected_datetime", [(0, + datetime.combine( + date(2020, 6, 1), + time(5, 30))), + (1, + datetime.combine( + date(2020, 6, 1), + time(5, 31, 4))), + (59, + datetime.combine( + date(2020, 6, 1), + time(5, 31, 4))) + ]) +def test_find_starting_datetime_should_return_new_datetime(seconds, expected_datetime): + # Note: the expected datetime depends on the interval attribute of driver_agent + now = datetime.combine(date(2020, 6, 1), time(5, 30, seconds)) + + with get_driver_agent() as driver_agent: + actual_start_datetime = driver_agent.find_starting_datetime(now) + + assert actual_start_datetime == expected_datetime + + +@pytest.mark.driver_unit +def test_get_interface_should_return_fakedriver_interface(): + driver_type = "fakedriver" + config_dict = {} + config_string = [{"Point Name": "HPWH_Phy0_PowerState", + "Writable": "TRUE", + "Volttron Point Name": "PowerState", + "Units": "1/0", + "Starting Value": "0", + "Type": "int"}] + + with get_driver_agent() as driver_agent: + interface = driver_agent.get_interface(driver_type, config_dict, config_string) + + assert isinstance(interface, FakeInterface) + + +@pytest.mark.driver_unit +def test_starting_should_succeed(): + sender = "somesender" + expected_path_depth = "devices/path/to/my/device/all" + expected_path_breadth = "devices/all/device/my/to/path" + + with get_driver_agent(has_core_schedule=True) as driver_agent: + driver_agent.starting(sender) + + assert driver_agent.all_path_depth == expected_path_depth + assert driver_agent.all_path_breadth == expected_path_breadth + assert isinstance(driver_agent.periodic_read_event, ScheduledEvent) + + +@pytest.mark.driver_unit +def test_setup_device_should_succeed(): + expected_base_topic = Topic("devices/path/to/my/device/{point}") + expected_device_name = Topic("path/to/my/device") + expected_meta_data = {'PowerState': {'units': '1/0', 'type': 'integer', 'tz': 'US/Pacific'}} + + with get_driver_agent() as driver_agent: + driver_agent.setup_device() + + assert driver_agent.base_topic == expected_base_topic + assert driver_agent.device_name == expected_device_name + assert driver_agent.meta_data == expected_meta_data + + +@pytest.mark.driver_unit +def test_periodic_read_should_succeed(): + now = pytz.UTC.localize(datetime.utcnow()) + + with get_driver_agent(has_core_schedule=True, meta_data={"foo": "bar"}, + has_base_topic=True, mock_publish_wrapper=True, + interface_scrape_all={"foo": "bar"}) as driver_agent: + driver_agent.periodic_read(now) + + driver_agent.parent.scrape_starting.assert_called_once() + driver_agent.parent.scrape_ending.assert_called_once() + driver_agent._publish_wrapper.assert_called_once() + assert isinstance(driver_agent.periodic_read_event, ScheduledEvent) + + +@pytest.mark.driver_unit +@pytest.mark.parametrize("scrape_all_response", [{}, Exception()]) +def test_periodic_read_should_return_none_on_scrape_response(scrape_all_response): + now = pytz.UTC.localize(datetime.utcnow()) + + with get_driver_agent(has_core_schedule=True, meta_data={"foo": "bar"}, + mock_publish_wrapper=True, interface_scrape_all=scrape_all_response) as driver_agent: + result = driver_agent.periodic_read(now) + + assert result is None + driver_agent.parent.scrape_starting.assert_called_once() + driver_agent.parent.scrape_ending.assert_not_called() + driver_agent._publish_wrapper.assert_not_called() + assert isinstance(driver_agent.periodic_read_event, ScheduledEvent) + + +@pytest.mark.driver_unit +def test_heart_beat_should_return_none_on_no_heart_beat_point(): + with get_driver_agent() as driver_agent: + result = driver_agent.heart_beat() + + assert result is None + assert not driver_agent.heart_beat_value + driver_agent.interface.set_point.assert_not_called() + + +@pytest.mark.driver_unit +def test_heart_beat_should_set_heart_beat(): + with get_driver_agent(has_heart_beat_point=True) as driver_agent: + driver_agent.heart_beat() + + assert driver_agent.heart_beat_value + driver_agent.interface.set_point.assert_called_once() + + +@pytest.mark.driver_unit +def test_get_paths_for_point_should_return_depth_breadth(): + expected_depth = "foobar/roma" + expected_breadth = "devices/roma" + point = "foobar/roma" + + with get_driver_agent(has_base_topic=True) as driver_agent: + actual_depth, actual_breadth = driver_agent.get_paths_for_point(point) + + assert actual_depth == expected_depth + assert actual_breadth == expected_breadth + + +@pytest.mark.driver_unit +def test_get_point_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.get_point("pointname") + + driver_agent.interface.get_point.assert_called_once() + + +@pytest.mark.driver_unit +def test_set_point_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.set_point("pointname", "value") + + driver_agent.interface.set_point.assert_called_once() + + +@pytest.mark.driver_unit +def test_scrape_all_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.scrape_all() + + driver_agent.interface.scrape_all.assert_called_once() + + +@pytest.mark.driver_unit +def test_get_multiple_points_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.get_multiple_points("pointnames") + + driver_agent.interface.get_multiple_points.assert_called_once() + + +@pytest.mark.driver_unit +def test_set_multiple_points_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.set_multiple_points("pointnamevalues") + + driver_agent.interface.set_multiple_points.assert_called_once() + + +@pytest.mark.driver_unit +def test_revert_point_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.revert_point("pointnamevalues") + + driver_agent.interface.revert_point.assert_called_once() + + +@pytest.mark.driver_unit +def test_revert_all_should_succeed(): + with get_driver_agent() as driver_agent: + driver_agent.revert_all() + + driver_agent.interface.revert_all.assert_called_once() + + +@pytest.mark.driver_unit +def test_publish_cov_value_should_succeed_when_publish_depth_first_is_true(): + point_name = "pointname" + point_values = {"pointname": "value"} + + with get_driver_agent(mock_publish_wrapper=True, + meta_data={"pointname": "values"}, + has_base_topic=True) as driver_agent: + driver_agent.publish_cov_value(point_name, point_values) + + driver_agent._publish_wrapper.assert_called_once() + + +class MockedParent: + def scrape_starting(self, device_name): + pass + + def scrape_ending(self, device_name): + pass + + +class MockedBaseTopic: + def __call__(self, point): + return point + + +class MockedPublishWrapper: + def __call__(self, depth_first_topic, headers, message): + pass + + +@contextlib.contextmanager +def get_driver_agent(has_base_topic: bool = False, + has_periodic_read_event: bool = False, + has_core_schedule: bool = False, + meta_data: dict = None, + mock_publish_wrapper: bool = False, + interface_scrape_all: any = None, + has_heart_beat_point: bool = False): + """ + Creates a Driver Agent and mocks its dependencies to be used for unit testing. + :param has_base_topic: + :param has_periodic_read_event: + :param has_core_schedule: + :param meta_data: + :param mock_publish_wrapper: + :param interface_scrape_all: + :param has_heart_beat_point: + :return: + """ + + parent = create_autospec(MockedParent) + # since parent is a mock and not a real instance of a class, we have to set attributes directly + # create_autospec does not set attributes in a class' constructor + parent.vip = "" + + config = {"driver_config": {}, + "driver_type": "fakedriver", + "registry_config": [{"Point Name": "HPWH_Phy0_PowerState", + "Writable": "TRUE", + "Volttron Point Name": "PowerState", + "Units": "1/0", + "Starting Value": "0", + "Type": "int" + }], + "interval": 60, + "publish_depth_first_all": False, + "publish_breadth_first_all": False, + "publish_depth_first": True, + "publish_breadth_first": False, + "heart_beat_point": "Heartbeat", + "timezone": "US/Pacific", + } + time_slot = 2 + driver_scrape_interval = 2 + device_path = "path/to/my/device" + group = 42 + group_offset_interval = 0 + + driver_agent = DriverAgent(parent, config, time_slot, driver_scrape_interval, device_path, + group, group_offset_interval) + + driver_agent.interface = create_autospec(BaseInterface) + + if interface_scrape_all is not None: + driver_agent.interface.scrape_all.return_value = interface_scrape_all + + if has_base_topic: + driver_agent.base_topic = MockedBaseTopic() + + if has_periodic_read_event: + driver_agent.periodic_read_event = create_autospec(ScheduledEvent) + + if has_core_schedule: + driver_agent.core.schedule.return_value = create_autospec(ScheduledEvent) + driver_agent.core.schedule.cancel = None + + if meta_data is not None: + driver_agent.meta_data = meta_data + + if mock_publish_wrapper: + driver_agent._publish_wrapper = create_autospec(MockedPublishWrapper) + + if has_heart_beat_point: + driver_agent.heart_beat_point = 42 + else: + driver_agent.heart_beat_point = None + + yield driver_agent diff --git a/services/core/MasterDriverAgent/tests/test_driver_bacnet_cov.py b/services/core/PlatformDriverAgent/tests/test_driver_bacnet_cov.py similarity index 82% rename from services/core/MasterDriverAgent/tests/test_driver_bacnet_cov.py rename to services/core/PlatformDriverAgent/tests/test_driver_bacnet_cov.py index 04aa1f8beb..18cecec127 100644 --- a/services/core/MasterDriverAgent/tests/test_driver_bacnet_cov.py +++ b/services/core/PlatformDriverAgent/tests/test_driver_bacnet_cov.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,6 +49,7 @@ utils.setup_logging() _log = logging.getLogger(__name__) + @pytest.fixture(scope="module") def test_agent(request, volttron_instance): """Dynamic agent for sending rpc calls and listening to the bus""" @@ -72,43 +73,38 @@ def stop_agent(): @pytest.mark.driver def test_cov_update_published(volttron_instance, test_agent): """Tests the functionality of BACnet change of value forwarding in the - Master Driver and driver.py""" - # Reset master driver config store + Platform Driver and driver.py""" + # Reset platform driver config store cmd = ['volttron-ctl', 'config', 'delete', PLATFORM_DRIVER, '--all'] - process = Popen(cmd, env=volttron_instance.env, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = Popen(cmd, env=volttron_instance.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = process.wait() assert result == 0 # Add fake device configuration cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, 'fake.csv', 'examples/configurations/drivers/fake.csv', '--csv'] - process = Popen(cmd, env=volttron_instance.env, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = Popen(cmd, env=volttron_instance.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = process.wait() assert result == 0 cmd = ['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, - "devices/fakedriver", 'examples/configurations/drivers/fake.config', - '--json'] - process = Popen(cmd, env=volttron_instance.env, - stdout=subprocess.PIPE, stderr=subprocess.PIPE) + "devices/fakedriver", 'examples/configurations/drivers/fake.config', '--json'] + process = Popen(cmd, env=volttron_instance.env, stdout=subprocess.PIPE, stderr=subprocess.PIPE) result = process.wait() assert result == 0 - # install master driver, start the master driver, which starts the device - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + # install platform driver, start the platform driver, which starts the device + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) - # tell the master driver to forward the value + # tell the platform driver to forward the value point_name = "PowerState" device_path = "fakedriver" result_dict = {"fake1": "test", "fake2": "test", "fake3": "test"} - test_agent.vip.rpc.call(PLATFORM_DRIVER, 'forward_bacnet_cov_value', - device_path, point_name, result_dict) + test_agent.vip.rpc.call(PLATFORM_DRIVER, 'forward_bacnet_cov_value', device_path, point_name, result_dict) # wait for the publishes to make it to the bus gevent.sleep(2) diff --git a/services/core/MasterDriverAgent/tests/test_eagle.py b/services/core/PlatformDriverAgent/tests/test_eagle.py similarity index 96% rename from services/core/MasterDriverAgent/tests/test_eagle.py rename to services/core/PlatformDriverAgent/tests/test_eagle.py index 489c8f93d1..9dccc6d436 100644 --- a/services/core/MasterDriverAgent/tests/test_eagle.py +++ b/services/core/PlatformDriverAgent/tests/test_eagle.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -198,12 +198,12 @@ def agent(volttron_instance): agent = volttron_instance.build_agent(identity="test_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(agent.core.publickey, capabilities) - # Clean out master driver configurations. + # Clean out platform driver configurations. agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_delete_store', PLATFORM_DRIVER).get(timeout=10) - #Add test configurations. + # Add test configurations. agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', PLATFORM_DRIVER, @@ -218,11 +218,11 @@ def agent(volttron_instance): register_config_string, "json").get(timeout=10) - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices server = pywsgi.WSGIServer((ip, int(port)), handle) @@ -230,11 +230,10 @@ def agent(volttron_instance): yield agent - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) agent.core.stop() server.stop() - def test_NetworkStatus(agent): point = agent.vip.rpc.call(PLATFORM_DRIVER, 'get_point', diff --git a/services/core/MasterDriverAgent/tests/test_ecobee_driver.py b/services/core/PlatformDriverAgent/tests/test_ecobee_driver.py similarity index 98% rename from services/core/MasterDriverAgent/tests/test_ecobee_driver.py rename to services/core/PlatformDriverAgent/tests/test_ecobee_driver.py index d04bfceb11..984e7fe264 100644 --- a/services/core/MasterDriverAgent/tests/test_ecobee_driver.py +++ b/services/core/PlatformDriverAgent/tests/test_ecobee_driver.py @@ -7,7 +7,7 @@ from requests.exceptions import HTTPError import pytest -from services.core.MasterDriverAgent.master_driver.interfaces import ecobee +from services.core.PlatformDriverAgent.platform_driver.interfaces import ecobee from volttron.platform.agent import utils API_KEY = os.environ.get("ECOBEE_KEY") @@ -560,9 +560,9 @@ def test_scrape_all_trigger_refresh(mock_ecobee): # # volttron_instance.install_agent() # -# # create a master driver -# master_driver = volttron_instance.install_agent( -# agent_dir=get_services_core("MasterDriverAgent"), +# # create a platform driver +# platform_driver = volttron_instance.install_agent( +# agent_dir=get_services_core("PlatformDriverAgent"), # start=False, # config_file={ # "publish_breadth_first_all": False, @@ -589,7 +589,7 @@ def test_scrape_all_trigger_refresh(mock_ecobee): # config_type="csv") # # ecobee_driver_config.update(driver_config) -# volttron_instance.start_agent(master_driver) +# volttron_instance.start_agent(platform_driver) # # # the user must validate the pin in the VOLTTRON log for this run using the Ecobee web UI, see docs for details # # this process is allotted 60 seconds, add a couple more to make sure all of the callbacks have had a time to take @@ -601,6 +601,6 @@ def test_scrape_all_trigger_refresh(mock_ecobee): # # # Close agents after test # query_agent.core.stop() -# volttron_instance.stop_agent(master_driver) +# volttron_instance.stop_agent(platform_driver) # TODO integration tests for set point registers diff --git a/services/core/MasterDriverAgent/tests/test_global_override.py b/services/core/PlatformDriverAgent/tests/test_global_override.py similarity index 86% rename from services/core/MasterDriverAgent/tests/test_global_override.py rename to services/core/PlatformDriverAgent/tests/test_global_override.py index 206915aec6..3e88b0bf44 100644 --- a/services/core/MasterDriverAgent/tests/test_global_override.py +++ b/services/core/PlatformDriverAgent/tests/test_global_override.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,9 +41,9 @@ """ import pytest +import os -from volttron.platform import get_services_core -from volttrontesting.utils.platformwrapper import start_wrapper_platform +from volttron.platform import get_services_core, get_volttron_root from volttron.platform.agent.known_identities import PLATFORM_DRIVER, CONFIGURATION_STORE import gevent from volttron.platform.jsonrpc import RemoteError @@ -52,7 +52,7 @@ TEST1_AGENT = 'test1-agent' SET_FAILURE = 0.0 REVERT_FAILURE = 0.0 -master_uuid = '' +platform_uuid = '' fake_device_config = """ {{ "driver_config": {{}}, @@ -64,7 +64,7 @@ }} """ -master_driver_config = """ +platform_driver_config = """ {{ "driver_scrape_interval": 0.05, "publish_breadth_first_all": false, @@ -85,20 +85,21 @@ def config_store_connection(request, volttron_instance): capabilities = [{'edit_config_store': {'identity': PLATFORM_DRIVER}}] connection = volttron_instance.build_connection(peer=CONFIGURATION_STORE, capabilities=capabilities) - # Reset master driver config store + # Reset platform driver config store connection.call("manage_delete_store", PLATFORM_DRIVER) - # Start the master driver agent which would in turn start the fake driver + # Start the platform driver agent which would in turn start the fake driver # using the configs created above - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + global platform_uuid + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(2) # wait for the agent to start and start the devices def stop_agent(): - volttron_instance.stop_agent(master_uuid) - volttron_instance.remove_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) + volttron_instance.remove_agent(platform_uuid) connection.kill() request.addfinalizer(stop_agent) @@ -111,14 +112,14 @@ def config_store(request, config_store_connection): # Always have fake.csv ready to go. # Add up fake.csv to config store - config_path = "scripts/scalability-testing/fake_unit_testing.csv" + config_path = os.path.join(get_volttron_root(), "scripts/scalability-testing/fake_unit_testing.csv") with open(config_path, 'r') as f: registry_config_string = f.read() - f.closed + config_store_connection.call("manage_store", PLATFORM_DRIVER, "fake.csv", registry_config_string, config_type="csv") def cleanup(): - # Reset master driver config store + # Reset platform driver config store print("Wiping out store.") config_store_connection.call("manage_delete_store", PLATFORM_DRIVER) gevent.sleep(0.1) @@ -137,11 +138,8 @@ def setup_config(config_store, config_name, config_string, **kwargs): @pytest.fixture(scope="module") def test_agent(request, volttron_instance): test_agent = volttron_instance.build_agent(identity=TEST_AGENT) + def stop_agent(): - # result = test_agent.vip.rpc.call( - # PLATFORM_DRIVER, # Target agent - # 'clear_overrides' # Method - # ).get(timeout=10) test_agent.core.stop() # Add a tear down method to stop test agent @@ -151,7 +149,7 @@ def stop_agent(): @pytest.mark.driver def test_set_override(config_store, test_agent): - setup_config(config_store, "config", master_driver_config) + setup_config(config_store, "config", platform_driver_config) for i in range(4): config_name = "devices/fakedriver{}".format(i) setup_config(config_store, config_name, fake_device_config) @@ -182,9 +180,8 @@ def test_set_override(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned: {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(device_path) try: result = test_agent.vip.rpc.call( @@ -195,14 +192,13 @@ def test_set_override(config_store, test_agent): pytest.fail("Expecting Override Error. Code returned: {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot revert device {} since global override is set'.format( - device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot revert device {} since global override is set'.format(device_path) @pytest.mark.driver def test_set_point_after_override_elapsed_interval(config_store, test_agent): - setup_config(config_store, "config", master_driver_config) + setup_config(config_store, "config", platform_driver_config) for i in range(4): config_name = "devices/fakedriver{}".format(i) setup_config(config_store, config_name, fake_device_config) @@ -233,16 +229,15 @@ def test_set_point_after_override_elapsed_interval(config_store, test_agent): ).get(timeout=10) assert result == new_value except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(device_path) pytest.fail("Expecting successful set point. Code raised OverrideError: {}".format(e.message)) # @pytest.mark.driver def test_set_hierarchical_override(config_store, test_agent): - setup_config(config_store, "config", master_driver_config) + setup_config(config_store, "config", platform_driver_config) for i in range(4): config_name = "devices/fakedriver{}".format(i) setup_config(config_store, config_name, fake_device_config) @@ -257,8 +252,8 @@ def test_set_hierarchical_override(config_store, test_agent): True ).get(timeout=10) + fakedriver1_path = 'fakedriver2' try: - fakedriver1_path = 'fakedriver2' point = 'SampleWritableFloat' value = 12.5 result = test_agent.vip.rpc.call( @@ -270,15 +265,14 @@ def test_set_hierarchical_override(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned: {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver1_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver1_path) gevent.sleep(4) @pytest.mark.driver def test_set_override_no_revert(config_store, test_agent): - setup_config(config_store, "config", master_driver_config) + setup_config(config_store, "config", platform_driver_config) for i in range(4): config_name = "devices/fakedriver{}".format(i) setup_config(config_store, config_name, fake_device_config) @@ -315,7 +309,7 @@ def test_set_override_no_revert(config_store, test_agent): @pytest.mark.driver def test_set_override_off(config_store, test_agent): - setup_config(config_store, "config", master_driver_config) + setup_config(config_store, "config", platform_driver_config) for i in range(4): config_name = "devices/fakedriver{}".format(i) setup_config(config_store, config_name, fake_device_config) @@ -367,9 +361,8 @@ def test_set_override_off(config_store, test_agent): ).get(timeout=10) assert result == value except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(device_path) pytest.fail("Expecting successful set point. Code raised OverrideError: {}".format(e.message)) # Get override patterns list @@ -439,13 +432,12 @@ def test_overlapping_override_onoff(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned : {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver1_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver1_device_path) + fakedriver2_device_path = 'fakedriver2' try: # Try to set a point on fakedriver2 - fakedriver2_device_path = 'fakedriver2' result = test_agent.vip.rpc.call( PLATFORM_DRIVER, # Target agent 'set_point', # Method @@ -455,9 +447,8 @@ def test_overlapping_override_onoff(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned : {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver2_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver2_device_path) # Wait for timeout gevent.sleep(6) @@ -473,9 +464,8 @@ def test_overlapping_override_onoff(config_store, test_agent): assert result == new_value print("New value of fake driver2, SampleWritableFloat1: {}".format(new_value)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver2_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver2_device_path) pytest.fail("Expecting successful set point. Code raised OverrideError: {}".format(e.message)) @@ -530,13 +520,12 @@ def test_overlapping_override_onoff2(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned : {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver1_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver1_device_path) + fakedriver2_device_path = 'fakedriver2' try: # Try to set a point on fakedriver2 - fakedriver2_device_path = 'fakedriver2' result = test_agent.vip.rpc.call( PLATFORM_DRIVER, # Target agent 'set_point', # Method @@ -546,9 +535,8 @@ def test_overlapping_override_onoff2(config_store, test_agent): ).get(timeout=10) assert result == new_value except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver2_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver2_device_path) pytest.fail("Expecting successful set point. Code raised OverrideError: {}".format(e.message)) # Wait for timeout @@ -566,9 +554,8 @@ def test_overlapping_override_onoff2(config_store, test_agent): assert result == new_value print("New value of fake driver1, SampleWritableFloat1: {}".format(new_value)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver1_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver1_device_path) pytest.fail("Expecting successful set point. Code raised OverrideError: {}".format(e.message)) @@ -614,9 +601,8 @@ def test_duplicate_override_on(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned : {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - fakedriver1_device_path) + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(fakedriver1_device_path) @pytest.mark.driver @@ -660,10 +646,9 @@ def test_indefinite_override_on(config_store, test_agent): ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned : {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - device_path) - result = test_agent.vip.rpc.call( + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(device_path) + test_agent.vip.rpc.call( PLATFORM_DRIVER, # Target agent 'clear_overrides' # Method ).get(timeout=10) @@ -671,48 +656,55 @@ def test_indefinite_override_on(config_store, test_agent): @pytest.mark.driver def test_indefinite_override_after_restart(config_store, test_agent, volttron_instance): + + # previously platform UUID hadn't been set, so nothing was being restarted + assert isinstance(platform_uuid, str) and len(platform_uuid) + assert volttron_instance.is_agent_running(platform_uuid) + for i in range(4): config_name = "devices/fakedriver{}".format(i) setup_config(config_store, config_name, fake_device_config) - device_path = 'fakedriver2' + + # start up fake drivers + gevent.sleep(1) # Set override feature on device test_agent.vip.rpc.call( PLATFORM_DRIVER, # Target agent 'set_override_on', # Method - device_path, # Override Pattern + 'fakedriver*', # Override Pattern 0.0, # Indefinite override False, # revert flag to True False ).get(timeout=10) # Give it enough time to set indefinite override. + gevent.sleep(1) + volttron_instance.stop_agent(platform_uuid) gevent.sleep(0.5) - global master_uuid - volttron_instance.stop_agent(master_uuid) - gevent.sleep(0.5) - # Start the master driver agent which would in turn start the fake driver + # Start the platform driver agent which would in turn start the fake driver # using the configs created above - volttron_instance.start_agent(master_uuid) + volttron_instance.start_agent(platform_uuid) gevent.sleep(1) # wait for the agent to start and start the devices + device = 'fakedriver1' + device_path = 'devices/' + device point = 'SampleWritableFloat1' - new_value = 65.5 + try: # Try to set a point on fakedriver1 result = test_agent.vip.rpc.call( PLATFORM_DRIVER, # Target agent 'set_point', # Method - device_path, # device path + device, # device path point, - new_value + 65.5 ).get(timeout=10) pytest.fail("Expecting Override Error. Code returned : {}".format(result)) except RemoteError as e: - assert e.exc_info['exc_type'].endswith('OverrideError') - assert e.message == 'Cannot set point on device {} since global override is set'.format( - device_path) - result = test_agent.vip.rpc.call( + assert e.exc_info['exc_type'] == '__main__.OverrideError' + assert e.message == 'Cannot set point on device {} since global override is set'.format(device) + test_agent.vip.rpc.call( PLATFORM_DRIVER, # Target agent 'clear_overrides' # Method ).get(timeout=10) @@ -720,7 +712,7 @@ def test_indefinite_override_after_restart(config_store, test_agent, volttron_in @pytest.mark.driver def test_override_pattern(config_store, test_agent): - setup_config(config_store, "config", master_driver_config) + setup_config(config_store, "config", platform_driver_config) # add a fake driver config_path = "devices/{}" device_path = "fakedriver1" diff --git a/services/core/MasterDriverAgent/tests/test_global_settings.py b/services/core/PlatformDriverAgent/tests/test_global_settings.py similarity index 88% rename from services/core/MasterDriverAgent/tests/test_global_settings.py rename to services/core/PlatformDriverAgent/tests/test_global_settings.py index e51a1d5139..aa116c8998 100644 --- a/services/core/MasterDriverAgent/tests/test_global_settings.py +++ b/services/core/PlatformDriverAgent/tests/test_global_settings.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,16 +37,15 @@ # }}} """ -py.test cases for global master driver settings. +py.test cases for global platform driver settings. """ import pytest +import gevent from volttron.platform import get_services_core -from volttrontesting.utils.platformwrapper import start_wrapper_platform from volttron.platform.agent.known_identities import CONFIGURATION_STORE, PLATFORM_DRIVER -import gevent -from volttron.platform.vip.agent import Agent, PubSub +from volttron.platform.vip.agent import Agent from volttron.platform.messaging import topics @@ -70,12 +69,9 @@ def add_result(self, peer, sender, bus, topic, headers, message): @pytest.fixture(scope="module") def subscriber_agent(request, volttron_instance): - agent = volttron_instance.build_agent(identity='subscriber_agent', - agent_class=_subscriber_agent) + agent = volttron_instance.build_agent(identity='subscriber_agent', agent_class=_subscriber_agent) - agent.vip.pubsub.subscribe(peer='pubsub', - prefix=topics.DRIVER_TOPIC_BASE, - callback=agent.add_result).get() + agent.vip.pubsub.subscribe(peer='pubsub', prefix=topics.DRIVER_TOPIC_BASE, callback=agent.add_result).get() def cleanup(): agent.core.stop() @@ -125,7 +121,7 @@ def cleanup(): }} """ -master_driver_config = """ +platform_driver_config = """ {{ "driver_scrape_interval": 0.05, "publish_breadth_first_all": {breadth_all}, @@ -135,7 +131,7 @@ def cleanup(): }} """ -master_driver_config_default = """ +platform_driver_config_default = """ {{ "driver_scrape_interval": 0.05 }} @@ -156,21 +152,21 @@ def cleanup(): def config_store_connection(request, volttron_instance): capabilities = [{'edit_config_store': {'identity': PLATFORM_DRIVER}}] connection = volttron_instance.build_connection(peer=CONFIGURATION_STORE, capabilities=capabilities) - # Reset master driver config store + # Reset platform driver config store connection.call("manage_delete_store", PLATFORM_DRIVER) - # Start the master driver agent which would in turn start the fake driver + # Start the platform driver agent which would in turn start the fake driver # using the configs created above - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices def stop_agent(): - volttron_instance.stop_agent(master_uuid) - volttron_instance.remove_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) + volttron_instance.remove_agent(platform_uuid) connection.kill() request.addfinalizer(stop_agent) @@ -185,7 +181,7 @@ def config_store(request, config_store_connection): config_store_connection.call("manage_store", PLATFORM_DRIVER, "fake.csv", registry_config_string, config_type="csv") def cleanup(): - # Reset master driver config store + # Reset platform driver config store print("Wiping out store.") config_store_connection.call("manage_delete_store", PLATFORM_DRIVER) gevent.sleep(0.1) @@ -203,7 +199,7 @@ def setup_config(config_store, config_name, config_string, **kwargs): @pytest.mark.driver def test_default_publish(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config_default) + setup_config(config_store, "config", platform_driver_config_default) setup_config(config_store, "devices/fake", fake_device_config) subscriber_agent.reset_results() @@ -218,7 +214,7 @@ def test_default_publish(config_store, subscriber_agent): @pytest.mark.driver def test_default_global_off(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -237,7 +233,7 @@ def test_default_global_off(config_store, subscriber_agent): @pytest.mark.driver def test_default_global_breadth_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="true", depth_all="false", breadth="false", @@ -256,7 +252,7 @@ def test_default_global_breadth_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_global_depth_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="true", breadth="false", @@ -275,7 +271,7 @@ def test_default_global_depth_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_global_depth(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -294,7 +290,7 @@ def test_default_global_depth(config_store, subscriber_agent): @pytest.mark.driver def test_default_global_breadth(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="true", @@ -313,7 +309,7 @@ def test_default_global_breadth(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -337,7 +333,7 @@ def test_default_override_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_breadth_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -361,7 +357,7 @@ def test_default_override_breadth_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_depth_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -385,7 +381,7 @@ def test_default_override_depth_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_depth(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -409,7 +405,7 @@ def test_default_override_depth(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_breadth(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -433,7 +429,7 @@ def test_default_override_breadth(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_single_breadth_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -454,7 +450,7 @@ def test_default_override_single_breadth_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_single_depth_all(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -475,7 +471,7 @@ def test_default_override_single_depth_all(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_single_depth(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", @@ -496,7 +492,7 @@ def test_default_override_single_depth(config_store, subscriber_agent): @pytest.mark.driver def test_default_override_single_breadth(config_store, subscriber_agent): - setup_config(config_store, "config", master_driver_config, + setup_config(config_store, "config", platform_driver_config, breadth_all="false", depth_all="false", breadth="false", diff --git a/services/core/MasterDriverAgent/tests/test_modbus_driver.py b/services/core/PlatformDriverAgent/tests/test_modbus_driver.py similarity index 91% rename from services/core/MasterDriverAgent/tests/test_modbus_driver.py rename to services/core/PlatformDriverAgent/tests/test_modbus_driver.py index f930a9936f..586846744e 100644 --- a/services/core/MasterDriverAgent/tests/test_modbus_driver.py +++ b/services/core/PlatformDriverAgent/tests/test_modbus_driver.py @@ -5,9 +5,9 @@ from struct import pack, unpack from volttron.platform import get_services_core, jsonapi -from master_driver.interfaces.modbus_tk.server import Server -from master_driver.interfaces.modbus_tk.client import Client, Field -from master_driver.interfaces.modbus_tk import helpers +from platform_driver.interfaces.modbus_tk.server import Server +from platform_driver.interfaces.modbus_tk.client import Client, Field +from platform_driver.interfaces.modbus_tk import helpers from volttrontesting.utils.utils import get_rand_ip_and_port from volttron.platform.agent.known_identities import PLATFORM_DRIVER @@ -66,15 +66,15 @@ @pytest.fixture(scope="module") def agent(request, volttron_instance): """ - Build MasterDriverAgent, add Modbus driver & csv configurations + Build PlatformDriverAgent, add Modbus driver & csv configurations """ - # Build master driver agent + # Build platform driver agent md_agent = volttron_instance.build_agent(identity="test_md_agent") capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(md_agent.core.publickey, capabilities) gevent.sleep(1) - # Clean out master driver configurations + # Clean out platform driver configurations # wait for it to return before adding new config md_agent.vip.rpc.call('config.store', 'manage_delete_store', @@ -96,17 +96,17 @@ def agent(request, volttron_instance): REGISTRY_CONFIG_STRING, config_type='csv') - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) gevent.sleep(10) # wait for the agent to start and start the devices def stop(): - """Stop master driver agent + """Stop platform driver agent """ - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) md_agent.core.stop() request.addfinalizer(stop) @@ -115,7 +115,7 @@ def stop(): class PPSPi32Client(Client): """ - Define some registers to PPSPi32Client + Define some registers to PPSPi32Client """ def __init__(self, *args, **kwargs): @@ -191,7 +191,7 @@ def modbus_server(request): @pytest.mark.usefixtures("modbus_server") class TestModbusDriver: """ - Regression tests for the modbus driver interface. + Regression tests for the modbus driver interface. """ def get_point(self, agent, point_name): @@ -202,8 +202,7 @@ def get_point(self, agent, point_name): @param point_name: The name of the point to query. @return: The returned value from the RPC call. """ - return agent.vip.rpc.call(PLATFORM_DRIVER, 'get_point', 'modbus', - point_name).get(timeout=10) + return agent.vip.rpc.call(PLATFORM_DRIVER, 'get_point', 'modbus', point_name).get(timeout=10) def set_point(self, agent, point_name, point_value): """ @@ -215,8 +214,7 @@ def set_point(self, agent, point_name, point_value): @param point_value: The value to set on the point. @return: The returned value from the RPC call. """ - return agent.vip.rpc.call(PLATFORM_DRIVER, 'set_point', 'modbus', - point_name, point_value).get(timeout=10) + return agent.vip.rpc.call(PLATFORM_DRIVER, 'set_point', 'modbus', point_name, point_value).get(timeout=10) def scrape_all(self, agent): """ @@ -225,8 +223,7 @@ def scrape_all(self, agent): @param agent: The test Agent. @return: The returned value from the RPC call. """ - return agent.vip.rpc.call(PLATFORM_DRIVER, 'scrape_all', - 'modbus').get(timeout=10) + return agent.vip.rpc.call(PLATFORM_DRIVER, 'scrape_all', 'modbus').get(timeout=10) def test_default_values(self, agent): """ diff --git a/services/core/PlatformDriverAgent/tests/test_platform_driver.py b/services/core/PlatformDriverAgent/tests/test_platform_driver.py new file mode 100644 index 0000000000..1b7a2ea882 --- /dev/null +++ b/services/core/PlatformDriverAgent/tests/test_platform_driver.py @@ -0,0 +1,256 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import contextlib +import logging +import os + +from datetime import datetime + +import pytest +import json +import gevent +import pytest + +from volttron.platform import get_services_core +from volttron.platform.messaging.health import STATUS_GOOD +from platform_driver import agent +from platform_driver.agent import PlatformDriverAgent, OverrideError +from volttrontesting.utils.utils import AgentMock +from volttron.platform.vip.agent import Agent + + +agent._log = logging.getLogger("test_logger") +PlatformDriverAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),) + + +@pytest.mark.driver_unit +@pytest.mark.parametrize("pattern, expected_device_override", [("campus/building1/*", 1), + ("campus/building1/", 1), + ("wrongcampus/building", 0)]) +def test_set_override_on_should_succeed(pattern, expected_device_override): + with get_platform_driver_agent() as platform_driver_agent: + platform_driver_agent.set_override_on(pattern) + + assert len(platform_driver_agent._override_patterns) == 1 + assert len(platform_driver_agent._override_devices) == expected_device_override + platform_driver_agent.vip.config.set.assert_called_once() + + +@pytest.mark.driver_unit +def test_set_override_on_should_succeed_on_definite_duration(): + pattern = "campus/building1/*" + duration = 42.9 + override_interval_events = {"campus/building1/*": None} + + with get_platform_driver_agent(override_interval_events=override_interval_events) as platform_driver_agent: + platform_driver_agent.set_override_on(pattern, duration=duration) + + assert len(platform_driver_agent._override_patterns) == 1 + assert len(platform_driver_agent._override_devices) == 1 + platform_driver_agent.vip.config.set.assert_not_called() + + +@pytest.mark.driver_unit +def test_set_override_off_should_succeed(): + patterns = {"foobar", "device1"} + override_interval_events = {"device1": None} + pattern = "foobar" + + with get_platform_driver_agent(override_interval_events=override_interval_events, patterns=patterns) as platform_driver_agent: + override_patterns_count = len(platform_driver_agent._override_patterns) + + platform_driver_agent.set_override_off(pattern) + + assert len(platform_driver_agent._override_patterns) == override_patterns_count - 1 + platform_driver_agent.vip.config.set.assert_called_once() + + +@pytest.mark.driver_unit +def test_set_override_off_should_raise_override_error(): + with pytest.raises(OverrideError): + with get_platform_driver_agent() as platform_driver_agent: + pattern = "foobar" + + platform_driver_agent.set_override_off(pattern) + + +@pytest.mark.driver_unit +def test_derive_device_topic_should_succeed(): + config_name = "mytopic/foobar_topic" + expected_result = "foobar_topic" + + with get_platform_driver_agent() as platform_driver_agent: + result = platform_driver_agent.derive_device_topic(config_name) + + assert result == expected_result + + +@pytest.mark.driver_unit +def test_stop_driver_should_return_none(): + device_topic = "mytopic/foobar_topic" + + with get_platform_driver_agent() as platform_driver_agent: + result = platform_driver_agent.stop_driver(device_topic) + + assert result is None + + +@pytest.mark.driver_unit +def test_scrape_starting_should_return_none_on_false_scalability_test(): + topic = "mytopic/foobar" + + with get_platform_driver_agent() as platform_driver_agent: + result = platform_driver_agent.scrape_starting(topic) + + assert result is None + + +@pytest.mark.driver_unit +def test_scrape_starting_should_start_new_measurement_on_true_scalability_test(): + topic = "mytopic/foobar" + + with get_platform_driver_agent(scalability_test=True) as platform_driver_agent: + platform_driver_agent.scrape_starting(topic) + + assert platform_driver_agent.current_test_start < datetime.now() + # This should equal the size of the agent's instances + assert len(platform_driver_agent.waiting_to_finish) == 1 + + +@pytest.mark.driver_unit +def test_scrape_ending_should_return_none_on_false_scalability_test(): + topic = "mytopic/foobar" + + with get_platform_driver_agent() as platform_driver_agent: + result = platform_driver_agent.scrape_ending(topic) + assert result is None + + +@pytest.mark.driver_unit +def test_scrape_ending_should_increase_test_results_iterations(): + waiting_to_finish = set() + waiting_to_finish.add("mytopic/foobar") + topic = "mytopic/foobar" + + with get_platform_driver_agent(scalability_test=True, + waiting_to_finish=waiting_to_finish, + current_test_start=datetime.now()) as platform_driver_agent: + platform_driver_agent.scrape_ending(topic) + + assert len(platform_driver_agent.test_results) > 0 + assert platform_driver_agent.test_iterations > 0 + + +@pytest.mark.driver_unit +def test_clear_overrides(): + override_patterns = set("ffdfdsfd") + + with get_platform_driver_agent(override_patterns=override_patterns) as platform_driver_agent: + platform_driver_agent.clear_overrides() + + assert len(platform_driver_agent._override_interval_events) == 0 + assert len(platform_driver_agent._override_devices) == 0 + assert len(platform_driver_agent._override_patterns) == 0 + + +class MockedInstance: + def revert_all(self): + pass + + +@contextlib.contextmanager +def get_platform_driver_agent(override_patterns: set = set(), + override_interval_events: dict = {}, + patterns: dict = None, + scalability_test: bool = None, + waiting_to_finish: set = None, + current_test_start: datetime = None): + driver_config = json.dumps({ + "driver_scrape_interval": 0.05, + "publish_breadth_first_all": False, + "publish_depth_first": False, + "publish_breadth_first": False + }) + + if scalability_test: + platform_driver_agent = PlatformDriverAgent(driver_config, scalability_test=scalability_test) + else: + platform_driver_agent = PlatformDriverAgent(driver_config) + + platform_driver_agent._override_patterns = override_patterns + platform_driver_agent.instances = {"campus/building1/": MockedInstance()} + platform_driver_agent.core.spawn_return_value = None + platform_driver_agent._override_interval_events = override_interval_events + platform_driver_agent._cancel_override_events_return_value = None + platform_driver_agent.vip.config.set.return_value = "" + + if patterns is not None: + platform_driver_agent._override_patterns = patterns + if waiting_to_finish is not None: + platform_driver_agent.waiting_to_finish = waiting_to_finish + if current_test_start is not None: + platform_driver_agent.current_test_start = current_test_start + + try: + yield platform_driver_agent + finally: + platform_driver_agent.vip.reset_mock() + platform_driver_agent._override_patterns.clear() + + +@pytest.mark.driver +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("PlatformDriverAgent"), "platform-driver.agent") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), + config_file=config_json, + start=True, + vip_identity="health_test") + + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/MasterDriverAgent/tests/test_rest_driver.py b/services/core/PlatformDriverAgent/tests/test_rest_driver.py similarity index 91% rename from services/core/MasterDriverAgent/tests/test_rest_driver.py rename to services/core/PlatformDriverAgent/tests/test_rest_driver.py index 3af4d17fe1..f93c4a3cb8 100644 --- a/services/core/MasterDriverAgent/tests/test_rest_driver.py +++ b/services/core/PlatformDriverAgent/tests/test_rest_driver.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -78,14 +78,14 @@ def handle(env, start_response): @pytest.fixture(scope='module') def agent(request, volttron_instance): agent = volttron_instance.build_agent() - # Clean out master driver configurations. + # Clean out platform driver configurations. capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}} volttron_instance.add_capabilities(agent.core.publickey, capabilities) agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_delete_store', PLATFORM_DRIVER).get(timeout=10) - #Add test configurations. + # Add test configurations. agent.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', PLATFORM_DRIVER, @@ -100,18 +100,18 @@ def agent(request, volttron_instance): restful_csv_string, "csv").get(timeout=10) - master_uuid = volttron_instance.install_agent( - agent_dir=get_services_core("MasterDriverAgent"), + platform_uuid = volttron_instance.install_agent( + agent_dir=get_services_core("PlatformDriverAgent"), config_file={}, start=True) - print("agent id: ", master_uuid) + print("agent id: ", platform_uuid) gevent.sleep(2) # wait for the agent to start and start the devices server = pywsgi.WSGIServer((ip, int(port)), handle) server.start() def stop(): - volttron_instance.stop_agent(master_uuid) + volttron_instance.stop_agent(platform_uuid) agent.core.stop() server.stop() @@ -155,10 +155,10 @@ def test_restful_revert(agent): assert point == '42' # revert point - point = agent.vip.rpc.call(PLATFORM_DRIVER, - 'revert_point', - 'campus/building/unit', - 'test_point').get(timeout=10) + agent.vip.rpc.call(PLATFORM_DRIVER, + 'revert_point', + 'campus/building/unit', + 'test_point').get(timeout=10) # get point point = agent.vip.rpc.call(PLATFORM_DRIVER, diff --git a/services/core/MasterDriverAgent/tests/test_revert_mixin.py b/services/core/PlatformDriverAgent/tests/test_revert_mixin.py similarity index 92% rename from services/core/MasterDriverAgent/tests/test_revert_mixin.py rename to services/core/PlatformDriverAgent/tests/test_revert_mixin.py index 7627999362..2247131d9b 100644 --- a/services/core/MasterDriverAgent/tests/test_revert_mixin.py +++ b/services/core/PlatformDriverAgent/tests/test_revert_mixin.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,8 +36,9 @@ # under Contract DE-AC05-76RL01830 # }}} -from master_driver.interfaces.fakedriver import Interface import pytest + +from platform_driver.interfaces.fakedriver import Interface from volttron.platform.store import process_raw_config registry_config_string = """Point Name,Volttron Point Name,Units,Units Details,Writable,Starting Value,Type,Notes @@ -47,6 +48,7 @@ registry_config = process_raw_config(registry_config_string, config_type="csv") + @pytest.mark.driver def test_revert_point(): interface = Interface() @@ -61,7 +63,8 @@ def test_revert_point(): interface.revert_point("Float") value = interface.get_point("Float") assert value == 50.0 - + + @pytest.mark.driver def test_revert_device(): interface = Interface() @@ -76,7 +79,8 @@ def test_revert_device(): interface.revert_all() value = interface.get_point("Float") assert value == 50.0 - + + @pytest.mark.driver def test_revert_point_no_default(): interface = Interface() @@ -97,7 +101,7 @@ def test_revert_point_no_default(): temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value - #Do it twice to make sure it restores state after revert + # Do it twice to make sure it restores state after revert interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value @@ -105,7 +109,8 @@ def test_revert_point_no_default(): interface.revert_point("FloatNoDefault") temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value - + + @pytest.mark.driver def test_revert_all_no_default(): interface = Interface() @@ -126,7 +131,7 @@ def test_revert_all_no_default(): temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value - #Do it twice to make sure it restores state after revert + # Do it twice to make sure it restores state after revert interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value @@ -134,23 +139,24 @@ def test_revert_all_no_default(): interface.revert_all() temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value - + + @pytest.mark.driver def test_revert_no_default_changing_value(): interface = Interface() interface.configure({}, registry_config) initial_value = interface.get_point("FloatNoDefault") - #Initialize the revert value. + # Initialize the revert value. interface.scrape_all() new_value = initial_value + 1.0 - #Manually update the register values to give us something different to revert to. + # Manually update the register values to give us something different to revert to. register = interface.get_register_by_name("FloatNoDefault") register.value = new_value - #Update the revert value. + # Update the revert value. interface.scrape_all() test_value = new_value + 1.0 @@ -165,7 +171,7 @@ def test_revert_no_default_changing_value(): assert temp_value != initial_value - #Do it twice to make sure it restores state after revert + # Do it twice to make sure it restores state after revert interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value diff --git a/services/core/SQLAggregateHistorian/README.md b/services/core/SQLAggregateHistorian/README.md new file mode 100644 index 0000000000..f82f5ec586 --- /dev/null +++ b/services/core/SQLAggregateHistorian/README.md @@ -0,0 +1,108 @@ +# SQL Aggregate Historian + +An aggregate historian computes aggregates of data stored in a given +volttron historian\'s data store. It runs periodically to compute +aggregate data and store it in new tables/collections in the +historian\'s data store. Each historian implementation would use a +corresponding aggregate historian to compute and store aggregates. + +Aggregates can be defined for a specific time interval and can be +calculated for one or more topics. For example, 15 minute average of +topic1 or 15 minute average of values of topic1 and topic2. Current +version of this agent only computes aggregates supported by underlying +data store. When aggregation is done over more than one topic a unique +aggregation topic name should be configured by user. This topic name can +be used in historian\'s query api to query the collected aggregate data. + +Note: This agent doesn\'t not compute dynamic aggregates. It is only +useful when you know what kind of aggregate you would need before hand +and have them be collected periodically so that retrieval of that data +at a later point would be faster + +## Data flow between historian and aggregate historian + +1. Historian collects data from devices and stores it in its data store +2. Aggregate historian periodically queries historian\'s data store for data within configured time period. +3. Aggregate historian computes aggregates and stores it in historian\'s data store +4. Historian\'s query api queries aggregate data when used with additional parameters - agg_type, agg_period + +## Configuration + +``` {.python} +{ + # configuration from mysql historian - START + "connection": { + "type": "mysql", + "params": { + "host": "localhost", + "port": 3306, + "database": "test_historian", + "user": "historian", + "passwd": "historian" + } + }, + # configuration from mysql historian - END + # If you are using a differnt historian(sqlite3, mongo etc.) replace the + # above with connection details from the corresponding historian. + # the rest of the configuration would be the same for all aggregate + # historians + + "aggregations":[ + # list of aggregation groups each with unique aggregation_period and + # list of points that needs to be collected. value of "aggregations" is + # a list. you can configure this agent to collect multiple aggregates. + # aggregation_time_periiod + aggregation topic(s) together uniquely + # identify an aggregation + + { + # can be minutes(m), hours(h), weeks(w), or months(M) + + "aggregation_period": "1m", + + # Should aggregation period align to calendar time periods. + # Default False + # Example, + # if "aggregation_period":"1h" and "use_calendar_time_periods": False + # example periods: 10.15-11.15, 11.15-12.15, 12.15-13.15 etc. + # if "aggregation_period":"1h" and "use_calendar_time_periods": True + # example periods: 10.00-11.00, 11.00-12.00, 12.00-13.00 etc. + + "use_calendar_time_periods": "true", + + # topics to be aggregated + + "points": [ + { + # here since aggregation is done over a single topic name + # same topic name is used for the aggregation topic + "topic_names": ["device1/out_temp"], + "aggregation_type": "sum", + #minimum required records in the aggregation time period for aggregate to be recorded + "min_count": 2 + }, + { + "topic_names": ["device1/in_temp"], + "aggregation_type": "sum", + "min_count": 2 + } + ] + }, + { + "aggregation_period": "2m", + "use_calendar_time_periods": "false", + "points": [ + { + # aggregation over more than one topic so aggregation_topic_name should be specified + "topic_names": ["Building/device/point1", "Building/device/point2"], + "aggregation_topic_name":"building/device/point1_2/month_sum", + "aggregation_type": "avg", + "min_count": 2 + } + ] + } + ] +} +``` + +## See Also +[AggregateHistorianSpec](https://volttron.readthedocs.io/en/develop/developing-volttron/developing-agents/specifications/aggregate.html) diff --git a/services/core/SQLAggregateHistorian/README.rst b/services/core/SQLAggregateHistorian/README.rst deleted file mode 100644 index 5855f9cf48..0000000000 --- a/services/core/SQLAggregateHistorian/README.rst +++ /dev/null @@ -1,121 +0,0 @@ -.. _SQL_Aggregate_Historian: - -======================= -SQL Aggregate Historian -======================= - -An aggregate historian computes aggregates of data stored in a given volttron -historian's data store. It runs periodically to compute aggregate data -and store it in new tables/collections in the historian's data store. Each -historian implementation would use a corresponding aggregate historian to -compute and store aggregates. - -Aggregates can be defined for a specific time interval and can be calculated -for one or more topics. For example, 15 minute average of topic1 or 15 minute -average of values of topic1 and topic2. Current version of this agent only -computes aggregates supported by underlying data store. When aggregation is -done over more than one topic a unique aggregation topic name should be -configured by user. This topic name can be used in historian's query api to -query the collected aggregate data. - -Note: This agent doesn't not compute dynamic aggregates. It is only useful when -you know what kind of aggregate you would need before hand and have them be -collected periodically so that retrieval of that data at a later point would be -faster - -Data flow between historian and aggregate historian ---------------------------------------------------- - - 1. Historian collects data from devices and stores it in its data store - 2. Aggregate historian periodically queries historian's data store for data - within configured time period. - 3. Aggregate historian computes aggregates and stores it in historian's - data store - 3. Historian's query api queries aggregate data when used with additional - parameters - agg_type, agg_period - -Configuration -------------- - -.. code-block:: python - - { - # configuration from mysql historian - START - "connection": { - "type": "mysql", - "params": { - "host": "localhost", - "port": 3306, - "database": "test_historian", - "user": "historian", - "passwd": "historian" - } - }, - # configuration from mysql historian - END - # If you are using a differnt historian(sqlite3, mongo etc.) replace the - # above with connection details from the corresponding historian. - # the rest of the configuration would be the same for all aggregate - # historians - - "aggregations":[ - # list of aggregation groups each with unique aggregation_period and - # list of points that needs to be collected. value of "aggregations" is - # a list. you can configure this agent to collect multiple aggregates. - # aggregation_time_periiod + aggregation topic(s) together uniquely - # identify an aggregation - - { - # can be minutes(m), hours(h), weeks(w), or months(M) - - "aggregation_period": "1m", - - # Should aggregation period align to calendar time periods. - # Default False - # Example, - # if "aggregation_period":"1h" and "use_calendar_time_periods": False - # example periods: 10.15-11.15, 11.15-12.15, 12.15-13.15 etc. - # if "aggregation_period":"1h" and "use_calendar_time_periods": True - # example periods: 10.00-11.00, 11.00-12.00, 12.00-13.00 etc. - - "use_calendar_time_periods": "true", - - # topics to be aggregated - - "points": [ - { - # here since aggregation is done over a single topic name - # same topic name is used for the aggregation topic - "topic_names": ["device1/out_temp"], - "aggregation_type": "sum", - #minimum required records in the aggregation time period for - #aggregate to be recorded - "min_count": 2 - }, - { - "topic_names": ["device1/in_temp"], - "aggregation_type": "sum", - "min_count": 2 - } - ] - }, - { - "aggregation_period": "2m", - "use_calendar_time_periods": "false", - "points": [ - { - # aggregation over more than one topic so - # aggregation_topic_name should be specified - "topic_names": ["Building/device/point1", "Building/device/point2"], - "aggregation_topic_name":"building/device/point1_2/month_sum", - "aggregation_type": "avg", - "min_count": 2 - } - ] - } - ] - } - - -See Also --------- - `AggregateHistorianSpec`_ diff --git a/services/core/SQLAggregateHistorian/Tests/test_sql_aggregate_historian.py b/services/core/SQLAggregateHistorian/Tests/test_sql_aggregate_historian.py new file mode 100644 index 0000000000..0d54341e78 --- /dev/null +++ b/services/core/SQLAggregateHistorian/Tests/test_sql_aggregate_historian.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json +import gevent +import pytest + +from volttron.platform import get_services_core +from volttron.platform.messaging.health import STATUS_GOOD + + +@pytest.mark.sqlhistorian +@pytest.mark.aggregator +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("SQLAggregateHistorian"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + volttron_instance.install_agent( + agent_dir=get_services_core("SQLAggregateHistorian"), + config_file=config_json, + start=True, + vip_identity="health_test") + + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/SQLAggregateHistorian/config b/services/core/SQLAggregateHistorian/config index 3773342e0e..72fe1301fb 100644 --- a/services/core/SQLAggregateHistorian/config +++ b/services/core/SQLAggregateHistorian/config @@ -1,17 +1,11 @@ { - # configuration from historian. given example is for mysql - START "connection": { - "type": "mysql", + "type": "sqlite", "params": { - "host": "localhost", - "port": 3306, - "database": "test_historian", - "user": "historian", - "passwd": "historian" + "database": "test.sqlite", + "timeout": 15 } }, - # configuration from historian. given example is for mysql - END - "aggregations":[ { "aggregation_period": "1m", @@ -40,4 +34,4 @@ ] } ] -} \ No newline at end of file +} diff --git a/services/core/SQLAggregateHistorian/conftest.py b/services/core/SQLAggregateHistorian/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/core/SQLAggregateHistorian/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/SQLAggregateHistorian/setup.py b/services/core/SQLAggregateHistorian/setup.py index 9ec0dbcd76..d2a4ab07cd 100644 --- a/services/core/SQLAggregateHistorian/setup.py +++ b/services/core/SQLAggregateHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/SQLAggregateHistorian/sqlaggregator/aggregator.py b/services/core/SQLAggregateHistorian/sqlaggregator/aggregator.py index 40c6098fd3..72401034cc 100644 --- a/services/core/SQLAggregateHistorian/sqlaggregator/aggregator.py +++ b/services/core/SQLAggregateHistorian/sqlaggregator/aggregator.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/SQLHistorian/README.md b/services/core/SQLHistorian/README.md new file mode 100644 index 0000000000..60289d7dd4 --- /dev/null +++ b/services/core/SQLHistorian/README.md @@ -0,0 +1,218 @@ +# SQLHistorian + +This is a historian agent that writes data to a SQLite, Mysql, Postgres, +TimeScale, or Redshift database based on the connection parameters in +the configuration. The sql historian has been programmed to allow for +inconsistent network connectivity (automatic re-connection to tcp based +databases). All additions to the historian are batched and wrapped +within a transaction with commit and rollback functions properly +implemented. This allows the maximum throughput of data with the most +protection. + +## MySQL + +### Installation notes + +1. In order to support timestamp with microseconds you need at least + MySql 5.6.4. Please see this [MySql + documentation](http://dev.mysql.com/doc/refman/5.6/en/fractional-seconds.html) + for more details +2. The mysql user must have SELECT INSERT, and DELETE privileges to the + historian database tables. +3. SQLHistorianAgent can create the database tables the first time it + runs if the database user has CREATE privileges. But we recommend + this only for development/test environments. For all other use + cases, use the mysql-create\*.sql script to create the tables and + then start agent. This way database user used by VOLTTRON historian + can work with minimum required privileges + +### Dependencies + +In order to use mysql one must install the **mysql-python connector** + +From an activated shell execute +``` +pip install mysql-connector-python-rf +``` +On Ubuntu 16.04 + > pip install does not work. Please download the connector from + > and follow instructions on README + +### Configuration + +The following is a minimal configuration file for using a MySQL based +historian. Other options are available and are documented +. +**Not all mysql connection parameters have been tested, use at your own +risk**. The configurations can be provided in JSON format or yml format + +JSON format : + + { + "connection": { + # type should be "mysql" + "type": "mysql", + # additional mysql connection parameters could be added but + # have not been tested + "params": { + "host": "localhost", + "port": 3306, + "database": "volttron", + "user": "user", + "passwd": "pass" + } + } + } + +YML format : + + connection: + type: mysql + params: + host: localhost + port: 3306 + database: test_historian + user: historian + passwd: historian + +## SQLite3 + +An Sqlite historian provides a convenient solution for under powered +systems. The database parameter is a location on the file system. By +default it is relative to the agents installation directory, however it +will respect a rooted or relative path to the database. + +### Configuration + + { + "connection": { + # type should be sqlite + "type": "sqlite", + "params": { + "database": "data/historian.sqlite", + } + } + } + +## PostgreSQL and Redshift + +### Installation notes + +1. The PostgreSQL database driver supports recent PostgreSQL versions. + It was tested on 10.x, but should work with 9.x and 11.x. +2. The user must have SELECT, INSERT, and UPDATE privileges on + historian tables. +3. The tables in the database are created as part of the execution of + the SQLHistorianAgent, but this will fail if the database user does + not have CREATE privileges. +4. Care must be exercised when using multiple historians with the same + database. This configuration may be used only if there is no overlap + in the topics handled by each instance. Otherwise, duplicate topic + IDs may be created, producing strange results. +5. Redshift databases do not support unique constraints. Therefore, it + is possible that tables may contain some duplicate data. The + Redshift driver handles this by using distinct queries. It does not + remove duplicates from the tables. + +### Dependencies + +The PostgreSQL and Redshift database drivers require the **psycopg2** +Python package. + +From an activated shell execute: +``` +pip install psycopg2-binary +``` + +### Configuration + +The following are minimal configuration files for using a psycopg2-based +historian. Other options are available and are documented + **Not all parameters have +been tested, use at your own risk**. + +#### Local PostgreSQL Database + +The following snippet demonstrates how to configure the +SQLHistorianAgent to use a PostgreSQL database on the local system that +is configured to use Unix domain sockets. The user executing volttron +must have appropriate privileges. +``` + { + "connection": { + "type": "postgresql", + "params": { "dbname": "volttron" } + } + } + +#### Remote PostgreSQL Database + +The following snippet demonstrates how to configure the +SQLHistorianAgent to use a remote PostgreSQL database. +``` + { + "connection": { + "type": "postgresql", + "params": { + "dbname": "volttron", + "host": "historian.example.com", + "port": 5432, + "user": "volttron", + "password": "secret" } + } + } + +#### TimescaleDB Support + +Both of the above PostgreSQL connection types can make use of +TimescaleDB\'s high performance Hypertable backend for the primary +timeseries table. The agent assumes you have completed the TimescaleDB +installation and setup the database by following the instructions here: + To use, simply +add \'timescale_dialect: true\' to the connection params in the Agent +Config as below + +``` + { + "connection": { + "type": "postgresql", + "params": { + "dbname": "volttron", + "host": "historian.example.com", + "port": 5432, + "user": "volttron", + "password": "secret" , + "timescale_dialect": true } + } + + } +``` + +#### Redshift Database + +The following snippet demonstrates how to configure the +SQLHistorianAgent to use a Redshift database. + +``` + { + "connection": { + "type": "redshift", + "params": { + "dbname": "volttron", + "host": "historian.example.com", + "port": 5432, + "user": "volttron", + "password": "secret" } + } + } +``` + +## Notes + +Do not use the \"identity\" setting in configuration file. Instead use +the new method provided by the platform to set an agent\'s identity. See +scripts/core/make-sqlite-historian.sh for an example of how this is +done. Setting a historian\'s VIP IDENTITY from its configuration file +will not be supported after VOLTTRON 4.0. Using the identity +configuration setting will override the value provided by the platform. +This new value will not be reported correctly by \'volttron-ctl status\' diff --git a/services/core/SQLHistorian/README.rst b/services/core/SQLHistorian/README.rst deleted file mode 100644 index 647dc6cf2c..0000000000 --- a/services/core/SQLHistorian/README.rst +++ /dev/null @@ -1,232 +0,0 @@ -.. _SQL_Historian: - -============ -SQLHistorian -============ - -This is a historian agent that writes data to a SQLite or Mysql database -based on the connection parameters in the configuration. The sql historian has -been programmed to allow for inconsistent network connectivity -(automatic re-connection to tcp based databases). All additions to the -historian are batched and wrapped within a transaction with commit and -rollback functions properly implemented. This allows the maximum -throughput of data with the most protection. - -MySQL -~~~~~ - -Installation notes ------------------- - -1. In order to support timestamp with microseconds you need at least - MySql 5.6.4. Please see this `MySql documentation - `__ - for more details - -2. The mysql user must have READ, WRITE, UPDATE, and DELETE privileges. - -3. The tables in the sql database be created as part of the execution of - the SQLHistorianAgent only if the database user has CREATE privileges. - If not, use the mysql-create*.sql script to create the tables and then - start agent. - -Dependencies ------------- - -In order to use mysql one must install the **mysql-python connector** - - From an activated shell execute - - pip install mysql-connector-python-rf - - On Ubuntu 16.04 - - pip install does not work. Please download the connector from - ``__ - and follow instructions on README - -Configuration -------------- - -The following is a minimal configuration file for using a MySQL based -historian. Other options are available and are documented -http://dev.mysql.com/doc/connector-python/en/connector-python-connectargs.html. -**Not all parameters have been tested, use at your own risk**. - -:: - - { - "connection": { - # type should be "mysql" - "type": "mysql", - # additional mysql connection parameters could be added but - # have not been tested - "params": { - "host": "localhost", - "port": 3306, - "database": "volttron", - "user": "user", - "passwd": "pass" - } - } - } - -SQLite3 -~~~~~~~ - -An Sqlite historian provides a convenient solution for under powered -systems. The database parameter is a location on the file system. By -default it is relative to the agents installation directory, however it -will respect a rooted or relative path to the database. - -Configuration -------------- -:: - - { - "connection": { - # type should be sqlite - "type": "sqlite", - "params": { - "database": "data/historian.sqlite", - } - } - } - -PostgreSQL and Redshift -~~~~~~~~~~~~~~~~~~~~~~~ - -Installation notes ------------------- - -1. The PostgreSQL database driver supports recent PostgreSQL versions. - It was tested on 10.x, but should work with 9.x and 11.x. - -2. The user must have SELECT, INSERT, and UPDATE privileges on historian - tables. - -3. The tables in the database are created as part of the execution of - the SQLHistorianAgent, but this will fail if the database user does not - have CREATE privileges. - -4. Care must be exercised when using multiple historians with the same - database. This configuration may be used only if there is no overlap in - the topics handled by each instance. Otherwise, duplicate topic IDs - may be created, producing strange results. - -5. Redshift databases do not support unique constraints. Therefore, it is - possible that tables may contain some duplicate data. The Redshift driver - handles this by using distinct queries. It does not remove duplicates - from the tables. - -Dependencies ------------- - -The PostgreSQL and Redshift database drivers require the **psycopg2** Python package. - - From an activated shell execute: - - pip install psycopg2-binary - -Configuration -------------- - -The following are minimal configuration files for using a psycopg2-based -historian. Other options are available and are documented -http://initd.org/psycopg/docs/module.html -**Not all parameters have been tested, use at your own risk**. - -Local PostgreSQL Database -+++++++++++++++++++++++++ - -The following snippet demonstrates how to configure the -SQLHistorianAgent to use a PostgreSQL database on the local system -that is configured to use Unix domain sockets. The user executing -volttron must have appropriate privileges. - -:: - { - "connection": { - "type": "postgresql", - "params": { - "dbname": "volttron" - } - } - } - -Remote PostgreSQL Database -++++++++++++++++++++++++++ - -The following snippet demonstrates how to configure the -SQLHistorianAgent to use a remote PostgreSQL database. - -:: - { - "connection": { - "type": "postgresql", - "params": { - "dbname": "volttron", - "host": "historian.example.com", - "port": 5432, - "user": "volttron", - "password": "secret" - } - } - } - -TimescaleDB Support -++++++++++++++++++++++++++ - -Both of the above PostgreSQL connection types can make -use of TimescaleDB's high performance Hypertable backend -for the primary timeseries table. The agent assumes you -have completed the TimescaleDB installation and setup -the database by following the instructions here: -https://docs.timescale.com/latest/getting-started/setup -To use, simply add 'timescale_dialect: true' to the -connection params in the Agent Config as below - -:: - { - "connection": { - "type": "postgresql", - "params": { - "dbname": "volttron", - "host": "historian.example.com", - "port": 5432, - "user": "volttron", - "password": "secret", - "timescale_dialect": true - } - } - } - -Redshift Database -+++++++++++++++++ - -The following snippet demonstrates how to configure the -SQLHistorianAgent to use a Redshift database. - -:: - { - "connection": { - "type": "redshift", - "params": { - "dbname": "volttron", - "host": "historian.example.com", - "port": 5432, - "user": "volttron", - "password": "secret" - } - } - } - -Notes -~~~~~ -Do not use the "identity" setting in configuration file. Instead use the -new method provided by the platform to set an agent's identity. -See scripts/core/make-sqlite-historian.sh for an example of how this -is done. Setting a historian's VIP IDENTITY from its configuration file will -not be supported after VOLTTRON 4.0. Using the identity configuration setting -will override the value provided by the platform. This new value will not be -reported correctly by 'volttron-ctl status' diff --git a/services/core/SQLHistorian/config.mysql.yml b/services/core/SQLHistorian/config.mysql.yml new file mode 100644 index 0000000000..accf26f22c --- /dev/null +++ b/services/core/SQLHistorian/config.mysql.yml @@ -0,0 +1,8 @@ +connection: + type: mysql + params: + host: localhost + port: 3306 + database: test_historian + user: historian + passwd: historian diff --git a/services/core/SQLHistorian/config.postgresql b/services/core/SQLHistorian/config.postgresql index b9b79fbdef..0d8f45972f 100644 --- a/services/core/SQLHistorian/config.postgresql +++ b/services/core/SQLHistorian/config.postgresql @@ -2,11 +2,11 @@ "connection": { "type": "postgresql", "params": { - "dbname": "historian" - #, "host": "127.0.0.1" - #, "port": 5432 - #, "user": "volttron" - #, "password": "secret" + "dbname": "historian", + "host": "127.0.0.1", + "port": 5432, + "user": "volttron", + "password": "secret" } } } diff --git a/services/core/SQLHistorian/config.sqlite b/services/core/SQLHistorian/config.sqlite index 1640fbc63f..1f542200f9 100644 --- a/services/core/SQLHistorian/config.sqlite +++ b/services/core/SQLHistorian/config.sqlite @@ -2,9 +2,6 @@ "connection": { "type": "sqlite", "params": { - # if no directory is given, location will be under install_dir/.agent-data directory - # in secure mode as this will be only directory in which agent will have write access - # In regular mode it will be under install_dir/data for backward compatibility "database": "historian_test.sqlite" } }, diff --git a/services/core/SQLHistorian/config_device_data_filter.sqlite b/services/core/SQLHistorian/config_device_data_filter.sqlite index 11f0510ffb..ac9c4260ea 100644 --- a/services/core/SQLHistorian/config_device_data_filter.sqlite +++ b/services/core/SQLHistorian/config_device_data_filter.sqlite @@ -2,9 +2,6 @@ "connection": { "type": "sqlite", "params": { - # if no directory is given, location will be under install_dir/.agent-data directory - # in secure mode as this will be only directory in which agent will have write access - # In regular mode it will be under install_dir/data for backward compatibility "database": "historian_test.sqlite" } }, diff --git a/services/core/SQLHistorian/mysql-create.sql b/services/core/SQLHistorian/mysql-create.sql index 63288dcf6b..bb88be2c9b 100644 --- a/services/core/SQLHistorian/mysql-create.sql +++ b/services/core/SQLHistorian/mysql-create.sql @@ -1,4 +1,8 @@ -- This script assumes that the user has access to create the database. +-- update database name, user name, and password before executing the below commands +-- table names used below are default names used by historian. If you would like to customize table names +-- customize using the configuration tables_def and change the names in the below commands + CREATE DATABASE test_historian; USE test_historian; @@ -11,9 +15,9 @@ CREATE TABLE data (ts timestamp(6) NOT NULL, CREATE INDEX data_idx ON data (ts ASC); CREATE TABLE topics (topic_id INTEGER NOT NULL AUTO_INCREMENT, - topic_name varchar(512) NOT NULL, - PRIMARY KEY (topic_id), - UNIQUE(topic_name)); + topic_name varchar(512) NOT NULL, + PRIMARY KEY (topic_id), + UNIQUE(topic_name)); CREATE TABLE meta(topic_id INTEGER NOT NULL, metadata TEXT NOT NULL, @@ -27,15 +31,43 @@ CREATE TABLE volttron_table_definitions( #Use the below syntax for creating user and grant access to the historian database -CREATE USER 'username'@'localhost' IDENTIFIED BY 'password'; +#CREATE USER 'username'@'localhost' IDENTIFIED BY 'password'; +CREATE USER 'historian'@'localhost' IDENTIFIED BY 'historian'; + +# GRANT ON . TO ''@'host' +GRANT SELECT, INSERT, DELETE ON test_historian.* TO 'historian'@'localhost'; + +# GRANT UPDATE ON . TO 'username'@'localhost'; +GRANT UPDATE ON test_historian.topics TO 'historian'@'localhost'; + +# TO Run test_historian.py you need additional create and index privileges +GRANT CREATE, INDEX ON test_historian.* TO 'historian'@'localhost'; + +# If you are using aggregate historians with mysql create and grant access to additional tables + +CREATE TABLE aggregate_topics + (agg_topic_id INTEGER NOT NULL AUTO_INCREMENT, + agg_topic_name varchar(512) NOT NULL, + agg_type varchar(512) NOT NULL, + agg_time_period varchar(512) NOT NULL, + PRIMARY KEY (agg_topic_id), + UNIQUE(agg_topic_name, agg_type, agg_time_period)); + +CREATE TABLE aggregate_meta + (agg_topic_id INTEGER NOT NULL, + metadata TEXT NOT NULL, + PRIMARY KEY(agg_topic_id)); + +# FOR EACH CONFIGURED AGGREGATION execute the following where aggregate_data_table is aggregation_type+"_"+aggregation_period +# for example avg_10m for 10 minute average -# GRANT ON . TO 'username'@'host' -GRANT SELECT, CREATE, INDEX, INSERT ON test_historian.* TO 'user'@'localhost'; -GRANT UPDATE ON test_historian. TO 'user'@'localhost'; -GRANT UPDATE ON test_historian.topics TO 'user'@'localhost'; +CREATE TABLE + (ts timestamp(6) NOT NULL, topic_id INTEGER NOT NULL, + value_string TEXT NOT NULL, topics_list TEXT, + UNIQUE(topic_id, ts), + INDEX (ts ASC)) -# If you are using aggregate historians with mysql also grant udpate access to aggregate_topics -GRANT UPDATE ON test_historian.aggregate_topics TO 'user'@'localhost'; +# GRANT UPDATE ON .aggregate_topics TO 'username'@'localhost'; +GRANT UPDATE ON test_historian.aggregate_topics TO 'historian'@'localhost'; +GRANT UPDATE ON test_historian.aggregate_meta TO 'historian'@'localhost'; -# For running test cases additional provide DELETE permission on the test database to the test user -GRANT DELETE ON test_historian.* TO 'user'@'localhost'; diff --git a/services/core/SQLHistorian/mysql-create_before_5.6.4.sql b/services/core/SQLHistorian/mysql-create_before_5.6.4.sql index d5f3bde0ba..5d29687900 100644 --- a/services/core/SQLHistorian/mysql-create_before_5.6.4.sql +++ b/services/core/SQLHistorian/mysql-create_before_5.6.4.sql @@ -1,32 +1,73 @@ -- This script assumes that the user has access to create the database. +-- update database name, user name, and password before executing the below commands +-- table names used below are default names used by historian. If you would like to customize table names +-- customize using the configuration tables_def and change the names in the below commands + CREATE DATABASE test_historian; USE test_historian; - CREATE TABLE data (ts timestamp NOT NULL, - topic_id INTEGER NOT NULL, - value_string TEXT NOT NULL, + topic_id INTEGER NOT NULL, + value_string TEXT NOT NULL, UNIQUE(ts, topic_id)); - + CREATE INDEX data_idx ON data (ts ASC); CREATE TABLE topics (topic_id INTEGER NOT NULL AUTO_INCREMENT, topic_name varchar(512) NOT NULL, PRIMARY KEY (topic_id), UNIQUE(topic_name)); + CREATE TABLE meta(topic_id INTEGER NOT NULL, metadata TEXT NOT NULL, PRIMARY KEY(topic_id)); +CREATE TABLE volttron_table_definitions( + table_id varchar(512) PRIMARY KEY, + table_name varchar(512) NOT NULL, + table_prefix varchar(512)); + #Use the below syntax for creating user and grant access to the historian database -CREATE USER 'username'@'localhost' IDENTIFIED BY 'password'; +#CREATE USER 'username'@'localhost' IDENTIFIED BY 'password'; +CREATE USER 'historian'@'localhost' IDENTIFIED BY 'historian'; + +# GRANT ON . TO ''@'host' +GRANT SELECT, INSERT, DELETE ON test_historian.* TO 'historian'@'localhost'; -#GRANT ON . TO 'username'@'host' -GRANT SELECT, CREATE, INDEX, INSERT ON test_historian.* TO 'user'@'localhost'; +# GRANT UPDATE ON . TO 'username'@'localhost'; GRANT UPDATE ON test_historian.topics TO 'historian'@'localhost'; -# For running test cases additional provide DELETE permission on the test database to the test user -GRANT DELETE ON test_historian.* TO 'user'@'localhost'; \ No newline at end of file + +# TO Run test_historian.py you need additional create and index privileges +GRANT CREATE, INDEX ON test_historian.* TO 'historian'@'localhost'; + +# If you are using aggregate historians with mysql create and grant access to additional tables + +CREATE TABLE aggregate_topics + (agg_topic_id INTEGER NOT NULL AUTO_INCREMENT, + agg_topic_name varchar(512) NOT NULL, + agg_type varchar(512) NOT NULL, + agg_time_period varchar(512) NOT NULL, + PRIMARY KEY (agg_topic_id), + UNIQUE(agg_topic_name, agg_type, agg_time_period)); + +CREATE TABLE aggregate_meta + (agg_topic_id INTEGER NOT NULL, + metadata TEXT NOT NULL, + PRIMARY KEY(agg_topic_id)); + +# FOR EACH CONFIGURED AGGREGATION execute the following where aggregate_data_table is aggregation_type+"_"+aggregation_period +# for example avg_10m for 10 minute average + +CREATE TABLE + (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, + value_string TEXT NOT NULL, topics_list TEXT, + UNIQUE(topic_id, ts), + INDEX (ts ASC)) + +# GRANT UPDATE ON .aggregate_topics TO 'username'@'localhost'; +GRANT UPDATE ON test_historian.aggregate_topics TO 'historian'@'localhost'; +GRANT UPDATE ON test_historian.aggregate_meta TO 'historian'@'localhost'; diff --git a/services/core/SQLHistorian/setup.py b/services/core/SQLHistorian/setup.py index ff8359aa57..72d5048278 100644 --- a/services/core/SQLHistorian/setup.py +++ b/services/core/SQLHistorian/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/SQLHistorian/sqlhistorian/historian.py b/services/core/SQLHistorian/sqlhistorian/historian.py index 9e8cce80be..b696171e2d 100644 --- a/services/core/SQLHistorian/sqlhistorian/historian.py +++ b/services/core/SQLHistorian/sqlhistorian/historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -62,7 +62,6 @@ def historian(config_path, **kwargs): This method is called by the :py:func:`sqlhistorian.historian.main` to parse the passed config file or configuration dictionary object, validate the configuration entries, and create an instance of SQLHistorian - :param config_path: could be a path to a configuration file or can be a dictionary object :param kwargs: additional keyword arguments if any @@ -90,23 +89,21 @@ def historian(config_path, **kwargs): SQLHistorian.__name__ = 'SQLHistorian' utils.update_kwargs_with_config(kwargs, config_dict) - _log.debug("In sql historian before calling class kwargs is {}".format( - kwargs)) + _log.debug("In sql historian before calling class kwargs is {}".format(kwargs)) return SQLHistorian(**kwargs) class SQLHistorian(BaseHistorian): - """This is a historian agent that writes data to a SQLite or Mysql + """ + This is a historian agent that writes data to a SQLite or Mysql database based on the connection parameters in the configuration. - .. seealso:: - :py:mod:`volttron.platform.dbutils.basedb` - :py:mod:`volttron.platform.dbutils.mysqlfuncts` - :py:mod:`volttron.platform.dbutils.sqlitefuncts` - """ - def __init__(self, connection, tables_def = None, **kwargs): + def __init__(self, connection, tables_def=None, **kwargs): """Initialise the historian. The historian makes two connections to the data store. Both of @@ -150,31 +147,23 @@ def __init__(self, connection, tables_def = None, **kwargs): # everything else happens in the MainThread # One utils class instance( hence one db connection) for main thread - self.main_thread_dbutils = self.db_functs_class( - self.connection['params'], - self.table_names) + self.main_thread_dbutils = self.db_functs_class(self.connection['params'], self.table_names) # One utils class instance( hence one db connection) for main thread # this gets initialized in the bg_thread within historian_setup self.bg_thread_dbutils = None super(SQLHistorian, self).__init__(**kwargs) def record_table_definitions(self, meta_table_name): - self.bg_thread_dbutils.record_table_definitions(self.tables_def, - meta_table_name) + self.bg_thread_dbutils.record_table_definitions(self.tables_def, meta_table_name) def manage_db_size(self, history_limit_timestamp, storage_limit_gb): """ Optional function to manage database size. """ - self.bg_thread_dbutils.manage_db_size(history_limit_timestamp, - storage_limit_gb) + self.bg_thread_dbutils.manage_db_size(history_limit_timestamp, storage_limit_gb) @doc_inherit def publish_to_historian(self, to_publish_list): - #thread_name = threading.currentThread().getName() - #_log.debug( - # "publish_to_historian number of items: {} Thread: {}:{}".format( - # len(to_publish_list), threading.current_thread(), thread_name)) try: published = 0 with self.bg_thread_dbutils.bulk_insert() as insert_data: @@ -184,8 +173,7 @@ def publish_to_historian(self, to_publish_list): value = x['value'] meta = x['meta'] - # look at the topics that are stored in the database - # already to see if this topic has a value + # look at the topics that are stored in the database already to see if this topic has a value lowercase_name = topic.lower() topic_id = self.topic_id_map.get(lowercase_name, None) db_topic_name = self.topic_name_map.get(lowercase_name, @@ -194,49 +182,37 @@ def publish_to_historian(self, to_publish_list): # _log.debug('Inserting topic: {}'.format(topic)) # Insert topic name as is in db topic_id = self.bg_thread_dbutils.insert_topic(topic) - # user lower case topic name when storing in map - # for case insensitive comparison + # user lower case topic name when storing in map for case insensitive comparison self.topic_id_map[lowercase_name] = topic_id self.topic_name_map[lowercase_name] = topic - # _log.debug('TopicId: {} => {}'.format(topic_id, topic)) elif db_topic_name != topic: - # _log.debug('Updating topic: {}'.format(topic)) self.bg_thread_dbutils.update_topic(topic, topic_id) self.topic_name_map[lowercase_name] = topic old_meta = self.topic_meta.get(topic_id, {}) if set(old_meta.items()) != set(meta.items()): - # _log.debug( - # 'Updating meta for topic: {} {}'.format(topic, - # meta)) self.bg_thread_dbutils.insert_meta(topic_id, meta) self.topic_meta[topic_id] = meta if insert_data(ts, topic_id, value): - # _log.debug('item was inserted') published += 1 if published: if self.bg_thread_dbutils.commit(): - # _log.debug('published {} data values'.format(published)) self.report_all_handled() else: - _log.debug('Commit error. Rolling back {} values.'.format( - published)) + _log.debug('Commit error. Rolling back {} values.'.format(published)) self.bg_thread_dbutils.rollback() else: - _log.debug( - 'Unable to publish {}'.format(len(to_publish_list))) + _log.debug('Unable to publish {}'.format(len(to_publish_list))) except Exception as e: - #TODO Unable to send alert from here + # TODO Unable to send alert from here # if isinstance(e, ConnectionError): # _log.debug("Sending alert. Exception {}".format(e.args)) - # err_message = "Unable to connect to database. " \ - # "Exception:{}".format(e.args) + # err_message = "Unable to connect to database. Exception:{}".format(e.args) # alert_id = DB_CONNECTION_FAILURE # else: - # err_message = "Unknown exception when publishing data. " \ - # "Exception: {}".format(e.args) + # err_message = "Unknown exception when publishing data. Exception: {}".format(e.args) # alert_id = ERROR_PUBLISHING_DATA # self.vip.health.set_status(STATUS_BAD, err_message) # status = Status.from_json(self.vip.health.get_status()) @@ -248,8 +224,7 @@ def publish_to_historian(self, to_publish_list): @doc_inherit def query_topic_list(self): - _log.debug("query_topic_list Thread is: {}".format( - threading.currentThread().getName())) + _log.debug("query_topic_list Thread is: {}".format(threading.currentThread().getName())) if len(self.topic_name_map) > 0: return list(self.topic_name_map.values()) else: @@ -278,11 +253,9 @@ def query_aggregate_topics(self): return self.main_thread_dbutils.get_agg_topics() @doc_inherit - def query_historian(self, topic, start=None, end=None, agg_type=None, - agg_period=None, skip=0, count=None, + def query_historian(self, topic, start=None, end=None, agg_type=None, agg_period=None, skip=0, count=None, order="FIRST_TO_LAST"): - _log.debug("query_historian Thread is: {}".format( - threading.currentThread().getName())) + _log.debug("query_historian Thread is: {}".format(threading.currentThread().getName())) results = dict() topics_list = [] if isinstance(topic, str): @@ -299,59 +272,45 @@ def query_historian(self, topic, start=None, end=None, agg_type=None, topic_id = self.topic_id_map.get(topic_lower) if agg_type: agg_type = agg_type.lower() - topic_id = self.agg_topic_id_map.get( - (topic_lower, agg_type, agg_period)) + topic_id = self.agg_topic_id_map.get((topic_lower, agg_type, agg_period)) if topic_id is None: - # load agg topic id again as it might be a newly - # configured aggregation + # load agg topic id again as it might be a newly configured aggregation agg_map = self.main_thread_dbutils.get_agg_topic_map() self.agg_topic_id_map.update(agg_map) - _log.debug(" Agg topic map after updating {} " - "".format(self.agg_topic_id_map)) - topic_id = self.agg_topic_id_map.get( - (topic_lower, agg_type, agg_period)) + _log.debug(" Agg topic map after updating {} ".format(self.agg_topic_id_map)) + topic_id = self.agg_topic_id_map.get((topic_lower, agg_type, agg_period)) if topic_id: topic_ids.append(topic_id) id_name_map[topic_id] = topic else: - _log.warn('No such topic {}'.format(topic)) + _log.warning('No such topic {}'.format(topic)) if not topic_ids: - _log.warn('No topic ids found for topics{}. Returning ' - 'empty result'.format(topics_list)) + _log.warning('No topic ids found for topics{}. Returning empty result'.format(topics_list)) return results - _log.debug( - "Querying db reader with topic_ids {} ".format(topic_ids)) + _log.debug("Querying db reader with topic_ids {} ".format(topic_ids)) - values = self.main_thread_dbutils.query( - topic_ids, id_name_map, start=start, end=end, agg_type=agg_type, - agg_period=agg_period, skip=skip, count=count, order=order) - metadata = {} + values = self.main_thread_dbutils.query(topic_ids, id_name_map, start=start, end=end, agg_type=agg_type, + agg_period=agg_period, skip=skip, count=count, order=order) meta_tid = None if len(values) > 0: - # If there are results add metadata if it is a query on a - # single topic + # If there are results add metadata if it is a query on a single topic if not multi_topic_query: values = list(values.values())[0] if agg_type: - # if aggregation is on single topic find the topic id - # in the topics table that corresponds to agg_topic_id - # so that we can grab the correct metadata - # if topic name does not have entry in topic_id_map - # it is a user configured aggregation_topic_name - # which denotes aggregation across multiple points - _log.debug("Single topic aggregate query. Try to get " - "metadata") + # if aggregation is on single topic find the topic id in the topics table that corresponds to + # agg_topic_id so that we can grab the correct metadata if topic name does not have entry in + # topic_id_map it is a user configured aggregation_topic_name which denotes aggregation across + # multiple points + _log.debug("Single topic aggregate query. Try to get metadata") meta_tid = self.topic_id_map.get(topic.lower(), None) else: - # this is a query on raw data, get metadata for - # topic from topic_meta map + # this is a query on raw data, get metadata for topic from topic_meta map meta_tid = topic_ids[0] if values: metadata = self.topic_meta.get(meta_tid, {}) - # _log.debug("metadata is {}".format(metadata)) results = {'values': values, 'metadata': metadata} else: results = dict() @@ -361,9 +320,7 @@ def query_historian(self, topic, start=None, end=None, agg_type=None, def historian_setup(self): thread_name = threading.currentThread().getName() _log.debug("historian_setup on Thread: {}".format(thread_name)) - self.bg_thread_dbutils = self.db_functs_class( - self.connection['params'], - self.table_names) + self.bg_thread_dbutils = self.db_functs_class(self.connection['params'], self.table_names) if not self._readonly: self.bg_thread_dbutils.setup_historian_tables() @@ -371,16 +328,12 @@ def historian_setup(self): topic_id_map, topic_name_map = self.bg_thread_dbutils.get_topic_map() self.topic_id_map.update(topic_id_map) self.topic_name_map.update(topic_name_map) - #_log.debug("updated topic name map. {}".format(self.topic_name_map)) self.agg_topic_id_map = self.bg_thread_dbutils.get_agg_topic_map() - def main(argv=sys.argv): - """ Main entry point for the agent. - - :param argv: - :return: + """ + Main entry point for the agent. """ try: diff --git a/services/core/SQLHistorian/sqlhistorian/settings.py b/services/core/SQLHistorian/sqlhistorian/settings.py deleted file mode 100644 index 7c9d04d4ed..0000000000 --- a/services/core/SQLHistorian/sqlhistorian/settings.py +++ /dev/null @@ -1 +0,0 @@ -SCHEDULE_PERIOD = 180 diff --git a/services/core/SQLHistorian/tests/test_sqlitehistorian.py b/services/core/SQLHistorian/tests/test_sqlitehistorian.py index 29c6b7f35c..a9b8333396 100644 --- a/services/core/SQLHistorian/tests/test_sqlitehistorian.py +++ b/services/core/SQLHistorian/tests/test_sqlitehistorian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,47 +35,27 @@ # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} -""" -pytest test cases for SQLite Historian -""" -import copy -from datetime import datetime, timedelta + import os +import json import random import sqlite3 -import sys - -from tzlocal import get_localzone import gevent import pytest from pytest import approx -import re -import pytz +from datetime import datetime, timedelta -from volttron.platform import get_volttron_root, get_services_core +from volttron.platform import get_services_core from volttron.platform.agent import utils -from volttron.platform.jsonrpc import RemoteError from volttron.platform.messaging import headers as headers_mod -from volttron.platform.messaging import topics from volttron.platform.vip.agent import Agent -# Module level variables +db_connection = None + DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" -MICROSECOND_PRECISION = 0 -table_names = dict() -connection_type = "" -query_points = { - "oat_point": "Building/LAB/Device/OutsideAirTemperature", - "mixed_point": "Building/LAB/Device/MixedAirTemperature", - "damper_point": "Building/LAB/Device/DamperSignal" -} -## NOTE - In the below configuration, source_historian' is added -## only for test case setup purposes. It is removed from config before -## using the configuration for installing the agent. -# default table_defs +# test config sqlite_platform = { - "source_historian": get_services_core("SQLHistorian"), "connection": { "type": "sqlite", "params": { @@ -84,45 +64,26 @@ } } +# default config included in agent dir +config_path = os.path.join(get_services_core("SQLHistorian"), "config.sqlite") +with open(config_path, "r") as config_file: + default_config = json.load(config_file) +assert isinstance(default_config, dict) -offset = timedelta(seconds=3) -db_connection = None -identity = None - -# Don't like declaring this global but I am not able to find a way -# to introspect this using pytest request object in the clean fixture -data_table = 'data' -topics_table = 'topics' -meta_table = 'meta' - - - -def setup_sqlite(connection_params, table_names): - print ("setup sqlite") - database_path = connection_params['database'] - print ("connecting to sqlite path " + database_path) - db_connection = sqlite3.connect(database_path) - print ("successfully connected to sqlite") - db_connection.commit() - return db_connection, 6 - - -def cleanup_sql(db_connection, truncate_tables): - cursor = db_connection.cursor() - for table in truncate_tables: - cursor.execute("DELETE FROM " + table) - db_connection.commit() - +QUERY_POINTS = { + "oat_point": "Building/LAB/Device/OutsideAirTemperature", + "mixed_point": "Building/LAB/Device/MixedAirTemperature", + "damper_point": "Building/LAB/Device/DamperSignal" +} def random_uniform(a, b): """ Creates a random uniform value for using within our tests. This function will chop a float off at a specific uniform number of decimals. - :param a: lower bound of range for return value :param b: upper bound of range for return value - :return: A psuedo random uniform float. + :return: A pseudo random uniform float. :type a: int :type b: int :rtype: float @@ -131,40 +92,51 @@ def random_uniform(a, b): return float(format_spec.format(random.uniform(a, b))) +def setup_sqlite(connection_params): + print("setup sqlite") + database_path = connection_params['database'] + print("connecting to sqlite path " + database_path) + db_connection = sqlite3.connect(database_path) + print("successfully connected to sqlite") + db_connection.commit() + return db_connection, 6. + + +def cleanup_sql(truncate_tables): + global db_connection + cursor = db_connection.cursor() + for table in truncate_tables: + cursor.execute("DELETE FROM " + table) + db_connection.commit() + + def get_table_names(config): default_table_def = {"table_prefix": "", "data_table": "data", "topics_table": "topics", "meta_table": "meta"} - tables_def = config.get('tables_def', None) - if not tables_def: - tables_def = default_table_def + tables_def = config.get('tables_def', default_table_def) table_names = dict(tables_def) - table_names["agg_topics_table"] = \ - "aggregate_" + tables_def["topics_table"] - table_names["agg_meta_table"] = \ - "aggregate_" + tables_def["meta_table"] + table_names["agg_topics_table"] = "aggregate_" + tables_def["topics_table"] + table_names["agg_meta_table"] = "aggregate_" + tables_def["meta_table"] - table_prefix = tables_def.get('table_prefix', None) - table_prefix = table_prefix + "_" if table_prefix else "" - if table_prefix: - for key, value in table_names.items(): - table_names[key] = table_prefix + table_names[key] + # table_prefix = tables_def.get('table_prefix', None) + # table_prefix = table_prefix + "_" if table_prefix else "" + # if table_prefix: + # for key, value in table_names.items(): + # table_names[key] = table_prefix + table_names[key] return table_names -@pytest.fixture(scope="module", - params=['volttron_3']) +@pytest.fixture(scope="module") def publish_agent(request, volttron_instance): # 1: Start a fake agent to publish to message bus - print("**In setup of publish_agent volttron is_running {}".format( - volttron_instance.is_running)) + print("**In setup of publish_agent volttron is_running {}".format(volttron_instance.is_running)) agent = volttron_instance.build_agent() - # 2: add a tear down method to stop the fake - # agent that published to message bus + # 2: add a tear down method to stop the fake agent that published to message bus def stop_agent(): print("In teardown method of publish_agent") if isinstance(agent, Agent): @@ -174,84 +146,29 @@ def stop_agent(): return agent -@pytest.fixture(scope="module") -def query_agent(request, volttron_instance): - # 1: Start a fake agent to query the historian agent in volttron_instance - agent = volttron_instance.build_agent() - - # 2: add a tear down method to stop the fake - # agent that published to message bus - def stop_agent(): - print("In teardown method of query_agent") - agent.core.stop() - - request.addfinalizer(stop_agent) - return agent - - -# Fixtures for setup and teardown of historian agent -@pytest.fixture(scope="module") -def historian(request, volttron_instance, query_agent): - global db_connection, table_names, \ - connection_type, identity - - print("** Setting up test_historian module **") - # Make database connection - sqlite_platform['connection']['params']['database'] = \ - volttron_instance.volttron_home + "/historian.sqlite" - - table_names = get_table_names(sqlite_platform) - - # 2: Open db connection that can be used for row deletes after - # each test method. Create tables - db_connection, MICROSECOND_PRECISION = \ - setup_sqlite(sqlite_platform['connection']['params'], table_names) - - print ("sqlite_platform -- {}".format(sqlite_platform)) - # 2. Install agent - historian - temp_config = copy.copy(sqlite_platform) - source = temp_config.pop('source_historian') - historian_uuid = volttron_instance.install_agent( - vip_identity='platform.historian', - agent_dir=source, - config_file=temp_config, - start=True) - print("agent id: ", historian_uuid) - identity = 'platform.historian' - - # 3: add a tear down method to stop historian agent - def stop_agent(): - print("In teardown method of sqlagent") - if volttron_instance.is_running(): - volttron_instance.stop_agent(historian_uuid) - volttron_instance.remove_agent(historian_uuid) - - request.addfinalizer(stop_agent) - - return sqlite_platform - - -@pytest.fixture() -def clean(request): - global db_connection, connection_type, table_names - def delete_rows(): - cleanup_sql(db_connection, [table_names['data_table']]) - request.addfinalizer(delete_rows) +# @pytest.fixture(params=[sqlite_platform, default_config]) +# def clean(request): +# global db_connection +# +# table_names = get_table_names(request.param) +# table_names.pop("table_prefix") +# +# def delete_rows(): +# cleanup_sql(table_names.values()) +# request.addfinalizer(delete_rows) def publish(publish_agent, topic, header, message): if isinstance(publish_agent, Agent): - publish_agent.vip.pubsub.publish('pubsub', - topic, - headers=header, - message=message).get(timeout=10) + publish_agent.vip.pubsub.publish('pubsub', topic, headers=header, message=message).get(timeout=10) else: publish_agent.publish_json(topic, header, message) @pytest.mark.historian -def test_sqlite_timeout(request, historian, publish_agent, query_agent, - clean, volttron_instance): +@pytest.mark.sqlhistorian +@pytest.mark.parametrize("config", [sqlite_platform, default_config]) +def test_sqlite_timeout(request, publish_agent, volttron_instance, config): """ Test basic functionality of historian. Inserts three points as part of all topic and checks if all three got into the database @@ -259,40 +176,32 @@ def test_sqlite_timeout(request, historian, publish_agent, query_agent, Should be able to query data based on topic name. Result should contain both data and metadata :param request: pytest request object - :param publish_agent: instance of volttron 2.0/3.0agent used to publish - :param query_agent: instance of fake volttron 3.0 agent used to query - using rpc - :param historian: instance of the historian tested + :param publish_agent: instance of agent used to publish :param clean: teardown function + :param config: historian config """ - global query_points, DEVICES_ALL_TOPIC, db_connection + global db_connection + db_connection, microsecond_precision = setup_sqlite(config['connection']['params']) + + print("\n** test_sqlite_timeout for {}**".format(request.keywords.node.name)) - # print('HOME', volttron_instance.volttron_home) - print( - "\n** test_sqlite_timeout for {}**".format(request.keywords.node.name)) agent_uuid = None + table_names = {} try: - new_historian = copy.copy(historian) - new_historian["connection"]["params"]["timeout"] = 15 - new_historian["tables_def"] = {"table_prefix": "timeout_param", - "data_table": "data", "topics_table": "topics", - "meta_table": "meta"} + config["connection"]["params"]["timeout"] = 15 + table_names = get_table_names(config) + config["tables_def"] = table_names # 1: Install historian agent # Install and start historian agent - source = new_historian.pop('source_historian') - agent_uuid = volttron_instance.install_agent(agent_dir=source, - config_file=new_historian, start=True, - vip_identity='sqlite.historian') + agent_uuid = volttron_instance.install_agent(agent_dir=get_services_core("SQLHistorian"), + config_file=config, start=True, + vip_identity='sqlite.historian') print("agent id: ", agent_uuid) - # Publish fake data. The format mimics the format used by VOLTTRON drivers. - # Make some random readings. Randome readings are going to be - # within the tolerance here. - format_spec = "{0:.13f}" + # Make some random readings. Random readings are going to be within the tolerance here. oat_reading = random_uniform(30, 100) - mixed_reading = oat_reading + random_uniform(-5, 5) damper_reading = random_uniform(0, 100) float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} @@ -320,49 +229,21 @@ def test_sqlite_timeout(request, historian, publish_agent, query_agent, gevent.sleep(1) # Query the historian - result = query_agent.vip.rpc.call('sqlite.historian', - 'query', - topic=query_points['oat_point'], - count=20, - order="LAST_TO_FIRST").get(timeout=100) + result = publish_agent.vip.rpc.call('sqlite.historian', 'query', + topic=QUERY_POINTS['oat_point'], + count=20, + order="LAST_TO_FIRST").get(timeout=100) print('Query Result', result) assert (len(result['values']) == 1) (now_date, now_time) = now.split("T") assert result['values'][0][0] == now_date + 'T' + now_time + '+00:00' assert (result['values'][0][1] == approx(oat_reading)) assert set(result['metadata'].items()) == set(float_meta.items()) + except Exception as e: + print(e) finally: - if agent_uuid: - cleanup_sql(db_connection, ['timeout_param_data', - 'timeout_param_topics', - 'timeout_param_meta']) + if agent_uuid and table_names: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) - - -def publish_devices_fake_data(publish_agent, time=None): - # Publish fake data. The format mimics the format used by VOLTTRON drivers. - # Make some random readings - global DEVICES_ALL_TOPIC - reading = random_uniform(30, 100) - meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} - # Create a message for all points. - all_message = [{'OutsideAirTemperature': reading, - 'MixedAirTemperature': reading, - 'DamperSignal': reading}, - {'OutsideAirTemperature': meta, - 'MixedAirTemperature': meta, - 'DamperSignal': meta - }] - # Create timestamp - if not time: - time = utils.format_timestamp(datetime.utcnow()) - # now = '2015-12-02T00:00:00' - headers = { - headers_mod.DATE: time, - headers_mod.TIMESTAMP: time - } - print("Published time in header: " + time) - # Publish messages - publish(publish_agent, DEVICES_ALL_TOPIC, headers, all_message) - return time, reading, meta + # table_names.pop("table_prefix") + # cleanup_sql(['topics']) diff --git a/services/core/SQLHistorian/tests/test_sqlitehistorian_unit.py b/services/core/SQLHistorian/tests/test_sqlitehistorian_unit.py new file mode 100644 index 0000000000..ac2bfe6251 --- /dev/null +++ b/services/core/SQLHistorian/tests/test_sqlitehistorian_unit.py @@ -0,0 +1,92 @@ +import os +from shutil import rmtree +import subprocess + +import pytest +from gevent import sleep +from datetime import timedelta +from services.core.SQLHistorian.sqlhistorian import historian + +CACHE_NAME = "backup.sqlite" +HISTORIAN_DB = "./data/historian.sqlite" + + +def test_historian_should_filter_duplicates(sql_historian): + # Add duplicates to queue + # Uniqueness is defined as a combination of topic and timestamp + # Thus a duplicate has the same topic and timestamp + for num in range(40, 43): + sql_historian._capture_record_data( + peer=None, + sender=None, + bus=None, + topic="duplicate_topic", + headers={ + "Date": "2015-11-17 21:24:10.189393+00:00", + "TimeStamp": "2015-11-17 21:24:10.189393+00:00", + }, + message=f"last_duplicate_{num}", + ) + + # Add unique records to queue + for num in range(2, 5): + sql_historian._capture_record_data( + peer=None, + sender=None, + bus=None, + topic=f"unique_record_topic{num}", + headers={ + "Date": f"2020-11-17 21:2{num}:10.189393+00:00", + "TimeStamp": f"2020-11-17 21:2{num}:10.189393+00:00", + }, + message=f"unique_record_{num}", + ) + + # default is 300 seconds or 5 minutes; setting to 1 second so tests don't take so long + sql_historian._retry_period = 1 + # When SQLHistorian is normally started on the platform, this attribute is set. + # Since the SQLHistorian is being tested without the volttron platform, + # this attribute must be set so that the test can run + sql_historian._max_time_publishing = timedelta(float(1)) + + sql_historian.start_process_thread() + # give time for all databases to initialize and historian to process workflow + sleep(3) + + assert query_db("""select * from outstanding""", CACHE_NAME) == "" + # check that the historian saves the last duplicate from the cache in the "data" table + assert f'2015-11-17T21:24:10.189393+00:00|1|"last_duplicate_42"' in query_db( + """select * from data""", HISTORIAN_DB + ) + # check that the historian saves only one duplicate in the "topics" table + assert f"1|duplicate_topic" in query_db("""select * from topics""", HISTORIAN_DB) + + +@pytest.fixture() +def sql_historian(): + config = {"connection": {"type": "sqlite", "params": {"database": HISTORIAN_DB}}} + + yield historian.historian(config) + + # Teardown + # the backup database is an sqlite database with the name "backup.sqlite". + # the db is created if it doesn't exist; see the method: BackupDatabase._setupdb(check_same_thread) for details + # also, delete the historian database for this test, which is an sqlite db in folder /data + if os.path.exists("./data"): + rmtree("./data") + if os.path.exists(CACHE_NAME): + os.remove(CACHE_NAME) + + +def query_db(query, db): + output = subprocess.run(["sqlite3", db, query], text=True, capture_output=True) + # check_returncode() will raise a CalledProcessError if the query fails + # see https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode + output.check_returncode() + return output.stdout + + +def get_tables(db): + result = query_db(""".tables""", db) + res = set(result.replace("\n", "").split()) + return res diff --git a/services/core/SQLiteTaggingService/README.md b/services/core/SQLiteTaggingService/README.md new file mode 100644 index 0000000000..3c55f61f29 --- /dev/null +++ b/services/core/SQLiteTaggingService/README.md @@ -0,0 +1,72 @@ +# SQLite Tagging Service + +SQLite tagging service provide APIs to tag both topic names(device +points) and topic name prefixes (campus, building, unit/equipment, sub +unit) and then query for relevant topics based on saved tag names and +values. The SQLite tagging services stores the tags in a sqlite3 +database and hence provides a way to use this feature in VOLTTRON out of +the box. + +Tags used by this agent are not user defined. They have to be +pre-defined in a resource file at volttron_data/tagging_resources. The +agent validates against this predefined list of tags every time user add +tags to topics. Tags can be added to one topic at a time or multiple +topics by using a topic name pattern(regular expression). This agent +uses tags from [project haystack](https://project-haystack.org/). and +adds a few custom tags for campus and VOLTTRON point name. + +Each tag has an associated value and users can query for topic names +based tags and its values using a simplified sql-like query string. +Queries can specify tag names with values or tags without values for +boolean tags(markers). Queries can combine multiple conditions with +keyword AND and OR, and use the keyword NOT to negate a conditions. + +## Dependencies and Limitations + +1. When adding tags to topics this agent calls the + platform.historian\'s get_topic_list and hence requires the + platform.historian to be running but it doesn\'t require the + historian to use sqlite or any specific database. It does not + require platform.historian to be running for using its query APIs. +2. Resource files that provides the list of valid tags is mandatory and + should be in volttron_data/tagging_reosurces/tags.csv +3. Tagging service only provides APIs query for topic names based on + tags. Once the list of topic names is retrieved, users should use + the historian APIs to get the data corresponding to those topics. +4. Since RDMS is not a natural fit for tagname=value kind of data, + performance of queries will not be high if you have several + thousands of topics and several hundreds tags for each topic and + perform complex queries. For intermediate level data and query + complexity, performance can be improved by increasing the page limit + of sqlite. +5. Current version of tagging service does not support versioning of + tag/values. When tags values set using tagging service APIs + update/overwrite any existing tag entries in the database + +## Configuration Options + +The following JSON configuration file shows all the options currently +supported by this agent. + +``` {.python} +{ + # sqlite connection parameters + "connection": { + "type": "sqlite", + "params": { + "database": "~/.volttron/data/volttron.tags.sqlite" + } + }, + # optional. Specify if collections created for tagging should have names + # starting with a specific prefix _ + "table_prefix":"volttron", + + # optional. Specify if you want tagging service to query the historian + # with this vip identity. defaults to platform.historian + "historian_vip_identity": "crate.historian" +} +``` + +## See Also + +[TaggingServiceSpec](https://volttron.readthedocs.io/en/develop/developing-volttron/developing-agents/specifications/tagging-service.html) diff --git a/services/core/SQLiteTaggingService/README.rst b/services/core/SQLiteTaggingService/README.rst deleted file mode 100644 index 983dc42bc4..0000000000 --- a/services/core/SQLiteTaggingService/README.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. _Sqlite_Tagging_Service: - -====================== -SQLite Tagging Service -====================== - -SQLite tagging service provide APIs to tag both topic names(device points) and -topic name prefixes (campus, building, unit/equipment, sub unit) and then -query for relevant topics based on saved tag names and values. The SQLite -tagging services stores the tags in a sqlite3 database and hence provides a -way to use this feature in VOLTTRON out of the box. - -Tags used by this agent are not user defined. They have to be pre-defined in a -resource file at volttron_data/tagging_resources. The agent validates against -this predefined list of tags every time user add tags to topics. Tags can be -added to one topic at a time or multiple topics by using a topic name -pattern(regular expression). This agent uses tags from -`project haystack `_. and adds a few custom -tags for campus and VOLTTRON point name. - -Each tag has an associated value and users can query for topic names based -tags and its values using a simplified sql-like query string. Queries can -specify tag names with values or tags without values for boolean tags(markers). -Queries can combine multiple conditions with keyword AND and OR, -and use the keyword NOT to negate a conditions. - -Dependencies and Limitations ----------------------------- - -1. When adding tags to topics this agent calls the platform.historian's - get_topic_list and hence requires the platform.historian to be running - but it doesn't require the historian to use sqlite or any specific - database. It does not require platform.historian to be running for using its - query APIs. -2. Resource files that provides the list of valid tags is mandatory and should - be in volttron_data/tagging_reosurces/tags.csv -3. Tagging service only provides APIs query for topic names based on tags. - Once the list of topic names is retrieved, users should use the historian - APIs to get the data corresponding to those topics. -4. Since RDMS is not a natural fit for tagname=value kind of data, performance - of queries will not be high if you have several thousands of topics and - several hundreds tags for each topic and perform complex queries. For - intermediate level data and query complexity, performance can be improved - by increasing the page limit of sqlite. -5. Current version of tagging service does not support versioning of - tag/values. When tags values set using tagging service APIs update/overwrite - any existing tag entries in the database - -Configuration Options ---------------------- - -The following JSON configuration file shows all the options currently supported -by this agent. - -.. code-block:: python - - { - # sqlite connection parameters - "connection": { - "type": "sqlite", - "params": { - "database": "~/.volttron/data/volttron.tags.sqlite" - } - }, - # optional. Specify if collections created for tagging should have names - # starting with a specific prefix _ - "table_prefix":"volttron", - - # optional. Specify if you want tagging service to query the historian - # with this vip identity. defaults to platform.historian - "historian_vip_identity": "crate.historian" - } - - -See Also --------- - -`TaggingServiceSpec`_ \ No newline at end of file diff --git a/services/core/SQLiteTaggingService/Tests/test_sqlite_tagging.py b/services/core/SQLiteTaggingService/Tests/test_sqlite_tagging.py new file mode 100644 index 0000000000..62483e549f --- /dev/null +++ b/services/core/SQLiteTaggingService/Tests/test_sqlite_tagging.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json +import gevent +import pytest + +from volttron.platform import get_services_core +from volttron.platform.messaging.health import STATUS_GOOD + + +@pytest.mark.sqlhistorian +@pytest.mark.tagging +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("SQLiteTaggingService"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + volttron_instance.install_agent( + agent_dir=get_services_core("SQLiteTaggingService"), + config_file=config_json, + start=True, + vip_identity="health_test") + + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/SQLiteTaggingService/config b/services/core/SQLiteTaggingService/config index 872b468afa..f2a634d3d0 100644 --- a/services/core/SQLiteTaggingService/config +++ b/services/core/SQLiteTaggingService/config @@ -5,6 +5,6 @@ "database": "~/.volttron/data/volttron.tags.sqlite" } }, - "table_prefix":"volttron", #optional - "historian_vip_identity":"platform.historian" #optional -} \ No newline at end of file + "table_prefix":"volttron", + "historian_vip_identity":"platform.historian" +} diff --git a/services/core/SQLiteTaggingService/conftest.py b/services/core/SQLiteTaggingService/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/core/SQLiteTaggingService/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/SQLiteTaggingService/setup.py b/services/core/SQLiteTaggingService/setup.py index 7aabdc3931..38d236e876 100644 --- a/services/core/SQLiteTaggingService/setup.py +++ b/services/core/SQLiteTaggingService/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/SQLiteTaggingService/sqlite/tagging.py b/services/core/SQLiteTaggingService/sqlite/tagging.py index f25b2ff74e..8e042fedc0 100644 --- a/services/core/SQLiteTaggingService/sqlite/tagging.py +++ b/services/core/SQLiteTaggingService/sqlite/tagging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -309,8 +309,7 @@ def _init_category_tags(self): "VALUES (?, ?);".format(self.category_tags_table), to_db) self.sqlite_utils.commit() else: - _log.warn("No category to tags mapping to initialize. No such " - "file " + file_path) + _log.warning("No category to tags mapping to initialize. No such file " + file_path) def _init_topic_tags(self): self.sqlite_utils.execute_stmt( diff --git a/services/core/VolttronCentral/README.md b/services/core/VolttronCentral/README.md new file mode 100644 index 0000000000..2aa67db4d4 --- /dev/null +++ b/services/core/VolttronCentral/README.md @@ -0,0 +1,60 @@ +# VOLTTRON Central Agent + +The VOLTTRON Central agent allows the control of remote VOLTTRON +platforms through the registered platform agents. The registration of +platforms can be initiated from a remote platform agent. Once a platform +agent is registered the allowed operations are start, stop, install, and +run methods on the registered platform\'s agents. + +# Configuration + +The agentid does not have to be unique. It is what will be used as a +human readable name on volttron central. If it is not set the default +\'volttron central\' will be used. The default config file is pasted +below. in the following. + + # By default the webroot will be relative to the installation directory + # of the agent when it is installed. One can override this by specifying + # the root directory here. + # "webroot": "path/to/webroot", + + # Authentication for users is handled through a naive password algorithm + # import hashlib + # hashlib.sha512(password).hexdigest() where password is the plain text password. + + { + "agentid": "volttron central", + "vip_identity": "volttron.central", + "log_file": "~/.volttron/volttron.log", + "server" : { + "host": "127.0.0.1", + "port": 8080, + "debug": "False" + }, + "users" : { + "reader" : { + "password" : "2d7349c51a3914cd6f5dc28e23c417ace074400d7c3e176bcf5da72fdbeb6ce7ed767ca00c6c1fb754b8df5114fc0b903960e7f3befe3a338d4a640c05dfaf2d", + "groups" : [ + "reader" + ] + }, + "writer" : { + "password" : "f7c31a682a838bbe0957cfa0bb060daff83c488fa5646eb541d334f241418af3611ff621b5a1b0d327f1ee80da25e04099376d3bc533a72d2280964b4fab2a32", + "groups" : [ + "writer" + ] + }, + "admin" : { + "password" : "c7ad44cbad762a5da0a452f9e854fdc1e0e7a52a38015f23f3eab1d80b931dd472634dfac71cd34ebc35d16ab7fb8a90c81f975113d6c7538dc69dd8de9077ec", + "groups" : [ + "admin" + ] + }, + "dorothy" : { + "password" : "cf1b67402d648f51ef6ff8805736d588ca07cbf018a5fba404d28532d839a1c046bfcd31558dff658678b3112502f4da9494f7a655c3bdc0e4b0db3a5577b298", + "groups" : [ + "reader, writer" + ] + } + } + } diff --git a/services/core/VolttronCentral/README.rst b/services/core/VolttronCentral/README.rst deleted file mode 100644 index 4773efc5ba..0000000000 --- a/services/core/VolttronCentral/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -The volttron central agent allows the control of different volttron platforms -through the platform agent that are registered. The registration of -platforms can be initiated either from the platform agent side or from volttron -central requesting to manage a specified platform agent. Once a platform -agent is registered the allowed operations are start, stop, install, and run -methods on the registered platform's agents. - -Configuration ------------------------ -The agentid does not have to be unique. It is what will be used -as a human readable name on volttron central. If it is not set the -default 'volttron central' will be used. The default config file is pasted below. -in the following. -{ - "agentid": "volttron central", - "vip_identity": "volttron.central", - "log_file": "~/.volttron/volttron.log", - "server" : { - "host": "127.0.0.1", - "port": 8080, - "debug": "False" - }, - "users" : { - "reader" : { - "password" : "2d7349c51a3914cd6f5dc28e23c417ace074400d7c3e176bcf5da72fdbeb6ce7ed767ca00c6c1fb754b8df5114fc0b903960e7f3befe3a338d4a640c05dfaf2d", - "groups" : [ - "reader" - ] - }, - "writer" : { - "password" : "f7c31a682a838bbe0957cfa0bb060daff83c488fa5646eb541d334f241418af3611ff621b5a1b0d327f1ee80da25e04099376d3bc533a72d2280964b4fab2a32", - "groups" : [ - "writer" - ] - }, - "admin" : { - "password" : "c7ad44cbad762a5da0a452f9e854fdc1e0e7a52a38015f23f3eab1d80b931dd472634dfac71cd34ebc35d16ab7fb8a90c81f975113d6c7538dc69dd8de9077ec", - "groups" : [ - "admin" - ] - }, - "dorothy" : { - "password" : "cf1b67402d648f51ef6ff8805736d588ca07cbf018a5fba404d28532d839a1c046bfcd31558dff658678b3112502f4da9494f7a655c3bdc0e4b0db3a5577b298", - "groups" : [ - "reader, writer" - ] - } - } -} diff --git a/services/core/VolttronCentral/config b/services/core/VolttronCentral/config index c94e423796..0a01e3f143 100644 --- a/services/core/VolttronCentral/config +++ b/services/core/VolttronCentral/config @@ -1,36 +1,26 @@ -{ - # By default the webroot will be relative to the installation directory - # of the agent when it is installed. One can override this by specifying - # the root directory here. - # "webroot": "path/to/webroot", +# By default the webroot will be relative to the installation directory +# of the agent when it is installed. One can override this by specifying +# the root directory here. +# webroot: path/to/webroot, - # Authentication for users is handled through a naive password algorithm - # import hashlib - # hashlib.sha512(password).hexdigest() where password is the plain text password. - "users" : { - "reader" : { - "password" : "2d7349c51a3914cd6f5dc28e23c417ace074400d7c3e176bcf5da72fdbeb6ce7ed767ca00c6c1fb754b8df5114fc0b903960e7f3befe3a338d4a640c05dfaf2d", - "groups" : [ - "reader" - ] - }, - "writer" : { - "password" : "f7c31a682a838bbe0957cfa0bb060daff83c488fa5646eb541d334f241418af3611ff621b5a1b0d327f1ee80da25e04099376d3bc533a72d2280964b4fab2a32", - "groups" : [ - "writer" - ] - }, - "admin" : { - "password" : "c7ad44cbad762a5da0a452f9e854fdc1e0e7a52a38015f23f3eab1d80b931dd472634dfac71cd34ebc35d16ab7fb8a90c81f975113d6c7538dc69dd8de9077ec", - "groups" : [ - "admin" - ] - }, - "dorothy" : { - "password" : "cf1b67402d648f51ef6ff8805736d588ca07cbf018a5fba404d28532d839a1c046bfcd31558dff658678b3112502f4da9494f7a655c3bdc0e4b0db3a5577b298", - "groups" : [ - "reader, writer" - ] - } - } -} +# Authentication for users is handled through a naive password algorithm +# import hashlib +# hashlib.sha512(password).hexdigest() where password is the plain text password. +users: + reader: + password: 2d7349c51a3914cd6f5dc28e23c417ace074400d7c3e176bcf5da72fdbeb6ce7ed767ca00c6c1fb754b8df5114fc0b903960e7f3befe3a338d4a640c05dfaf2d + groups: + - reader + writer: + password: f7c31a682a838bbe0957cfa0bb060daff83c488fa5646eb541d334f241418af3611ff621b5a1b0d327f1ee80da25e04099376d3bc533a72d2280964b4fab2a32 + groups: + - writer + admin: + password: c7ad44cbad762a5da0a452f9e854fdc1e0e7a52a38015f23f3eab1d80b931dd472634dfac71cd34ebc35d16ab7fb8a90c81f975113d6c7538dc69dd8de9077ec + groups: + - admin + dorothy: + password: cf1b67402d648f51ef6ff8805736d588ca07cbf018a5fba404d28532d839a1c046bfcd31558dff658678b3112502f4da9494f7a655c3bdc0e4b0db3a5577b298 + groups: + - reader + - writer diff --git a/services/core/VolttronCentral/setup.py b/services/core/VolttronCentral/setup.py index 017308ea92..f9da1b0c0c 100644 --- a/services/core/VolttronCentral/setup.py +++ b/services/core/VolttronCentral/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/VolttronCentral/tests/test_platforms.py b/services/core/VolttronCentral/tests/test_platforms.py index 12533884d6..3264d05ec3 100644 --- a/services/core/VolttronCentral/tests/test_platforms.py +++ b/services/core/VolttronCentral/tests/test_platforms.py @@ -1,13 +1,22 @@ import pytest import base64 from mock import MagicMock +from volttrontesting.utils.utils import AgentMock +from volttron.platform.vip.agent import Agent from volttroncentral.platforms import PlatformHandler, Platforms from volttroncentral.agent import VolttronCentralAgent -def test_when_platform_added_disconnected(): - vc = MagicMock() - platforms = Platforms(vc=vc) +@pytest.fixture +def mock_vc(): + VolttronCentralAgent.__bases__ = (AgentMock.imitate(Agent, VolttronCentralAgent()),) + vc = VolttronCentralAgent() + vc._configure("test_config", "NEW", {}) + yield vc + + +def test_when_platform_added_disconnected(mock_vc): + platforms = Platforms(vc=mock_vc) assert platforms assert len(platforms.get_platform_vip_identities()) == 0 assert len(platforms.get_platform_list(None, None)) == 0 @@ -16,7 +25,7 @@ def test_when_platform_added_disconnected(): platforms.add_platform(new_platform_vip) assert len(platforms.get_platform_vip_identities()) == 1 assert len(platforms.get_platform_list(None, None)) == 1 - encoded_vip = base64.b64encode(new_platform_vip.encode('utf-8')) + encoded_vip = base64.b64encode(new_platform_vip.encode('utf-8')).decode('utf-8') platform = platforms.get_platform(encoded_vip) assert isinstance(platform, PlatformHandler) diff --git a/services/core/VolttronCentral/tests/test_vc.py b/services/core/VolttronCentral/tests/test_vc.py index aaba200543..ddb24f37d1 100644 --- a/services/core/VolttronCentral/tests/test_vc.py +++ b/services/core/VolttronCentral/tests/test_vc.py @@ -1,12 +1,171 @@ -# import pytest -# import os +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# from volttron.platform.web import DiscoveryInfo -# # noinspection PyUnresolvedReferences -# from vc_fixtures import vc_instance, vcp_instance, vc_and_vcp_together +# Copyright 2020, Battelle Memorial Institute. # -# import gevent +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + + +import pytest +import os +import yaml +import json +import requests + +from volttron.platform.messaging.health import STATUS_GOOD +from volttron.platform.web import DiscoveryInfo +# noinspection PyUnresolvedReferences +from vc_fixtures import vc_instance, vcp_instance, vc_and_vcp_together +from volttrontesting.utils.utils import AgentMock +from volttron.platform import jsonapi, jsonrpc, get_services_core +from volttron.platform.web.websocket import VolttronWebSocket +from volttrontesting.utils.web_utils import get_test_web_env +from volttron.platform.vip.agent import Agent +from services.core.VolttronCentral.volttroncentral.agent import VolttronCentralAgent +import gevent + + +@pytest.fixture +def mock_vc(): + VolttronCentralAgent.__bases__ = (AgentMock.imitate(Agent, VolttronCentralAgent()),) + vc = VolttronCentralAgent() + vc._configure("test_config", "NEW", {}) + yield vc + + +@pytest.fixture +def mock_jsonrpc_env(path="jsonrpc", input_data=None, method="POST"): + yield get_test_web_env(path, input_data, method=method) + + +@pytest.fixture +def mock_response(monkeypatch): + def mock_resp(*args, **kwargs): + class MockResp(): + def __init__(self): + mock_args = kwargs['json'] + if mock_args['username'] == 'test' and mock_args['password'] == 'test': + self.ok = True + self.text = "super_secret_auth_token" + else: + self.ok = False + self.text = "invalid username/password" + return MockResp() + monkeypatch.setattr(requests, "post", mock_resp) + + +@pytest.mark.vc +def test_jsonrpc_get_authorization(mock_response, mock_vc, mock_jsonrpc_env, monkeypatch): + + mock_claims = {"groups": ["test_admin"]} + mock_vc.vip.web.configure_mock(**{"get_user_claims.return_value": mock_claims}) + + data = jsonrpc.json_method("12345", "get_authorization", {"username": "test", "password": "test"}, None) + + assert len(mock_vc._authenticated_sessions._sessions) == 0 + + mock_vc.jsonrpc(mock_jsonrpc_env, data) + + assert len(mock_vc._authenticated_sessions._sessions) == 1 + + data = jsonrpc.json_method("12345", "get_authorization", {"username": "test", "password": "nah"}, None) + response = mock_vc.jsonrpc(mock_jsonrpc_env, data) + assert response['error']['message'] == "Invalid username/password specified." + + +@pytest.fixture +def mock_vc_jsonrpc(mock_response, mock_vc, mock_jsonrpc_env, monkeypatch): + + mock_claims = {"groups": ["test_admin"]} + mock_vc.vip.web.configure_mock(**{"get_user_claims.return_value": mock_claims}) + # mock_vc.vip.web.configure_mock(**{"register_websocket.return_value": VolttronWebSocket}) + data = jsonrpc.json_method("12345", "get_authorization", {"username": "test", "password": "test"}, None) + mock_vc.jsonrpc(mock_jsonrpc_env, data) + #mock_vc_env = {"mock_vc": mock_vc, "mock_env": mock_jsonrpc_env} + + yield mock_vc + +@pytest.fixture +def mock_websocket(mock_vc): + mock_vc.vip.web.configure_mock(**{"register_websocket.return_value": VolttronWebSocket}) + #.vip.web.configure_mock(**{"register_websocket.return_value": VolttronWebSocket}) + + + +@pytest.mark.vc +def test_jsonrpc_is_authorized(mock_vc_jsonrpc, mock_jsonrpc_env): + data = jsonrpc.json_method("12345", "list_platforms", None, None) + data['authorization'] = "super_secret_auth_token" + response = mock_vc_jsonrpc.jsonrpc(mock_jsonrpc_env, data) + assert len(response['result']) is 0 and type(response['result']) is list + + +@pytest.mark.vc +def test_jsonrpc_is_unauthorized(mock_vc_jsonrpc, mock_jsonrpc_env): + data = jsonrpc.json_method("12345", "list_platforms", None, None) + data['authorization'] = "really_bad_auth_token" + response = mock_vc_jsonrpc.jsonrpc(mock_jsonrpc_env, data) + assert response['error']['message'] == "Invalid authentication token" + + +@pytest.mark.vc +def test_websocket_open_authenticate(mock_vc_jsonrpc, mock_jsonrpc_env): + vc = mock_vc_jsonrpc + print("BREAK") + assert True + +@pytest.mark.vc +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("VolttronCentral"), "config") + with open(config_path, "r") as config_file: + config_json = yaml.safe_load(config_file) + assert isinstance(config_json, dict) + + volttron_instance.install_agent( + agent_dir=get_services_core("VolttronCentral"), + config_file=config_json, + start=True, + vip_identity="health_test") + + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + # # def test_platform_was_registered(vc_and_vcp_together): # diff --git a/services/core/VolttronCentral/tests/test_vc_autoregister.py b/services/core/VolttronCentral/tests/test_vc_autoregister.py index b320879a7c..01d7cb2041 100644 --- a/services/core/VolttronCentral/tests/test_vc_autoregister.py +++ b/services/core/VolttronCentral/tests/test_vc_autoregister.py @@ -8,10 +8,10 @@ VOLTTRON_CENTRAL_PLATFORM) from volttrontesting.utils.agent_additions import (add_volttron_central, add_volttron_central_platform) - from vctestutils import APITester + @pytest.fixture(scope="module") def multi_messagebus_vc_vcp(volttron_multi_messagebus): vcp_instance, vc_instance = volttron_multi_messagebus() @@ -31,8 +31,8 @@ def multi_messagebus_vc_vcp(volttron_multi_messagebus): # Update vcp_config store to add the volttron-central-address from vc to the # config store config = jsonapi.dumps({'volttron-central-address': vc_instance.bind_web_address}) - capabilities = {'edit_config_store': {'identity': VOLTTRON_CENTRAL_PLATFORM}} - vcp_instance.add_capabilities(vcp_instance.dynamic_agent.core.publickey, capabilities) + # capabilities = {'edit_config_store': {'identity': VOLTTRON_CENTRAL_PLATFORM}} + # vcp_instance.add_capabilities(vcp_instance.dynamic_agent.core.publickey, capabilities) vcp_instance.dynamic_agent.vip.rpc.call(CONFIGURATION_STORE, "manage_store", VOLTTRON_CENTRAL_PLATFORM, @@ -46,15 +46,16 @@ def multi_messagebus_vc_vcp(volttron_multi_messagebus): vcp_instance.remove_agent(vcp_uuid) vc_instance.remove_agent(vc_uuid) - +@pytest.mark.timeout(360) def test_able_to_register_unregister(multi_messagebus_vc_vcp): - gevent.sleep(10) vcp_instance, vc_instance, vcp_uuid = multi_messagebus_vc_vcp apitester = APITester(vc_instance) platforms = apitester.list_platforms() - + assert vc_instance.is_running() + assert vcp_instance.is_running() + gevent.sleep(7) assert len(platforms) == 1 platform = platforms[0] @@ -62,7 +63,7 @@ def test_able_to_register_unregister(multi_messagebus_vc_vcp): vcp_instance.stop_agent(vcp_uuid) - gevent.sleep(12) + gevent.sleep(7) assert not vcp_instance.is_agent_running(vcp_uuid) # print(vc_instance.dynamic_agent.vip.peerlist().get(timeout=10)) platforms = apitester.list_platforms() diff --git a/services/core/VolttronCentral/tests/test_webapi.py b/services/core/VolttronCentral/tests/test_webapi.py index 3d423b5d9b..06f54b224c 100644 --- a/services/core/VolttronCentral/tests/test_webapi.py +++ b/services/core/VolttronCentral/tests/test_webapi.py @@ -3,17 +3,16 @@ import gevent import pytest -from volttron.platform import get_examples +from volttron.platform import get_examples, jsonapi from volttrontesting.utils.agent_additions import \ add_volttron_central_platform, add_volttron_central, add_listener from volttrontesting.utils.platformwrapper import PlatformWrapper, \ start_wrapper_platform -from zmq.utils import jsonapi from vctestutils import APITester - -from vc_fixtures import vc_and_vcp_together, vc_instance, vcp_instance +from services.core.VolttronCentral.tests.vc_fixtures import \ + vc_and_vcp_together, vc_instance, vcp_instance @pytest.fixture(scope="module") @@ -25,12 +24,13 @@ def auto_registered_local(vc_and_vcp_together): def test_platform_list(auto_registered_local): webapi = auto_registered_local - + gevent.sleep(5) assert len(webapi.list_platforms()) == 1 def test_platform_inspect(auto_registered_local): webapi = auto_registered_local + gevent.sleep(5) platforms = webapi.list_platforms() platform_uuid = platforms[0]["uuid"] @@ -65,10 +65,10 @@ def vc_vcp_platforms(): volttron_central_serverkey=vc.serverkey) vc_uuid = add_volttron_central(vc) + gevent.sleep(5) vcp_uuid = add_volttron_central_platform(vcp) - + gevent.sleep(5) # Sleep so we know we are registered - gevent.sleep(15) yield vc, vcp vc.shutdown_platform() @@ -135,12 +135,12 @@ def test_store_list_get_configuration(auto_registered_local): config_name = "fuzzywidgets" webapi = auto_registered_local - + gevent.sleep(5) platforms = webapi.list_platforms() platform_uuid = platforms[0]["uuid"] resp = webapi.store_agent_config(platform_uuid, identity, config_name, - str_data) + str_data) assert resp is None resp = webapi.list_agent_configs(platform_uuid, identity) @@ -163,7 +163,7 @@ def test_store_delete_configuration(auto_registered_local): config_name = "fuzzywidgets" webapi = auto_registered_local - + gevent.sleep(5) platforms = webapi.list_platforms() platform_uuid = platforms[0]["uuid"] @@ -191,7 +191,7 @@ def test_correct_reader_permissions_on_vcp_vc_and_listener_agent(vc_vcp_platform vc, vcp = vc_vcp_platforms api = APITester(vc, username="reader", password="reader") - + gevent.sleep(5) platform = api.list_platforms()[0] print('The platform is {}'.format(platform)) @@ -217,7 +217,7 @@ def test_correct_reader_permissions_on_vcp_vc_and_listener_agent(vc_vcp_platform def test_correct_admin_permissions_on_vcp_vc_and_listener_agent(auto_registered_local): apitester = auto_registered_local - + gevent.sleep(5) platform = apitester.list_platforms()[0] print('The platform is {}'.format(platform)) @@ -253,7 +253,7 @@ def test_correct_admin_permissions_on_vcp_vc_and_listener_agent(auto_registered_ def test_listagent(auto_registered_local): webapi = auto_registered_local - + gevent.sleep(5) platform = webapi.list_platforms()[0] print('The platform is {}'.format(platform)) @@ -285,16 +285,26 @@ def test_installagent(auto_registered_local): import base64 import random + # with open(agent_wheel, 'r+b') as f: + # hold = f.read() + # file_str = str(hold).encode('utf-8') + # decoded_str = str(base64.decodestring(hold)) + # # From the web this is what is added to the file. + # filestr = "base64," + file_str + # # filestr = "base64,"+str(base64.b64encode(hold)) + with open(agent_wheel, 'r+b') as f: # From the web this is what is added to the file. - filestr = "base64,"+base64.b64encode(f.read()) - + hold = f.read() + print(f"Package is {hold}") + filestr = "base64,"+base64.b64encode(hold).decode('utf-8') + print(f"file string is {filestr}") file_props = dict( file_name=os.path.basename(agent_wheel), file=filestr, vip_identity='bar.full.{}'.format(random.randint(1, 100000)) ) - + gevent.sleep(5) platform = webapi.list_platforms()[0] agents = webapi.list_agents(platform['uuid']) @@ -317,4 +327,4 @@ def test_installagent(auto_registered_local): # def test_login_rejected_for_foo(vc_instance): # vc_jsonrpc = vc_instance[2] # with pytest.raises(AssertionError): -# tester = APITester(vc_jsonrpc, "foo", "") \ No newline at end of file +# tester = APITester(vc_jsonrpc, "foo", "") diff --git a/services/core/VolttronCentral/tests/vc_fixtures.py b/services/core/VolttronCentral/tests/vc_fixtures.py index f0f62ca80c..c232701a89 100644 --- a/services/core/VolttronCentral/tests/vc_fixtures.py +++ b/services/core/VolttronCentral/tests/vc_fixtures.py @@ -58,7 +58,7 @@ @pytest.fixture(scope="module") def vc_and_vcp_together(volttron_instance_web): - if volttron_instance_web.messagebus == 'rmq': + if volttron_instance_web.ssl_auth is True: os.environ['REQUESTS_CA_BUNDLE'] = volttron_instance_web.requests_ca_bundle vc_uuid = volttron_instance_web.install_agent( agent_dir=get_services_core("VolttronCentral"), @@ -71,6 +71,7 @@ def vc_and_vcp_together(volttron_instance_web): # Allow all rmq based csr connections. if volttron_instance_web.messagebus == 'rmq': volttron_instance_web.enable_auto_csr() + gevent.sleep(7) # vcp_config = PLATFORM_AGENT_CONFIG.copy() # vcp_config['volttron-central-address'] = volttron_instance_web.bind_web_address @@ -79,7 +80,7 @@ def vc_and_vcp_together(volttron_instance_web): config_file=PLATFORM_AGENT_CONFIG, start=True ) - gevent.sleep(10) + gevent.sleep(7) yield volttron_instance_web @@ -115,7 +116,7 @@ def vc_instance(volttron_instance_web): # Allow all rmq based csr connections. if volttron_instance_web.messagebus == 'rmq': volttron_instance_web.enable_auto_csr() - volttron_instance_web.web_admin_api.create_web_admin('admin', 'admin') + # volttron_instance_web.web_admin_api.create_web_admin('admin', 'admin') yield volttron_instance_web, agent_uuid, rpc_addr diff --git a/services/core/VolttronCentral/tests/vctestutils.py b/services/core/VolttronCentral/tests/vctestutils.py index 6ab7a115b7..bf69d5978b 100644 --- a/services/core/VolttronCentral/tests/vctestutils.py +++ b/services/core/VolttronCentral/tests/vctestutils.py @@ -30,7 +30,12 @@ def do_rpc(self, method, **params): print('Posting: {}'.format(data)) - r = requests.post(self._url, json=data) + if self._wrapper.ssl_auth: + r = requests.post(self._url, json=data, + verify=self._wrapper.certsobj.cert_file(self._wrapper.certsobj.root_ca_name)) + else: + r = requests.post(self._url, json=data, + verify=False) validate_response(r) rpcjson = r.json() diff --git a/services/core/VolttronCentral/ui-src/js/action-creators/platform-chart-action-creators.js b/services/core/VolttronCentral/ui-src/js/action-creators/platform-chart-action-creators.js index 6632e98975..480af62904 100644 --- a/services/core/VolttronCentral/ui-src/js/action-creators/platform-chart-action-creators.js +++ b/services/core/VolttronCentral/ui-src/js/action-creators/platform-chart-action-creators.js @@ -202,7 +202,7 @@ function loadChart(panelItem, emitChange, authorization) { var platform = platformsPanelItemsStore.getItem(platformPath); message = "Unable to load chart: No data was retrieved for " + topic + ". Check for proper configuration " + - " of any forwarder, master driver, and platform agents on platform '" + platform[uuid].name + "'."; + " of any forwarder, platform driver, and platform agents on platform '" + platform[uuid].name + "'."; orientation = "left"; highlight = topic; } diff --git a/services/core/VolttronCentral/volttroncentral/agent.py b/services/core/VolttronCentral/volttroncentral/agent.py index a0c25b2811..a8caa5bdc3 100644 --- a/services/core/VolttronCentral/volttroncentral/agent.py +++ b/services/core/VolttronCentral/volttroncentral/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -245,10 +245,6 @@ def _configure(self, config_name, action, contents): self.vip.web.register_endpoint(r'/vc/jsonrpc', self.jsonrpc) - self.vip.web.register_websocket(r'/vc/ws', - self.open_authenticate_ws_endpoint, - self._ws_closed, - self._ws_received) self.vip.web.register_path(r'^/vc/.*', config.get('webroot')) @@ -269,7 +265,7 @@ def _handle_platform_connection(self, platform_vip_identity): platform = self._platforms.add_platform(platform_vip_identity) def _handle_platform_disconnect(self, platform_vip_identity): - _log.warn("Handling disconnection of connection from identity: {}".format( + _log.warning("Handling disconnection of connection from identity: {}".format( platform_vip_identity )) # TODO send alert that there was a platform disconnect. @@ -312,7 +308,7 @@ def configure_platforms(self, config_name, action, contents): def open_authenticate_ws_endpoint(self, fromip, endpoint): """ - Callback method from when websockets are opened. The endpoine must + Callback method from when websockets are opened. The endpoint must be '/' delimited with the second to last section being the session of a logged in user to volttron central itself. @@ -477,7 +473,7 @@ def jsonrpc(self, env, data): except Exception as e: return jsonrpc.json_error( - 'NA', UNHANDLED_EXCEPTION, e + 'NA', UNHANDLED_EXCEPTION, str(e) ) return self._get_jsonrpc_response(rpcdata.id, result_or_error) diff --git a/services/core/VolttronCentral/volttroncentral/platforms.py b/services/core/VolttronCentral/volttroncentral/platforms.py index 32fd21bc88..e82ba472ac 100644 --- a/services/core/VolttronCentral/volttroncentral/platforms.py +++ b/services/core/VolttronCentral/volttroncentral/platforms.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -458,7 +458,7 @@ def store_agent_config(self, session_user, params): agent_identity = params.get("agent_identity") if config_name.startswith("devices"): # Since we start with devices, we assume that we are attempting - # to save a master driver config file. + # to save a platform driver config file. rawdict = jsonapi.loads(params['raw_contents']) # if this is not a bacnet device_type then we cannot do anything @@ -515,9 +515,7 @@ def get_agent_list(self, session_user, params): agents = self.call('list_agents') if agents is None: - self._log.warn('No agents found for vcp: {} ({})'.format( - self.vip_identity, self.address - )) + self._log.warning('No agents found for vcp: {} ({})'.format(self.vip_identity, self.address)) agents = [] for a in agents: @@ -583,7 +581,7 @@ def get_devices(self, session_user, params): def get_stats(self, stat_type): # TODO Change so stat_type is available. if stat_type != 'status/cpu': - self._log.warn('The only stats available are cpu stats currently') + self._log.warning('The only stats available are cpu stats currently') return {} return self._platform_stats.get(stat_type, {}).copy() @@ -685,9 +683,7 @@ def _on_platform_stats(self, peer, sender, bus, topic, headers, message): expected_prefix = "platforms/{}/".format(self.vip_identity) if not topic.startswith(expected_prefix): - self._log.warn("Unexpected topic published to stats function: {}".format( - topic - )) + self._log.warning("Unexpected topic published to stats function: {}".format(topic)) return self._log.debug("TOPIC WAS: {}".format(topic)) @@ -745,10 +741,7 @@ def _on_platform_message(self,peer, sender, bus, topic, headers, message): expected_prefix = "platforms/{}/".format(self.vip_identity) if not topic.startswith(expected_prefix): - self._log.warn( - "Unexpected topic published to stats function: {}".format( - topic - )) + self._log.warning("Unexpected topic published to stats function: {}".format(topic)) return self._log.debug("TOPIC WAS: {}".format(topic)) @@ -772,8 +765,8 @@ def _on_platform_message(self,peer, sender, bus, topic, headers, message): topicsplit = topic.split('/') if len(topicsplit) < 3: - self._log.warn("Invalid topic length no operation or datatype.") - self._log.warn("Topic was {}".format(topic)) + self._log.warning("Invalid topic length no operation or datatype.") + self._log.warning("Topic was {}".format(topic)) return _, platform_uuid, op_or_datatype, other = topicsplit[0], \ diff --git a/services/core/VolttronCentral/volttroncentral/webroot/vc/jqxdraw.js b/services/core/VolttronCentral/volttroncentral/webroot/vc/jqxdraw.js deleted file mode 100644 index 263c37b236..0000000000 --- a/services/core/VolttronCentral/volttroncentral/webroot/vc/jqxdraw.js +++ /dev/null @@ -1,15 +0,0 @@ -/* -jQWidgets v4.5.0 (2017-Jan) -Copyright (c) 2011-2017 jQWidgets. -License: http://jqwidgets.com/license/ -*/ -/* -jQWidgets v4.5.0 (2017-Jan) -Copyright (c) 2011-2017 jQWidgets. -License: http://jqwidgets.com/license/ -*/ -!function(a){a.jqx.jqxWidget("jqxDraw","",{}),a.extend(a.jqx._jqxDraw.prototype,{defineInstance:function(){var b={renderEngine:""};a.extend(!0,this,b);var c=["clear","on","off","removeElement","attr","getAttr","line","circle","rect","path","pieslice","text","measureText"];for(var d in c)this._addFn(a.jqx._jqxDraw.prototype,c[d])},_addFn:function(a,b){a[b]||(a[b]=function(){return this.renderer[b].apply(this.renderer,arguments)})},createInstance:function(a){},_initRenderer:function(b){return a.jqx.createRenderer(this,b)},_internalRefresh:function(){var b=this;if(!a.jqx.isHidden(b.host)){b.renderer||(b.host.empty(),b._initRenderer(b.host));var c=b.renderer;if(c){var d=c.getRect();b._render({x:1,y:1,width:d.width,height:d.height}),c instanceof a.jqx.HTML5Renderer&&c.refresh()}}},_saveAsImage:function(b,c,d,e){return a.jqx._widgetToImage(this,b,c,d,e)},_render:function(a){var b=this;b.renderer;b._plotRect=a},refresh:function(){this._internalRefresh()},getSize:function(){var a=this._plotRect;return{width:a.width,height:a.height}},saveAsPNG:function(a,b,c){return this._saveAsImage("png",a,b,c)},saveAsJPEG:function(a,b,c){return this._saveAsImage("jpeg",a,b,c)}})}(jqxBaseFramework),function(a){a.jqx.toGreyScale=function(b){if(b.indexOf("#")==-1)return b;var c=a.jqx.cssToRgb(b);c[0]=c[1]=c[2]=Math.round(.3*c[0]+.59*c[1]+.11*c[2]);var d=a.jqx.rgbToHex(c[0],c[1],c[2]);return"#"+d[0]+d[1]+d[2]},a.jqx.adjustColor=function(b,c){if("string"!=typeof b)return"#000000";if(b.indexOf("#")==-1)return b;var d=a.jqx.cssToRgb(b),e=a.jqx.rgbToHsl(d);e[2]=Math.min(1,e[2]*c),e[1]=Math.min(1,e[1]*c*1.1),d=a.jqx.hslToRgb(e);for(var b="#",f=0;f<3;f++){var g=Math.round(d[f]);g=a.jqx.decToHex(g),1==g.toString().length&&(b+="0"),b+=g}return b.toUpperCase()},a.jqx.decToHex=function(a){return a.toString(16)},a.jqx.hexToDec=function(a){return parseInt(a,16)},a.jqx.rgbToHex=function(b,c,d){return[a.jqx.decToHex(b),a.jqx.decToHex(c),a.jqx.decToHex(d)]},a.jqx.hexToRgb=function(b,c,d){return[a.jqx.hexToDec(b),a.jqx.hexToDec(c),a.jqx.hexToDec(d)]},a.jqx.cssToRgb=function(b){return b.indexOf("rgb")<=-1?a.jqx.hexToRgb(b.substring(1,3),b.substring(3,5),b.substring(5,7)):b.substring(4,b.length-1).split(",")},a.jqx.hslToRgb=function(c){var d=parseFloat(c[0]),e=parseFloat(c[1]),f=parseFloat(c[2]);if(0==e)r=g=b=f;else{var h=f<.5?f*(1+e):f+e-f*e,i=2*f-h;r=a.jqx.hueToRgb(i,h,d+1/3),g=a.jqx.hueToRgb(i,h,d),b=a.jqx.hueToRgb(i,h,d-1/3)}return[255*r,255*g,255*b]},a.jqx.hueToRgb=function(a,b,c){return c<0&&(c+=1),c>1&&(c-=1),c<1/6?a+6*(b-a)*c:c<.5?b:c<2/3?a+(b-a)*(2/3-c)*6:a},a.jqx.rgbToHsl=function(a){var b,c,d=parseFloat(a[0])/255,e=parseFloat(a[1])/255,f=parseFloat(a[2])/255,g=Math.max(d,e,f),h=Math.min(d,e,f),i=(g+h)/2;if(g==h)b=c=0;else{var j=g-h;switch(c=i>.5?j/(2-g-h):j/(g+h),g){case d:b=(e-f)/j+(eb?c-.5:c+.5:c},a.jqx._ptRotate=function(a,b,c,d,e){var f=Math.sqrt(Math.pow(Math.abs(a-c),2)+Math.pow(Math.abs(b-d),2)),g=Math.asin((a-c)/f),h=g+e;return a=c+Math.cos(h)*f,b=d+Math.sin(h)*f,{x:a,y:b}},a.jqx._rup=function(a){var b=Math.round(a);return a>b&&b++,b},a.jqx.log=function(a,b){return Math.log(a)/(b?Math.log(b):1)},a.jqx._mod=function(a,b){var c=Math.abs(a>b?b:a),d=1;if(0!=c)for(;c*d<100;)d*=10;return a*=d,b*=d,a%b/d},a.jqx._rnd=function(b,c,d,e){if(isNaN(b))return b;void 0===e&&(e=!0);var f=b-(1==e?b%c:a.jqx._mod(b,c));return b==f?f:(d?b>f&&(f+=c):f>b&&(f-=c),1==c?Math.round(f):f)},a.jqx.commonRenderer={pieSlicePath:function(a,b,c,d,e,f,g){d||(d=1);var h=Math.abs(e-f),i=h>180?1:0;h>=360&&(f=e+359.99);var j=e*Math.PI*2/360,k=f*Math.PI*2/360,l=a,m=a,n=b,o=b,p=!isNaN(c)&&c>0;if(p&&(g=0),g+c>0){if(g>0){var q=h/2+e,r=q*Math.PI*2/360;a+=g*Math.cos(r),b-=g*Math.sin(r)}if(p){var s=c;l=a+s*Math.cos(j),n=b-s*Math.sin(j),m=a+s*Math.cos(k),o=b-s*Math.sin(k)}}var t=a+d*Math.cos(j),u=a+d*Math.cos(k),v=b-d*Math.sin(j),w=b-d*Math.sin(k),x="",y=Math.abs(Math.abs(f-e)-360)>.02;return p?(x="M "+m+","+o,x+=" a"+c+","+c,x+=" 0 "+i+",1 "+(l-m)+","+(n-o),x+=y?" L"+t+","+v:" M"+t+","+v,x+=" a"+d+","+d,x+=" 0 "+i+",0 "+(u-t)+","+(w-v),y&&(x+=" Z")):(x="M "+u+","+w,x+=" a"+d+","+d,x+=" 0 "+i+",1 "+(t-u)+","+(v-w),y&&(x+=" L"+a+","+b,x+=" Z")),x},measureText:function(b,c,d,e,f){var g=f._getTextParts(b,c,d),h=g.width,i=g.height;0==e&&(i/=.6);var j={};if(isNaN(c)&&(c=0),0==c)j={width:a.jqx._rup(h),height:a.jqx._rup(i)};else{var k=c*Math.PI*2/360,l=Math.abs(Math.sin(k)),m=Math.abs(Math.cos(k)),n=Math.abs(h*l+i*m),o=Math.abs(h*m+i*l);j={width:a.jqx._rup(o),height:a.jqx._rup(n)}}return e&&(j.textPartsInfo=g),j},alignTextInRect:function(b,c,d,e,f,g,h,i,j,k){var l=j*Math.PI*2/360,m=Math.sin(l),n=Math.cos(l),o=f*m,p=f*n;"center"==h||""==h||"undefined"==h?b+=d/2:"right"==h&&(b+=d),"center"==i||"middle"==i||""==i||"undefined"==i?c+=e/2:"bottom"==i?c+=e-g/2:"top"==i&&(c+=g/2),k=k||"";var q="middle";k.indexOf("top")!=-1?q="top":k.indexOf("bottom")!=-1&&(q="bottom");var r="center";return k.indexOf("left")!=-1?r="left":k.indexOf("right")!=-1&&(r="right"),"center"==r?(b-=p/2,c-=o/2):"right"==r&&(b-=p,c-=o),"top"==q?(b-=g*m,c+=g*n):"middle"==q&&(b-=g*m/2,c+=g*n/2),b=a.jqx._rup(b),c=a.jqx._rup(c),{x:b,y:c}}},a.jqx.svgRenderer=function(){},a.jqx.svgRenderer.prototype={_svgns:"http://www.w3.org/2000/svg",init:function(a){var b="
";a.append(b),this.host=a;var c=a.find(".chartContainer");c[0].style.width=a.width()+"px",c[0].style.height=a.height()+"px";try{var d=document.createElementNS(this._svgns,"svg");d.setAttribute("id","svgChart"),d.setAttribute("version","1.1"),d.setAttribute("width","100%"),d.setAttribute("height","100%"),d.setAttribute("overflow","hidden"),c[0].appendChild(d),this.canvas=d}catch(a){return!1}return this._id=(new Date).getTime(),this.clear(),this._layout(),this._runLayoutFix(),!0},getType:function(){return"SVG"},refresh:function(){},_runLayoutFix:function(){this._fixLayout()},_fixLayout:function(){var b=this.canvas.getBoundingClientRect(),c=parseFloat(b.left)==parseInt(b.left),d=parseFloat(b.top)==parseInt(b.top);if(a.jqx.browser.msie){for(var c=!0,d=!0,e=this.host,f=0,g=0;e&&e.position&&e[0].parentNode;){var h=e.position();f+=parseFloat(h.left)-parseInt(h.left),g+=parseFloat(h.top)-parseInt(h.top),e=e.parent()}c=parseFloat(f)==parseInt(f),d=parseFloat(g)==parseInt(g)}c||(this.host.find(".tdLeft")[0].style.width="0.5px"),d||(this.host.find(".tdTop")[0].style.height="0.5px")},_layout:function(){var b=this.host.find(".chartContainer");this._width=Math.max(a.jqx._rup(this.host.width())-1,0),this._height=Math.max(a.jqx._rup(this.host.height())-1,0),b[0].style.width=this._width,b[0].style.height=this._height,this._fixLayout()},getRect:function(){return{x:0,y:0,width:this._width,height:this._height}},getContainer:function(){var a=this.host.find(".chartContainer");return a},clear:function(){for(;this.canvas.childElementCount>0;)this.removeElement(this.canvas.firstElementChild);this._defaultParent=void 0,this._defs=document.createElementNS(this._svgns,"defs"),this._gradients={},this.canvas.appendChild(this._defs)},removeElement:function(a){if(void 0!=a){this.removeHandler(a);try{for(;a.firstChild;)this.removeElement(a.firstChild);a.parentNode?a.parentNode.removeChild(a):this.canvas.removeChild(a)}catch(a){}}},_openGroups:[],beginGroup:function(){var a=this._activeParent(),b=document.createElementNS(this._svgns,"g");return a.appendChild(b),this._openGroups.push(b),b},endGroup:function(){0!=this._openGroups.length&&this._openGroups.pop()},_activeParent:function(){return 0==this._openGroups.length?this.canvas:this._openGroups[this._openGroups.length-1]},createClipRect:function(a){var b=document.createElementNS(this._svgns,"clipPath"),c=document.createElementNS(this._svgns,"rect");return this.attr(c,{x:a.x,y:a.y,width:a.width,height:a.height,fill:"none"}),this._clipId=this._clipId||0,b.id="cl"+this._id+"_"+(++this._clipId).toString(),b.appendChild(c),this._defs.appendChild(b),b},getWindowHref:function(){var b=a.jqx.browser;if(b&&"msie"==b.browser&&b.version<10)return"";var c=window.location.href;return c?(c=c.replace(/([\('\)])/g,"\\$1"),c=c.replace(/#.*$/,"")):c},setClip:function(a,b){var c="url("+this.getWindowHref()+"#"+b.id+")";return this.attr(a,{"clip-path":c})},_clipId:0,addHandler:function(b,c,d){a(b).on?a(b).on(c,d):a(b).bind(c,d)},removeHandler:function(b,c,d){a(b).off?a(b).off(c,d):a(b).unbind(c,d)},on:function(a,b,c){this.addHandler(a,b,c)},off:function(a,b,c){this.removeHandler(a,b,c)},shape:function(a,b){var c=document.createElementNS(this._svgns,a);if(c){for(var d in b)c.setAttribute(d,b[d]);return this._activeParent().appendChild(c),c}},_getTextParts:function(b,c,d){var e={width:0,height:0,parts:[]};if(void 0===b)return e;var f=.6,g=b.toString().split("
"),h=this._activeParent(),i=document.createElementNS(this._svgns,"text");this.attr(i,d);for(var j=0;j0?4:0),e.parts.push({width:n,height:o,text:k})}return h.removeChild(i),e},_measureText:function(b,c,d,e){return a.jqx.commonRenderer.measureText(b,c,d,e,this)},measureText:function(a,b,c){return this._measureText(a,b,c,!1)},text:function(b,c,d,e,f,g,h,i,j,k,l){var m,n=this._measureText(b,g,h,!0),o=n.textPartsInfo,p=o.parts;if(j||(j="center"),k||(k="center"),(p.length>1||i)&&(m=this.beginGroup()),i){var q=this.createClipRect({x:a.jqx._rup(c)-1,y:a.jqx._rup(d)-1,width:a.jqx._rup(e)+2,height:a.jqx._rup(f)+2});this.setClip(m,q)}var r=this._activeParent(),s=0,t=0;s=o.width,t=o.height,(isNaN(e)||e<=0)&&(e=s),(isNaN(f)||f<=0)&&(f=t);var u=e||0,v=f||0;if(!g||0==g){d+=t,"center"==k||"middle"==k?d+=(v-t)/2:"bottom"==k&&(d+=v-t),e||(e=s),f||(f=t);for(var r=this._activeParent(),w=0,x=p.length-1;x>=0;x--){var y=document.createElementNS(this._svgns,"text");this.attr(y,h),this.attr(y,{cursor:"default"});var z=y.ownerDocument.createTextNode(p[x].text);y.appendChild(z);var A=c,B=p[x].width,C=p[x].height;"center"==j?A+=(u-B)/2:"right"==j&&(A+=u-B),this.attr(y,{x:a.jqx._rup(A),y:a.jqx._rup(d+w),width:a.jqx._rup(B),height:a.jqx._rup(C)}),r.appendChild(y),w-=p[x].height+4}return m?(this.endGroup(),m):y}var D=a.jqx.commonRenderer.alignTextInRect(c,d,e,f,s,t,j,k,g,l);c=D.x,d=D.y;var E=this.shape("g",{transform:"translate("+c+","+d+")"}),F=this.shape("g",{transform:"rotate("+g+")"});E.appendChild(F);for(var w=0,x=p.length-1;x>=0;x--){var G=document.createElementNS(this._svgns,"text");this.attr(G,h),this.attr(G,{cursor:"default"});var z=G.ownerDocument.createTextNode(p[x].text);G.appendChild(z);var A=0,B=p[x].width,C=p[x].height;"center"==j?A+=(o.width-B)/2:"right"==j&&(A+=o.width-B),this.attr(G,{x:a.jqx._rup(A),y:a.jqx._rup(w),width:a.jqx._rup(B),height:a.jqx._rup(C)}),F.appendChild(G),w-=C+4}return r.appendChild(E),m&&this.endGroup(),E},line:function(a,b,c,d,e){var f=this.shape("line",{x1:a,y1:b,x2:c,y2:d});return this.attr(f,e),f},path:function(a,b){var c=this.shape("path");return c.setAttribute("d",a),b&&this.attr(c,b),c},rect:function(b,c,d,e,f){b=a.jqx._ptrnd(b),c=a.jqx._ptrnd(c),d=Math.max(1,a.jqx._rnd(d,1,!1)),e=Math.max(1,a.jqx._rnd(e,1,!1));var g=this.shape("rect",{x:b,y:c,width:d,height:e});return f&&this.attr(g,f),g},circle:function(a,b,c,d){var e=this.shape("circle",{cx:a,cy:b,r:c});return d&&this.attr(e,d),e},pieSlicePath:function(b,c,d,e,f,g,h){return a.jqx.commonRenderer.pieSlicePath(b,c,d,e,f,g,h)},pieslice:function(a,b,c,d,e,f,g,h){var i=this.pieSlicePath(a,b,c,d,e,f,g),j=this.shape("path");return j.setAttribute("d",i),h&&this.attr(j,h),j},attr:function(a,b){if(a&&b)for(var c in b)"textContent"==c?a.textContent=b[c]:a.setAttribute(c,b[c])},removeAttr:function(a,b){if(a&&b)for(var c in b)"textContent"==c?a.textContent="":a.removeAttribute(b[c])},getAttr:function(a,b){return a.getAttribute(b)},_gradients:{},_toLinearGradient:function(b,c,d){var e="grd"+this._id+b.replace("#","")+(c?"v":"h"),f="url("+this.getWindowHref()+"#"+e+")";if(this._gradients[f])return f;var g=document.createElementNS(this._svgns,"linearGradient");this.attr(g,{x1:"0%",y1:"0%",x2:c?"0%":"100%",y2:c?"100%":"0%",id:e});for(var h=0;h
";b.append(c),this.host=b;var d=b.find(".chartContainer");d[0].style.width=b.width()+"px",d[0].style.height=b.height()+"px";var e=!0;try{for(var f=0;f0&&document.childNodes[0].data&&document.childNodes[0].data.indexOf("DOCTYPE")!=-1?(e&&document.namespaces.add("v","urn:schemas-microsoft-com:vml"),this._ie8mode=!0):e&&(document.namespaces.add("v","urn:schemas-microsoft-com:vml"),document.createStyleSheet().cssText="v\\:* { behavior: url(#default#VML); display: inline-block; }"),this.canvas=d[0],this._width=Math.max(a.jqx._rup(d.width()),0),this._height=Math.max(a.jqx._rup(d.height()),0),d[0].style.width=this._width+2,d[0].style.height=this._height+2,this._id=(new Date).getTime(),this.clear(),!0},getType:function(){return"VML"},refresh:function(){},getRect:function(){return{x:0,y:0,width:this._width,height:this._height}},getContainer:function(){var a=this.host.find(".chartContainer");return a},clear:function(){for(;this.canvas.childElementCount>0;)this.removeHandler(this.canvas.firstElementChild),this.canvas.removeChild(this.canvas.firstElementChild);this._gradients={},this._defaultParent=void 0},removeElement:function(a){null!=a&&(this.removeHandler(a),a.parentNode.removeChild(a))},_openGroups:[],beginGroup:function(){var a=this._activeParent(),b=document.createElement("v:group");return b.style.position="absolute",b.coordorigin="0,0",b.coordsize=this._width+","+this._height,b.style.left=0,b.style.top=0,b.style.width=this._width,b.style.height=this._height,a.appendChild(b),this._openGroups.push(b),b},endGroup:function(){0!=this._openGroups.length&&this._openGroups.pop()},_activeParent:function(){return 0==this._openGroups.length?this.canvas:this._openGroups[this._openGroups.length-1]},createClipRect:function(a){var b=document.createElement("div");return b.style.height=a.height+1+"px",b.style.width=a.width+1+"px",b.style.position="absolute",b.style.left=a.x+"px",b.style.top=a.y+"px",b.style.overflow="hidden",this._clipId=this._clipId||0,b.id="cl"+this._id+"_"+(++this._clipId).toString(),this._activeParent().appendChild(b),b},setClip:function(a,b){},_clipId:0,addHandler:function(b,c,d){a(b).on?a(b).on(c,d):a(b).bind(c,d)},removeHandler:function(b,c,d){a(b).off?a(b).off(c,d):a(b).unbind(c,d)},on:function(a,b,c){this.addHandler(a,b,c)},off:function(a,b,c){this.removeHandler(a,b,c)},_getTextParts:function(b,c,d){var e={width:0,height:0,parts:[]},f=.6,g=b.toString().split("
"),h=this._activeParent(),i=document.createElement("v:textbox");this.attr(i,d),h.appendChild(i);for(var j=0;j0?2:0),e.parts.push({width:n,height:o,text:k})}return h.removeChild(i),e},_measureText:function(b,c,d,e){return c=Math.abs(c)>45?90:0,a.jqx.commonRenderer.measureText(b,c,d,e,this)},measureText:function(a,b,c){return this._measureText(a,b,c,!1)},text:function(b,c,d,e,f,g,h,i,j,k){var l;h&&h.stroke&&(l=h.stroke),void 0==l&&(l="black");var m=this._measureText(b,g,h,!0),n=m.textPartsInfo,o=n.parts,p=m.width,q=m.height;(isNaN(e)||0==e)&&(e=p),(isNaN(f)||0==f)&&(f=q);var r;if(j||(j="center"),k||(k="center"),(o.length>0||i)&&(r=this.beginGroup()),i){var s=this.createClipRect({x:a.jqx._rup(c),y:a.jqx._rup(d),width:a.jqx._rup(e),height:a.jqx._rup(f)});this.setClip(r,s)}var t=this._activeParent(),u=e||0,v=f||0;g=Math.abs(g)>45?90:0;var w=0,x=0;"center"==j?w+=(u-p)/2:"right"==j&&(w+=u-p),"center"==k?x=(v-q)/2:"bottom"==k&&(x=v-q),0==g?(d+=q+x,c+=w):(c+=p+w,d+=x);for(var y,z=0,A=o.length-1;A>=0;A--){var B=o[A],C=(p-B.width)/2;0==g&&"left"==j?C=0:0==g&&"right"==j?C=p-B.width:90==g&&(C=(q-B.width)/2);var D=z-B.height;x=90==g?C:D,w=90==g?D:C,y=document.createElement("v:textbox"),y.style.position="absolute",y.style.left=a.jqx._rup(c+w),y.style.top=a.jqx._rup(d+x),y.style.width=a.jqx._rup(B.width),y.style.height=a.jqx._rup(B.height),90==g&&(y.style.filter="progid:DXImageTransform.Microsoft.BasicImage(rotation=3)",y.style.height=a.jqx._rup(B.height)+5);var E=document.createElement("span");E.appendChild(document.createTextNode(B.text)),h&&h.class&&(E.className=h.class),y.appendChild(E),t.appendChild(y),z-=B.height+(A>0?2:0)}return r?(this.endGroup(),t):y},shape:function(a,b){var c=document.createElement(this._createElementMarkup(a));if(c){for(var d in b)c.setAttribute(d,b[d]);return this._activeParent().appendChild(c),c}},line:function(a,b,c,d,e){var f="M "+a+","+b+" L "+c+","+d+" X E",g=this.path(f);return this.attr(g,e),g},_createElementMarkup:function(a){var b="";return this._ie8mode&&(b=b.replace('style=""','style="behavior: url(#default#VML);"')),b},path:function(a,b){var c=document.createElement(this._createElementMarkup("shape"));return c.style.position="absolute",c.coordsize=this._width+" "+this._height,c.coordorigin="0 0",c.style.width=parseInt(this._width),c.style.height=parseInt(this._height),c.style.left="0px",c.style.top="0px",c.setAttribute("path",a),this._activeParent().appendChild(c),b&&this.attr(c,b),c},rect:function(b,c,d,e,f){b=a.jqx._ptrnd(b),c=a.jqx._ptrnd(c),d=a.jqx._rup(d),e=a.jqx._rup(e);var g=this.shape("rect",f);return g.style.position="absolute",g.style.left=b,g.style.top=c,g.style.width=d,g.style.height=e,g.strokeweight=0,f&&this.attr(g,f),g},circle:function(b,c,d,e){var f=this.shape("oval");return b=a.jqx._ptrnd(b-d),c=a.jqx._ptrnd(c-d),d=a.jqx._rup(d),f.style.position="absolute",f.style.left=b,f.style.top=c,f.style.width=2*d,f.style.height=2*d,e&&this.attr(f,e),f},updateCircle:function(b,c,d,e){void 0==c&&(c=parseFloat(b.style.left)+parseFloat(b.style.width)/2),void 0==d&&(d=parseFloat(b.style.top)+parseFloat(b.style.height)/2),void 0==e&&(e=parseFloat(b.width)/2),c=a.jqx._ptrnd(c-e),d=a.jqx._ptrnd(d-e),e=a.jqx._rup(e),b.style.left=c,b.style.top=d,b.style.width=2*e,b.style.height=2*e},pieSlicePath:function(b,c,d,e,f,g,h){e||(e=1);var i=Math.abs(f-g);i>360&&(f=0,g=360);var j=f*Math.PI*2/360,k=g*Math.PI*2/360,l=b,m=b,n=c,o=c,p=!isNaN(d)&&d>0;if(p&&(h=0),h>0){var q=i/2+f,r=q*Math.PI*2/360;b+=h*Math.cos(r),c-=h*Math.sin(r)}if(p){var s=d;l=a.jqx._ptrnd(b+s*Math.cos(j)),n=a.jqx._ptrnd(c-s*Math.sin(j)),m=a.jqx._ptrnd(b+s*Math.cos(k)),o=a.jqx._ptrnd(c-s*Math.sin(k))}var t=(a.jqx._ptrnd(b+e*Math.cos(j)),a.jqx._ptrnd(b+e*Math.cos(k))),u=(a.jqx._ptrnd(c-e*Math.sin(j)),a.jqx._ptrnd(c-e*Math.sin(k)));e=a.jqx._ptrnd(e),d=a.jqx._ptrnd(d),b=a.jqx._ptrnd(b),c=a.jqx._ptrnd(c);var v=Math.round(65535*f),w=Math.round(65536*(g-f));d<0&&(d=1);var x="";return p?(x="M"+l+" "+n,x+=" AE "+b+" "+c+" "+d+" "+d+" "+v+" "+w,x+=" L "+t+" "+u,v=Math.round(65535*(f-g)),w=Math.round(65536*g),x+=" AE "+b+" "+c+" "+e+" "+e+" "+w+" "+v,x+=" L "+l+" "+n):(x="M"+b+" "+c,x+=" AE "+b+" "+c+" "+e+" "+e+" "+v+" "+w),x+=" X E"},pieslice:function(a,b,c,d,e,f,g,h){var i=this.pieSlicePath(a,b,c,d,e,f,g),j=this.path(i,h);return h&&this.attr(j,h),j},_keymap:[{svg:"fill",vml:"fillcolor"},{svg:"stroke",vml:"strokecolor"},{svg:"stroke-width",vml:"strokeweight"},{svg:"stroke-dasharray",vml:"dashstyle"},{svg:"fill-opacity",vml:"fillopacity"},{svg:"stroke-opacity",vml:"strokeopacity"},{svg:"opacity",vml:"opacity"},{svg:"cx",vml:"style.left"},{svg:"cy",vml:"style.top"},{svg:"height",vml:"style.height"},{svg:"width",vml:"style.width"},{svg:"x",vml:"style.left"},{svg:"y",vml:"style.top"},{svg:"d",vml:"v"},{svg:"display",vml:"style.display"}],_translateParam:function(a){for(var b in this._keymap)if(this._keymap[b].svg==a)return this._keymap[b].vml;return a},attr:function(a,b){if(a&&b)for(var c in b){var d=this._translateParam(c);void 0!=b[c]&&("fillcolor"==d&&b[c].indexOf("grd")!=-1?a.type=b[c]:"fillcolor"==d&&"transparent"==b[c]?(a.style.filter="alpha(opacity=0)",a["-ms-filter"]="progid:DXImageTransform.Microsoft.Alpha(Opacity=0)"):"opacity"==d||"fillopacity"==d?a.fill&&(a.fill.opacity=b[c]):"textContent"==d?a.children[0].innerText=b[c]:"dashstyle"==d?a.dashstyle=b[c].replace(","," "):d.indexOf("style.")==-1?a[d]=b[c]:a.style[d.replace("style.","")]=b[c])}},removeAttr:function(a,b){if(a&&b)for(var c in b)a.removeAttribute(b[c])},getAttr:function(a,b){var c=this._translateParam(b);return"opacity"==c||"fillopacity"==c?a.fill?a.fill.opacity:1:c.indexOf("style.")==-1?a[c]:a.style[c.replace("style.","")]},_gradients:{},_toRadialGradient:function(a,b,c){return a},_toLinearGradient:function(b,c,d){if(this._ie8mode)return b;var e="grd"+b.replace("#","")+(c?"v":"h"),f="#"+e;if(this._gradients[f])return f;var g=document.createElement(this._createElementMarkup("fill"));g.type="gradient",g.method="linear",g.angle=c?0:90;for(var h="",i=0;i0&&(h+=", "),h+=j[0]+"% "+a.jqx.adjustColor(b,j[1])}g.colors=h;var k=document.createElement(this._createElementMarkup("shapetype"));return k.appendChild(g),k.id=e,this.canvas.appendChild(k),f}},a.jqx.HTML5Renderer=function(){},a.jqx.ptrnd=function(a){if(.5==Math.abs(Math.round(a)-a))return a;var b=Math.round(a);return b
"),this.canvas=a.find("#__jqxCanvasWrap"),this.canvas[0].width=a.width(),this.canvas[0].height=a.height(),this.ctx=this.canvas[0].getContext("2d"),this._elements={},this._maxId=0,this._gradientId=0,this._gradients={},this._currentPoint={x:0,y:0},this._lastCmd="",this._pos=0}catch(a){return!1}return!0},getType:function(){return"HTML5"},getContainer:function(){var a=this.host.find(".chartContainer");return a},getRect:function(){return{x:0,y:0,width:this.canvas[0].width-1,height:this.canvas[0].height-1}},beginGroup:function(){},endGroup:function(){},setClip:function(){},createClipRect:function(a){},addHandler:function(a,b,c){},removeHandler:function(a,b,c){},on:function(a,b,c){this.addHandler(a,b,c)},off:function(a,b,c){this.removeHandler(a,b,c)},clear:function(){this._elements={},this._maxId=0,this._renderers._gradients={},this._gradientId=0},removeElement:function(a){void 0!=a&&this._elements[a.id]&&delete this._elements[a.id]},shape:function(a,b){var c={type:a,id:this._maxId++};for(var d in b)c[d]=b[d];return this._elements[c.id]=c,c},attr:function(a,b){for(var c in b)a[c]=b[c]},removeAttr:function(a,b){for(var c in b)delete a[b[c]]},rect:function(a,b,c,d,e){if(isNaN(a))throw'Invalid value for "x"';if(isNaN(b))throw'Invalid value for "y"';if(isNaN(c))throw'Invalid value for "width"';if(isNaN(d))throw'Invalid value for "height"';var f=this.shape("rect",{x:a,y:b,width:c,height:d});return e&&this.attr(f,e),f},path:function(a,b){var c=this.shape("path",b);return this.attr(c,{d:a}),c},line:function(a,b,c,d,e){return this.path("M "+a+","+b+" L "+c+","+d,e)},circle:function(a,b,c,d){var e=this.shape("circle",{x:a,y:b,r:c});return d&&this.attr(e,d),e},pieSlicePath:function(b,c,d,e,f,g,h){return a.jqx.commonRenderer.pieSlicePath(b,c,d,e,f,g,h)},pieslice:function(a,b,c,d,e,f,g,h){var i=this.path(this.pieSlicePath(a,b,c,d,e,f,g),h);return this.attr(i,{x:a,y:b,innerRadius:c,outerRadius:d,angleFrom:e,angleTo:f}),i},_getCSSStyle:function(a){var b=document.styleSheets;try{for(var c=0;c"),l=0;l0?4:0),i.parts.push({width:n,height:p,text:m})}return i},_measureText:function(b,c,d,e){return a.jqx.commonRenderer.measureText(b,c,d,e,this)},measureText:function(a,b,c){return this._measureText(a,b,c,!1)},text:function(a,b,c,d,e,f,g,h,i,j,k){var l=this.shape("text",{text:a,x:b,y:c,width:d,height:e,angle:f,clip:h,halign:i,valign:j,rotateAround:k});if(g&&this.attr(l,g),l.fontFamily="Arial",l.fontSize="10pt",l.fontWeight="",l.color="#000000",g&&g.class){var m=this._getCSSStyle(g.class);l.fontFamily=m.fontFamily||l.fontFamily,l.fontSize=m.fontSize||l.fontSize,l.fontWeight=m.fontWeight||l.fontWeight,l.color=m.color||l.color}var n=this._measureText(a,0,g,!0);return this.attr(l,{textPartsInfo:n.textPartsInfo,textWidth:n.width,textHeight:n.height}),(d<=0||isNaN(d))&&this.attr(l,{width:n.width}),(e<=0||isNaN(e))&&this.attr(l,{height:n.height}),l},_toLinearGradient:function(b,c,d){if(this._renderers._gradients[b])return b;for(var e=[],f=0;f="0"&&a[c]<="9"||"."==a[c]||"e"==a[c]||"-"==a[c]&&!b||"-"==a[c]&&c>=1&&"e"==a[c-1])b=!0;else{if(b||" "!=a[c]&&","!=a[c])break;this._pos++}var d=parseFloat(a.substring(this._pos,c));if(!isNaN(d))return this._pos=c,d},_cmds:"mlcazq",_isRelativeCmd:function(b){return a.jqx.string.contains(this._cmds,b)},_parseCmd:function(b){for(var c=this._pos;c="0"&&b[c]<="9"){if(this._pos=c,""==this._lastCmd)break;return this._lastCmd}}else this._pos++}},_toAbsolutePoint:function(a){return{x:this._currentPoint.x+a.x,y:this._currentPoint.y+a.y}},path:function(a,b){var c=b.d;this._pos=0,this._lastCmd="";var d=void 0;this._currentPoint={x:0,y:0},a.beginPath();for(;this._pos1&&(i*=Math.sqrt(q),j*=Math.sqrt(q));var r=(l==m?-1:1)*Math.sqrt((Math.pow(i,2)*Math.pow(j,2)-Math.pow(i,2)*Math.pow(p.y,2)-Math.pow(j,2)*Math.pow(p.x,2))/(Math.pow(i,2)*Math.pow(p.y,2)+Math.pow(j,2)*Math.pow(p.x,2)));isNaN(r)&&(r=0);var s={x:r*i*p.y/j,y:r*-j*p.x/i},t={x:(o.x+n.x)/2+Math.cos(k)*s.x-Math.sin(k)*s.y,y:(o.y+n.y)/2+Math.sin(k)*s.x+Math.cos(k)*s.y},u=function(a){return Math.sqrt(Math.pow(a[0],2)+Math.pow(a[1],2))},v=function(a,b){return(a[0]*b[0]+a[1]*b[1])/(u(a)*u(b))},w=function(a,b){return(a[0]*b[1]=1&&(A=0),0==m&&A>0&&(A-=2*Math.PI),1==m&&A<0&&(A+=2*Math.PI);var v=i>j?i:j,B=i>j?1:i/j,C=i>j?j/i:1;a.translate(t.x,t.y),a.rotate(k),a.scale(B,C),a.arc(0,0,v,x,x+A,1-m),a.scale(1/B,1/C),a.rotate(-k),a.translate(-t.x,-t.y)}else{var D=this._parsePoint(c);if(void 0==D)break;a.lineTo(D.x,D.y),this._currentPoint=D}else{var D=this._parsePoint(c);if(void 0==D)break;a.moveTo(D.x,D.y),this._currentPoint=D,void 0==d&&(d=D)}}a.fill(),a.stroke(),a.closePath()},text:function(b,c){var d=a.jqx.ptrnd(c.x),e=a.jqx.ptrnd(c.y),f=a.jqx.ptrnd(c.width),g=a.jqx.ptrnd(c.height),h=c.halign,i=c.valign,j=c.angle,k=c.rotateAround,l=c.textPartsInfo,m=l.parts,n=c.clip;void 0==n&&(n=!0),b.save(),h||(h="center"),i||(i="center"),n&&(b.rect(d,e,f,g),b.clip());var o=c.textWidth,p=c.textHeight,q=f||0,r=g||0;if(b.fillStyle=c.color,b.font=c.fontWeight+" "+c.fontSize+" "+c.fontFamily,!j||0==j){e+=p,"center"==i||"middle"==i?e+=(r-p)/2:"bottom"==i&&(e+=r-p),f||(f=o),g||(g=p);for(var s=0,t=m.length-1;t>=0;t--){var u=m[t],v=d,w=m[t].width;m[t].height;"center"==h?v+=(q-w)/2:"right"==h&&(v+=q-w),b.fillText(u.text,v,e+s),s-=u.height+(t>0?4:0)}return void b.restore()}var x=a.jqx.commonRenderer.alignTextInRect(d,e,f,g,o,p,h,i,j,k);d=x.x,e=x.y;var y=j*Math.PI*2/360;b.translate(d,e),b.rotate(y);for(var s=0,z=l.width,t=m.length-1;t>=0;t--){var v=0;"center"==h?v+=(z-m[t].width)/2:"right"==h&&(v+=z-m[t].width),b.fillText(m[t].text,v,s),s-=m[t].height+4}b.restore()}},refresh:function(){this.ctx.clearRect(0,0,this.canvas[0].width,this.canvas[0].height);for(var a in this._elements){var b=this._elements[a];this._renderers.setFillStyle(this,b),this._renderers.setStroke(this,b),this._renderers[this._elements[a].type](this.ctx,b)}}},a.jqx.createRenderer=function(b,c){var d=b,e=d.renderer=null;if(document.createElementNS&&"HTML5"!=d.renderEngine&&"VML"!=d.renderEngine&&(e=new a.jqx.svgRenderer,!e.init(c))){if("SVG"==d.renderEngine)throw"Your browser does not support SVG";return null}if(null==e&&"HTML5"!=d.renderEngine){if(e=new a.jqx.vmlRenderer,!e.init(c)){if("VML"==d.renderEngine)throw"Your browser does not support VML";return null}d._isVML=!0}if(null==e&&("HTML5"==d.renderEngine||void 0==d.renderEngine)&&(e=new a.jqx.HTML5Renderer,!e.init(c)))throw"Your browser does not support HTML5 Canvas";return d.renderer=e,e},a.jqx._widgetToImage=function(b,c,d,e,f,g){var h=b;if(!h)return!1;void 0!=d&&""!=d||(d="image."+c);var i=h.renderEngine,j=h.enableAnimations;if(h.enableAnimations=!1,h.renderEngine="HTML5",h.renderEngine!=i)try{h.refresh()}catch(a){return h.renderEngine=i,h.refresh(),h.enableAnimations=j,!1}var k=h.renderer.getContainer().find("canvas")[0],l=!0;a.isFunction(g)&&(l=g(b,k));var m=!0;return l&&(m=a.jqx.exportImage(k,c,d,e,f)),h.renderEngine!=i&&(h.renderEngine=i,h.refresh(),h.enableAnimations=j),m},a.jqx.getByPriority=function(a){for(var b=void 0,c=0;c=j-20&&(l=j-20);var m=new pdfDataExport(a.jqx.pdfExport.orientation,"pt",a.jqx.pdfExport.paperSize);return m.addImage(i,"JPEG",10,10,l,0),void m.save(d)}if(i=i.replace("data:image/"+c+";base64,",""),f)a.ajax({dataType:"string",url:e,type:"POST",data:{content:i,fname:d},async:!1,success:function(a,b,c){h=!0},error:function(a,b,c){h=!1}});else{var n=document.createElement("form");n.method="POST",n.action=e,n.style.display="none",document.body.appendChild(n);var o=document.createElement("input");o.name="fname",o.value=d,o.style.display="none";var p=document.createElement("input");p.name="content",p.value=i,p.style.display="none",n.appendChild(o),n.appendChild(p),n.submit(),document.body.removeChild(n),h=!0}}}catch(a){h=!1}return h}}(jqxBaseFramework),function(a){jqxPlot=function(){},jqxPlot.prototype={get:function(a,b,c){return void 0!==c?a[b][c]:a[b]},min:function(a,b){for(var c=NaN,d=0;dc)&&(c=e)}return c},sum:function(a,b){for(var c=0,d=0;dMath.max(c.min,c.max))&&(!e||e.ignore_range!==!0))return NaN;var f=NaN,g=1;if(void 0===c.type||"logarithmic"!=c.type){var h=Math.abs(c.max-c.min);h||(h=1),g=Math.abs(b-Math.min(c.min,c.max))/h}else if("logarithmic"===c.type){var i=c.base;isNaN(i)&&(i=10);var j=Math.min(c.min,c.max);j<=0&&(j=1);var k=Math.max(c.min,c.max);k<=0&&(k=1);var l=a.jqx.log(k,i);k=Math.pow(i,l);var m=a.jqx.log(j,i);j=Math.pow(i,m);var n=a.jqx.log(b,i);g=Math.abs(n-m)/(l-m)}if("logarithmic"===d.type){var i=d.base;isNaN(i)&&(i=10);var l=a.jqx.log(d.max,i),m=a.jqx.log(d.min,i);d.flip&&(g=1-g);var n=Math.min(m,l)+g*Math.abs(l-m);f=Math.pow(i,n)}else f=Math.min(d.min,d.max)+g*Math.abs(d.max-d.min),d.flip&&(f=Math.max(d.min,d.max)-f+d.min);return f},axis:function(b,c,d){if(d<=1)return[c,b];(isNaN(d)||d<2)&&(d=2);for(var e=0;Math.round(b)!=b&&Math.round(c)!=c&&e<10;)b*=10,c*=10,e++;for(var f=(c-b)/d;e<10&&Math.round(f)!=f;)b*=10,c*=10,f*=10,e++;for(var g=[1,2,5],h=0;;){var i=h%g.length,j=Math.floor(h/g.length),k=Math.pow(10,j)*g[i];i=(h+1)%g.length,j=Math.floor((h+1)/g.length);var l=Math.pow(10,j)*g[i];if(f>=k&&f0?void 0:(0,f.default)(!1),null!=c&&(i+=encodeURI(c))):"("===l?o+=1:")"===l?o-=1:":"===l.charAt(0)?(u=l.substring(1),c=t[u],null!=c||o>0?void 0:(0,f.default)(!1),null!=c&&(i+=encodeURIComponent(c))):i+=l;return i.replace(/\/+/g,"/")}t.__esModule=!0,t.compilePattern=a,t.matchPattern=s,t.getParamNames=l,t.getParams=u,t.formatPattern=c;var d=n(15),f=r(d),h=Object.create(null)},function(e,t){"use strict";t.__esModule=!0;var n="PUSH";t.PUSH=n;var r="REPLACE";t.REPLACE=r;var o="POP";t.POP=o,t.default={PUSH:n,REPLACE:r,POP:o}},function(e,t,n){"use strict";var r=n(8),o=n(368),i=n(710),a=n(711),s=n(54),l=n(712),u=n(713),c=n(714),d=n(718),f=s.createElement,h=s.createFactory,p=s.cloneElement,m=r,_=function(e){return e},g={Children:{map:i.map,forEach:i.forEach,count:i.count,toArray:i.toArray,only:d},Component:o.Component,PureComponent:o.PureComponent,createElement:f,cloneElement:p,isValidElement:s.isValidElement,PropTypes:l,createClass:c,createFactory:h,createMixin:_,DOM:a,version:u,__spread:m};e.exports=g},function(e,t,n){"use strict";function r(e){return void 0!==e.ref}function o(e){return void 0!==e.key}var i=n(8),a=n(33),s=(n(90),n(372),Object.prototype.hasOwnProperty),l=n(370),u={key:!0,ref:!0,__self:!0,__source:!0},c=function(e,t,n,r,o,i,a){var s={$$typeof:l,type:e,key:t,ref:n,props:a,_owner:i};return s};c.createElement=function(e,t,n){var i,l={},d=null,f=null,h=null,p=null;if(null!=t){r(t)&&(f=t.ref),o(t)&&(d=""+t.key),h=void 0===t.__self?null:t.__self,p=void 0===t.__source?null:t.__source;for(i in t)s.call(t,i)&&!u.hasOwnProperty(i)&&(l[i]=t[i])}var m=arguments.length-2;if(1===m)l.children=n;else if(m>1){for(var _=Array(m),g=0;g1){for(var v=Array(y),b=0;b2?arguments[2]:{},i=r(t);o&&(i=a.call(i,Object.getOwnPropertySymbols(t)));for(var s=0;s>>0;if(""+n!==t||4294967295===n)return NaN;t=n}return t<0?p(e)+t:t}function _(){return!0}function g(e,t,n){return(0===e||void 0!==n&&e<=-n)&&(void 0===t||void 0!==n&&t>=n)}function y(e,t){return b(e,t,0)}function v(e,t){return b(e,t,t)}function b(e,t,n){return void 0===e?n:e<0?Math.max(0,t+e):void 0===t?e:Math.min(t,e)}function w(e){this.next=e}function M(e,t,n,r){var o=0===e?t:1===e?n:[t,n];return r?r.value=o:r={value:o,done:!1},r}function k(){return{value:void 0,done:!0}}function S(e){return!!C(e)}function x(e){return e&&"function"==typeof e.next}function L(e){var t=C(e);return t&&t.call(e)}function C(e){var t=e&&(kn&&e[kn]||e[Sn]);if("function"==typeof t)return t}function T(e){return e&&"number"==typeof e.length}function E(e){return null===e||void 0===e?F():i(e)?e.toSeq():z(e)}function D(e){return null===e||void 0===e?F().toKeyedSeq():i(e)?a(e)?e.toSeq():e.fromEntrySeq():N(e)}function O(e){return null===e||void 0===e?F():i(e)?a(e)?e.entrySeq():e.toIndexedSeq():H(e)}function P(e){return(null===e||void 0===e?F():i(e)?a(e)?e.entrySeq():e:H(e)).toSetSeq()}function A(e){this._array=e,this.size=e.length}function Y(e){var t=Object.keys(e);this._object=e,this._keys=t,this.size=t.length}function I(e){this._iterable=e,this.size=e.length||e.size}function R(e){this._iterator=e,this._iteratorCache=[]}function j(e){return!(!e||!e[Ln])}function F(){return Cn||(Cn=new A([]))}function N(e){var t=Array.isArray(e)?new A(e).fromEntrySeq():x(e)?new R(e).fromEntrySeq():S(e)?new I(e).fromEntrySeq():"object"==typeof e?new Y(e):void 0;if(!t)throw new TypeError("Expected Array or iterable object of [k, v] entries, or keyed object: "+e);return t}function H(e){var t=W(e);if(!t)throw new TypeError("Expected Array or iterable object of values: "+e);return t}function z(e){var t=W(e)||"object"==typeof e&&new Y(e);if(!t)throw new TypeError("Expected Array or iterable object of values, or keyed object: "+e);return t}function W(e){return T(e)?new A(e):x(e)?new R(e):S(e)?new I(e):void 0}function B(e,t,n,r){var o=e._cache;if(o){for(var i=o.length-1,a=0;a<=i;a++){var s=o[n?i-a:a];if(t(s[1],r?s[0]:a,e)===!1)return a+1}return a}return e.__iterateUncached(t,n)}function U(e,t,n,r){var o=e._cache;if(o){var i=o.length-1,a=0;return new w(function(){var e=o[n?i-a:a];return a++>i?k():M(t,r?e[0]:a-1,e[1])})}return e.__iteratorUncached(t,n)}function V(e,t){return t?G(t,e,"",{"":e}):q(e)}function G(e,t,n,r){return Array.isArray(t)?e.call(r,n,O(t).map(function(n,r){return G(e,n,r,t)})):K(t)?e.call(r,n,D(t).map(function(n,r){return G(e,n,r,t)})):t}function q(e){return Array.isArray(e)?O(e).map(q).toList():K(e)?D(e).map(q).toMap():e}function K(e){return e&&(e.constructor===Object||void 0===e.constructor)}function J(e,t){if(e===t||e!==e&&t!==t)return!0;if(!e||!t)return!1;if("function"==typeof e.valueOf&&"function"==typeof t.valueOf){if(e=e.valueOf(),t=t.valueOf(),e===t||e!==e&&t!==t)return!0;if(!e||!t)return!1}return!("function"!=typeof e.equals||"function"!=typeof t.equals||!e.equals(t))}function $(e,t){if(e===t)return!0;if(!i(t)||void 0!==e.size&&void 0!==t.size&&e.size!==t.size||void 0!==e.__hash&&void 0!==t.__hash&&e.__hash!==t.__hash||a(e)!==a(t)||s(e)!==s(t)||u(e)!==u(t))return!1;if(0===e.size&&0===t.size)return!0;var n=!l(e);if(u(e)){var r=e.entries();return t.every(function(e,t){var o=r.next().value;return o&&J(o[1],e)&&(n||J(o[0],t))})&&r.next().done}var o=!1;if(void 0===e.size)if(void 0===t.size)"function"==typeof e.cacheResult&&e.cacheResult();else{o=!0;var c=e;e=t,t=c}var d=!0,f=t.__iterate(function(t,r){if(n?!e.has(t):o?!J(t,e.get(r,gn)):!J(e.get(r,gn),t))return d=!1,!1});return d&&e.size===f}function X(e,t){if(!(this instanceof X))return new X(e,t);if(this._value=e,this.size=void 0===t?1/0:Math.max(0,t),0===this.size){if(Tn)return Tn;Tn=this}}function Q(e,t){if(!e)throw new Error(t)}function Z(e,t,n){if(!(this instanceof Z))return new Z(e,t,n);if(Q(0!==n,"Cannot step a Range by 0"),e=e||0,void 0===t&&(t=1/0),n=void 0===n?1:Math.abs(n),t>>1&1073741824|3221225471&e}function ie(e){if(e===!1||null===e||void 0===e)return 0;if("function"==typeof e.valueOf&&(e=e.valueOf(),e===!1||null===e||void 0===e))return 0;if(e===!0)return 1;var t=typeof e;if("number"===t){if(e!==e||e===1/0)return 0;var n=0|e;for(n!==e&&(n^=4294967295*e);e>4294967295;)e/=4294967295,n^=e;return oe(n)}if("string"===t)return e.length>jn?ae(e):se(e);if("function"==typeof e.hashCode)return e.hashCode();if("object"===t)return le(e);if("function"==typeof e.toString)return se(e.toString());throw new Error("Value type "+t+" cannot be hashed.")}function ae(e){var t=Hn[e];return void 0===t&&(t=se(e),Nn===Fn&&(Nn=0,Hn={}),Nn++,Hn[e]=t),t}function se(e){for(var t=0,n=0;n0)switch(e.nodeType){case 1:return e.uniqueID;case 9:return e.documentElement&&e.documentElement.uniqueID}}function ce(e){Q(e!==1/0,"Cannot perform this action with an infinite size.")}function de(e){return null===e||void 0===e?Me():fe(e)&&!u(e)?e:Me().withMutations(function(t){var r=n(e);ce(r.size),r.forEach(function(e,n){return t.set(n,e)})})}function fe(e){return!(!e||!e[zn])}function he(e,t){this.ownerID=e,this.entries=t}function pe(e,t,n){this.ownerID=e,this.bitmap=t,this.nodes=n}function me(e,t,n){this.ownerID=e,this.count=t,this.nodes=n}function _e(e,t,n){this.ownerID=e,this.keyHash=t,this.entries=n}function ge(e,t,n){this.ownerID=e,this.keyHash=t,this.entry=n}function ye(e,t,n){this._type=t,this._reverse=n,this._stack=e._root&&be(e._root)}function ve(e,t){return M(e,t[0],t[1])}function be(e,t){return{node:e,index:0,__prev:t}}function we(e,t,n,r){var o=Object.create(Wn);return o.size=e,o._root=t,o.__ownerID=n,o.__hash=r,o.__altered=!1,o}function Me(){return Bn||(Bn=we(0))}function ke(e,t,n){var r,o;if(e._root){var i=c(yn),a=c(vn);if(r=Se(e._root,e.__ownerID,0,void 0,t,n,i,a),!a.value)return e;o=e.size+(i.value?n===gn?-1:1:0)}else{if(n===gn)return e;o=1,r=new he(e.__ownerID,[[t,n]])}return e.__ownerID?(e.size=o,e._root=r,e.__hash=void 0,e.__altered=!0,e):r?we(o,r):Me()}function Se(e,t,n,r,o,i,a,s){return e?e.update(t,n,r,o,i,a,s):i===gn?e:(d(s),d(a),new ge(t,r,[o,i]))}function xe(e){return e.constructor===ge||e.constructor===_e}function Le(e,t,n,r,o){if(e.keyHash===r)return new _e(t,r,[e.entry,o]);var i,a=(0===n?e.keyHash:e.keyHash>>>n)&_n,s=(0===n?r:r>>>n)&_n,l=a===s?[Le(e,t,n+pn,r,o)]:(i=new ge(t,r,o),a>>=1)a[s]=1&n?t[i++]:void 0;return a[r]=o,new me(e,i+1,a)}function De(e,t,r){for(var o=[],a=0;a>1&1431655765,e=(858993459&e)+(e>>2&858993459),e=e+(e>>4)&252645135,e+=e>>8,e+=e>>16,127&e}function Re(e,t,n,r){var o=r?e:h(e);return o[t]=n,o}function je(e,t,n,r){var o=e.length+1;if(r&&t+1===o)return e[t]=n,e;for(var i=new Array(o),a=0,s=0;s0&&oi?0:i-n,u=a-n;return u>mn&&(u=mn),function(){if(o===u)return $n;var e=t?--u:o++;return r&&r[e]}}function o(e,r,o){var s,l=e&&e.array,u=o>i?0:i-o>>r,c=(a-o>>r)+1;return c>mn&&(c=mn),function(){for(;;){if(s){var e=s();if(e!==$n)return e;s=null}if(u===c)return $n;var i=t?--c:u++;s=n(l&&l[i],r-pn,o+(i<=e.size||t<0)return e.withMutations(function(e){t<0?Je(e,t).set(0,n):Je(e,0,t+1).set(t,n)});t+=e._origin;var r=e._tail,o=e._root,i=c(vn);return t>=Xe(e._capacity)?r=Ge(r,e.__ownerID,0,t,n,i):o=Ge(o,e.__ownerID,e._level,t,n,i),i.value?e.__ownerID?(e._root=o,e._tail=r,e.__hash=void 0,e.__altered=!0,e):Be(e._origin,e._capacity,e._level,o,r):e}function Ge(e,t,n,r,o,i){var a=r>>>n&_n,s=e&&a0){var u=e&&e.array[a],c=Ge(u,t,n-pn,r,o,i);return c===u?e:(l=qe(e,t),l.array[a]=c,l)}return s&&e.array[a]===o?e:(d(i),l=qe(e,t),void 0===o&&a===l.array.length-1?l.array.pop():l.array[a]=o,l)}function qe(e,t){return t&&e&&t===e.ownerID?e:new ze(e?e.array.slice():[],t)}function Ke(e,t){if(t>=Xe(e._capacity))return e._tail;if(t<1<0;)n=n.array[t>>>r&_n],r-=pn;return n}}function Je(e,t,n){void 0!==t&&(t|=0),void 0!==n&&(n|=0);var r=e.__ownerID||new f,o=e._origin,i=e._capacity,a=o+t,s=void 0===n?i:n<0?i+n:o+n;if(a===o&&s===i)return e;if(a>=s)return e.clear();for(var l=e._level,u=e._root,c=0;a+c<0;)u=new ze(u&&u.array.length?[void 0,u]:[],r),l+=pn,c+=1<=1<d?new ze([],r):p;if(p&&h>d&&apn;g-=pn){var y=d>>>g&_n;_=_.array[y]=qe(_.array[y],r)}_.array[d>>>pn&_n]=p}if(s=h)a-=h,s-=h,l=pn,u=null,m=m&&m.removeBefore(r,0,a);else if(a>o||h>>l&_n;if(v!==h>>>l&_n)break;v&&(c+=(1<o&&(u=u.removeBefore(r,l,a-c)),u&&ha&&(a=u.size),i(l)||(u=u.map(function(e){return V(e)})),o.push(u)}return a>e.size&&(e=e.setSize(a)),Ae(e,t,o)}function Xe(e){return e>>pn<=mn&&a.size>=2*i.size?(o=a.filter(function(e,t){return void 0!==e&&s!==t}),r=o.toKeyedSeq().map(function(e){return e[0]}).flip().toMap(),e.__ownerID&&(r.__ownerID=o.__ownerID=e.__ownerID)):(r=i.remove(t),o=s===a.size-1?a.pop():a.set(s,void 0))}else if(l){if(n===a.get(s)[1])return e;r=i,o=a.set(s,[t,n])}else r=i.set(t,a.size),o=a.set(a.size,[t,n]);return e.__ownerID?(e.size=r.size,e._map=r,e._list=o,e.__hash=void 0,e):et(r,o)}function rt(e,t){this._iter=e,this._useKeys=t,this.size=e.size}function ot(e){this._iter=e,this.size=e.size}function it(e){this._iter=e,this.size=e.size}function at(e){this._iter=e,this.size=e.size}function st(e){var t=Tt(e);return t._iter=e,t.size=e.size,t.flip=function(){return e},t.reverse=function(){var t=e.reverse.apply(this);return t.flip=function(){return e.reverse()},t},t.has=function(t){return e.includes(t)},t.includes=function(t){return e.has(t)},t.cacheResult=Et,t.__iterateUncached=function(t,n){var r=this;return e.__iterate(function(e,n){return t(n,e,r)!==!1},n)},t.__iteratorUncached=function(t,n){if(t===Mn){var r=e.__iterator(t,n);return new w(function(){var e=r.next();if(!e.done){var t=e.value[0];e.value[0]=e.value[1],e.value[1]=t}return e})}return e.__iterator(t===wn?bn:wn,n)},t}function lt(e,t,n){var r=Tt(e);return r.size=e.size,r.has=function(t){return e.has(t)},r.get=function(r,o){var i=e.get(r,gn);return i===gn?o:t.call(n,i,r,e)},r.__iterateUncached=function(r,o){var i=this;return e.__iterate(function(e,o,a){return r(t.call(n,e,o,a),o,i)!==!1},o)},r.__iteratorUncached=function(r,o){var i=e.__iterator(Mn,o);return new w(function(){var o=i.next();if(o.done)return o;var a=o.value,s=a[0];return M(r,s,t.call(n,a[1],s,e),o)})},r}function ut(e,t){var n=Tt(e);return n._iter=e,n.size=e.size,n.reverse=function(){return e},e.flip&&(n.flip=function(){var t=st(e);return t.reverse=function(){return e.flip()},t}),n.get=function(n,r){return e.get(t?n:-1-n,r)},n.has=function(n){return e.has(t?n:-1-n)},n.includes=function(t){return e.includes(t)},n.cacheResult=Et,n.__iterate=function(t,n){var r=this;return e.__iterate(function(e,n){return t(e,n,r)},!n)},n.__iterator=function(t,n){return e.__iterator(t,!n)},n}function ct(e,t,n,r){var o=Tt(e);return r&&(o.has=function(r){var o=e.get(r,gn);return o!==gn&&!!t.call(n,o,r,e)},o.get=function(r,o){var i=e.get(r,gn);return i!==gn&&t.call(n,i,r,e)?i:o}),o.__iterateUncached=function(o,i){var a=this,s=0;return e.__iterate(function(e,i,l){if(t.call(n,e,i,l))return s++,o(e,r?i:s-1,a)},i),s},o.__iteratorUncached=function(o,i){var a=e.__iterator(Mn,i),s=0;return new w(function(){for(;;){var i=a.next();if(i.done)return i;var l=i.value,u=l[0],c=l[1];if(t.call(n,c,u,e))return M(o,r?u:s++,c,i)}})},o}function dt(e,t,n){var r=de().asMutable();return e.__iterate(function(o,i){r.update(t.call(n,o,i,e),0,function(e){return e+1})}),r.asImmutable()}function ft(e,t,n){var r=a(e),o=(u(e)?Qe():de()).asMutable();e.__iterate(function(i,a){o.update(t.call(n,i,a,e),function(e){return e=e||[],e.push(r?[a,i]:i),e})});var i=Ct(e);return o.map(function(t){return St(e,i(t))})}function ht(e,t,n,r){var o=e.size;if(void 0!==t&&(t|=0),void 0!==n&&(n===1/0?n=o:n|=0),g(t,n,o))return e;var i=y(t,o),a=v(n,o);if(i!==i||a!==a)return ht(e.toSeq().cacheResult(),t,n,r);var s,l=a-i;l===l&&(s=l<0?0:l);var u=Tt(e);return u.size=0===s?s:e.size&&s||void 0,!r&&j(e)&&s>=0&&(u.get=function(t,n){return t=m(this,t),t>=0&&ts)return k();var e=o.next();return r||t===wn?e:t===bn?M(t,l-1,void 0,e):M(t,l-1,e.value[1],e)})},u}function pt(e,t,n){var r=Tt(e);return r.__iterateUncached=function(r,o){var i=this;if(o)return this.cacheResult().__iterate(r,o);var a=0;return e.__iterate(function(e,o,s){return t.call(n,e,o,s)&&++a&&r(e,o,i)}),a},r.__iteratorUncached=function(r,o){var i=this;if(o)return this.cacheResult().__iterator(r,o);var a=e.__iterator(Mn,o),s=!0;return new w(function(){if(!s)return k();var e=a.next();if(e.done)return e;var o=e.value,l=o[0],u=o[1];return t.call(n,u,l,i)?r===Mn?e:M(r,l,u,e):(s=!1,k())})},r}function mt(e,t,n,r){var o=Tt(e);return o.__iterateUncached=function(o,i){var a=this;if(i)return this.cacheResult().__iterate(o,i);var s=!0,l=0;return e.__iterate(function(e,i,u){if(!s||!(s=t.call(n,e,i,u)))return l++,o(e,r?i:l-1,a)}),l},o.__iteratorUncached=function(o,i){var a=this;if(i)return this.cacheResult().__iterator(o,i);var s=e.__iterator(Mn,i),l=!0,u=0;return new w(function(){var e,i,c;do{if(e=s.next(),e.done)return r||o===wn?e:o===bn?M(o,u++,void 0,e):M(o,u++,e.value[1],e);var d=e.value;i=d[0],c=d[1],l&&(l=t.call(n,c,i,a))}while(l);return o===Mn?e:M(o,i,c,e)})},o}function _t(e,t){var r=a(e),o=[e].concat(t).map(function(e){return i(e)?r&&(e=n(e)):e=r?N(e):H(Array.isArray(e)?e:[e]),e}).filter(function(e){return 0!==e.size});if(0===o.length)return e; if(1===o.length){var l=o[0];if(l===e||r&&a(l)||s(e)&&s(l))return l}var u=new A(o);return r?u=u.toKeyedSeq():s(e)||(u=u.toSetSeq()),u=u.flatten(!0),u.size=o.reduce(function(e,t){if(void 0!==e){var n=t.size;if(void 0!==n)return e+n}},0),u}function gt(e,t,n){var r=Tt(e);return r.__iterateUncached=function(r,o){function a(e,u){var c=this;e.__iterate(function(e,o){return(!t||u0}function kt(e,n,r){var o=Tt(e);return o.size=new A(r).map(function(e){return e.size}).min(),o.__iterate=function(e,t){for(var n,r=this.__iterator(wn,t),o=0;!(n=r.next()).done&&e(n.value,o++,this)!==!1;);return o},o.__iteratorUncached=function(e,o){var i=r.map(function(e){return e=t(e),L(o?e.reverse():e)}),a=0,s=!1;return new w(function(){var t;return s||(t=i.map(function(e){return e.next()}),s=t.some(function(e){return e.done})),s?k():M(e,a++,n.apply(null,t.map(function(e){return e.value})))})},o}function St(e,t){return j(e)?t:e.constructor(t)}function xt(e){if(e!==Object(e))throw new TypeError("Expected [K, V] tuple: "+e)}function Lt(e){return ce(e.size),p(e)}function Ct(e){return a(e)?n:s(e)?r:o}function Tt(e){return Object.create((a(e)?D:s(e)?O:P).prototype)}function Et(){return this._iter.cacheResult?(this._iter.cacheResult(),this.size=this._iter.size,this):E.prototype.cacheResult.call(this)}function Dt(e,t){return e>t?1:et?-1:0}function on(e){if(e.size===1/0)return 0;var t=u(e),n=a(e),r=t?1:0,o=e.__iterate(n?t?function(e,t){r=31*r+sn(ie(e),ie(t))|0}:function(e,t){r=r+sn(ie(e),ie(t))|0}:t?function(e){r=31*r+ie(e)|0}:function(e){r=r+ie(e)|0});return an(o,r)}function an(e,t){return t=On(t,3432918353),t=On(t<<15|t>>>-15,461845907),t=On(t<<13|t>>>-13,5),t=(t+3864292196|0)^e,t=On(t^t>>>16,2246822507),t=On(t^t>>>13,3266489909),t=oe(t^t>>>16)}function sn(e,t){return e^t+2654435769+(e<<6)+(e>>2)|0}var ln=Array.prototype.slice;e(n,t),e(r,t),e(o,t),t.isIterable=i,t.isKeyed=a,t.isIndexed=s,t.isAssociative=l,t.isOrdered=u,t.Keyed=n,t.Indexed=r,t.Set=o;var un="@@__IMMUTABLE_ITERABLE__@@",cn="@@__IMMUTABLE_KEYED__@@",dn="@@__IMMUTABLE_INDEXED__@@",fn="@@__IMMUTABLE_ORDERED__@@",hn="delete",pn=5,mn=1<r?k():M(e,o,n[t?r-o++:o++])})},e(Y,D),Y.prototype.get=function(e,t){return void 0===t||this.has(e)?this._object[e]:t},Y.prototype.has=function(e){return this._object.hasOwnProperty(e)},Y.prototype.__iterate=function(e,t){for(var n=this._object,r=this._keys,o=r.length-1,i=0;i<=o;i++){var a=r[t?o-i:i];if(e(n[a],a,this)===!1)return i+1}return i},Y.prototype.__iterator=function(e,t){var n=this._object,r=this._keys,o=r.length-1,i=0;return new w(function(){var a=r[t?o-i:i];return i++>o?k():M(e,a,n[a])})},Y.prototype[fn]=!0,e(I,O),I.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);var n=this._iterable,r=L(n),o=0;if(x(r))for(var i;!(i=r.next()).done&&e(i.value,o++,this)!==!1;);return o},I.prototype.__iteratorUncached=function(e,t){if(t)return this.cacheResult().__iterator(e,t);var n=this._iterable,r=L(n);if(!x(r))return new w(k);var o=0;return new w(function(){var t=r.next();return t.done?t:M(e,o++,t.value)})},e(R,O),R.prototype.__iterateUncached=function(e,t){if(t)return this.cacheResult().__iterate(e,t);for(var n=this._iterator,r=this._iteratorCache,o=0;o=r.length){var t=n.next();if(t.done)return t;r[o]=t.value}return M(e,o,r[o++])})};var Cn;e(X,O),X.prototype.toString=function(){return 0===this.size?"Repeat []":"Repeat [ "+this._value+" "+this.size+" times ]"},X.prototype.get=function(e,t){return this.has(e)?this._value:t},X.prototype.includes=function(e){return J(this._value,e)},X.prototype.slice=function(e,t){var n=this.size;return g(e,t,n)?this:new X(this._value,v(t,n)-y(e,n))},X.prototype.reverse=function(){return this},X.prototype.indexOf=function(e){return J(this._value,e)?0:-1},X.prototype.lastIndexOf=function(e){return J(this._value,e)?this.size:-1},X.prototype.__iterate=function(e,t){for(var n=0;n=0&&t=0&&nn?k():M(e,i++,a)})},Z.prototype.equals=function(e){return e instanceof Z?this._start===e._start&&this._end===e._end&&this._step===e._step:$(this,e)};var En;e(ee,t),e(te,ee),e(ne,ee),e(re,ee),ee.Keyed=te,ee.Indexed=ne,ee.Set=re;var Dn,On="function"==typeof Math.imul&&Math.imul(4294967295,2)===-2?Math.imul:function(e,t){e|=0,t|=0;var n=65535&e,r=65535&t;return n*r+((e>>>16)*r+n*(t>>>16)<<16>>>0)|0},Pn=Object.isExtensible,An=function(){try{return Object.defineProperty({},"@",{}),!0}catch(e){return!1}}(),Yn="function"==typeof WeakMap;Yn&&(Dn=new WeakMap);var In=0,Rn="__immutablehash__";"function"==typeof Symbol&&(Rn=Symbol(Rn));var jn=16,Fn=255,Nn=0,Hn={};e(de,te),de.of=function(){var e=ln.call(arguments,0);return Me().withMutations(function(t){for(var n=0;n=e.length)throw new Error("Missing value for key: "+e[n]);t.set(e[n],e[n+1])}})},de.prototype.toString=function(){return this.__toString("Map {","}")},de.prototype.get=function(e,t){return this._root?this._root.get(0,void 0,e,t):t},de.prototype.set=function(e,t){return ke(this,e,t)},de.prototype.setIn=function(e,t){return this.updateIn(e,gn,function(){return t})},de.prototype.remove=function(e){return ke(this,e,gn)},de.prototype.deleteIn=function(e){return this.updateIn(e,function(){return gn})},de.prototype.update=function(e,t,n){return 1===arguments.length?e(this):this.updateIn([e],t,n)},de.prototype.updateIn=function(e,t,n){n||(n=t,t=void 0);var r=Ye(this,Ot(e),t,n);return r===gn?void 0:r},de.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._root=null,this.__hash=void 0,this.__altered=!0,this):Me()},de.prototype.merge=function(){return De(this,void 0,arguments)},de.prototype.mergeWith=function(e){var t=ln.call(arguments,1);return De(this,e,t)},de.prototype.mergeIn=function(e){var t=ln.call(arguments,1);return this.updateIn(e,Me(),function(e){return"function"==typeof e.merge?e.merge.apply(e,t):t[t.length-1]})},de.prototype.mergeDeep=function(){return De(this,Oe,arguments)},de.prototype.mergeDeepWith=function(e){var t=ln.call(arguments,1);return De(this,Pe(e),t)},de.prototype.mergeDeepIn=function(e){var t=ln.call(arguments,1);return this.updateIn(e,Me(),function(e){return"function"==typeof e.mergeDeep?e.mergeDeep.apply(e,t):t[t.length-1]})},de.prototype.sort=function(e){return Qe(bt(this,e))},de.prototype.sortBy=function(e,t){return Qe(bt(this,t,e))},de.prototype.withMutations=function(e){var t=this.asMutable();return e(t),t.wasAltered()?t.__ensureOwner(this.__ownerID):this},de.prototype.asMutable=function(){return this.__ownerID?this:this.__ensureOwner(new f)},de.prototype.asImmutable=function(){return this.__ensureOwner()},de.prototype.wasAltered=function(){return this.__altered},de.prototype.__iterator=function(e,t){return new ye(this,e,t)},de.prototype.__iterate=function(e,t){var n=this,r=0;return this._root&&this._root.iterate(function(t){return r++,e(t[1],t[0],n)},t),r},de.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?we(this.size,this._root,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},de.isMap=fe;var zn="@@__IMMUTABLE_MAP__@@",Wn=de.prototype;Wn[zn]=!0,Wn[hn]=Wn.remove,Wn.removeIn=Wn.deleteIn,he.prototype.get=function(e,t,n,r){for(var o=this.entries,i=0,a=o.length;i=Un)return Ce(e,l,r,o);var p=e&&e===this.ownerID,m=p?l:h(l);return f?s?u===c-1?m.pop():m[u]=m.pop():m[u]=[r,o]:m.push([r,o]),p?(this.entries=m,this):new he(e,m)}},pe.prototype.get=function(e,t,n,r){void 0===t&&(t=ie(n));var o=1<<((0===e?t:t>>>e)&_n),i=this.bitmap;return 0===(i&o)?r:this.nodes[Ie(i&o-1)].get(e+pn,t,n,r)},pe.prototype.update=function(e,t,n,r,o,i,a){void 0===n&&(n=ie(r));var s=(0===t?n:n>>>t)&_n,l=1<=Vn)return Ee(e,f,u,s,p);if(c&&!p&&2===f.length&&xe(f[1^d]))return f[1^d];if(c&&p&&1===f.length&&xe(p))return p;var m=e&&e===this.ownerID,_=c?p?u:u^l:u|l,g=c?p?Re(f,d,p,m):Fe(f,d,m):je(f,d,p,m);return m?(this.bitmap=_,this.nodes=g,this):new pe(e,_,g)},me.prototype.get=function(e,t,n,r){void 0===t&&(t=ie(n));var o=(0===e?t:t>>>e)&_n,i=this.nodes[o];return i?i.get(e+pn,t,n,r):r},me.prototype.update=function(e,t,n,r,o,i,a){void 0===n&&(n=ie(r));var s=(0===t?n:n>>>t)&_n,l=o===gn,u=this.nodes,c=u[s];if(l&&!c)return this;var d=Se(c,e,t+pn,n,r,o,i,a);if(d===c)return this;var f=this.count;if(c){if(!d&&(f--,f=0&&e>>t&_n;if(r>=this.array.length)return new ze([],e);var o,i=0===r;if(t>0){var a=this.array[r];if(o=a&&a.removeBefore(e,t-pn,n),o===a&&i)return this}if(i&&!o)return this;var s=qe(this,e);if(!i)for(var l=0;l>>t&_n;if(r>=this.array.length)return this;var o;if(t>0){var i=this.array[r];if(o=i&&i.removeAfter(e,t-pn,n),o===i&&r===this.array.length-1)return this}var a=qe(this,e);return a.array.splice(r+1),o&&(a.array[r]=o),a};var Jn,$n={};e(Qe,de),Qe.of=function(){return this(arguments)},Qe.prototype.toString=function(){return this.__toString("OrderedMap {","}")},Qe.prototype.get=function(e,t){var n=this._map.get(e);return void 0!==n?this._list.get(n)[1]:t},Qe.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._map.clear(),this._list.clear(),this):tt()},Qe.prototype.set=function(e,t){return nt(this,e,t)},Qe.prototype.remove=function(e){return nt(this,e,gn)},Qe.prototype.wasAltered=function(){return this._map.wasAltered()||this._list.wasAltered()},Qe.prototype.__iterate=function(e,t){var n=this;return this._list.__iterate(function(t){return t&&e(t[1],t[0],n)},t)},Qe.prototype.__iterator=function(e,t){return this._list.fromEntrySeq().__iterator(e,t)},Qe.prototype.__ensureOwner=function(e){if(e===this.__ownerID)return this;var t=this._map.__ensureOwner(e),n=this._list.__ensureOwner(e);return e?et(t,n,e,this.__hash):(this.__ownerID=e,this._map=t,this._list=n,this)},Qe.isOrderedMap=Ze,Qe.prototype[fn]=!0,Qe.prototype[hn]=Qe.prototype.remove;var Xn;e(rt,D),rt.prototype.get=function(e,t){return this._iter.get(e,t)},rt.prototype.has=function(e){return this._iter.has(e)},rt.prototype.valueSeq=function(){return this._iter.valueSeq()},rt.prototype.reverse=function(){var e=this,t=ut(this,!0);return this._useKeys||(t.valueSeq=function(){return e._iter.toSeq().reverse()}),t},rt.prototype.map=function(e,t){var n=this,r=lt(this,e,t);return this._useKeys||(r.valueSeq=function(){return n._iter.toSeq().map(e,t)}),r},rt.prototype.__iterate=function(e,t){var n,r=this;return this._iter.__iterate(this._useKeys?function(t,n){return e(t,n,r)}:(n=t?Lt(this):0,function(o){return e(o,t?--n:n++,r)}),t)},rt.prototype.__iterator=function(e,t){if(this._useKeys)return this._iter.__iterator(e,t);var n=this._iter.__iterator(wn,t),r=t?Lt(this):0;return new w(function(){var o=n.next();return o.done?o:M(e,t?--r:r++,o.value,o)})},rt.prototype[fn]=!0,e(ot,O),ot.prototype.includes=function(e){return this._iter.includes(e)},ot.prototype.__iterate=function(e,t){var n=this,r=0;return this._iter.__iterate(function(t){return e(t,r++,n)},t)},ot.prototype.__iterator=function(e,t){var n=this._iter.__iterator(wn,t),r=0;return new w(function(){var t=n.next();return t.done?t:M(e,r++,t.value,t)})},e(it,P),it.prototype.has=function(e){return this._iter.includes(e)},it.prototype.__iterate=function(e,t){var n=this;return this._iter.__iterate(function(t){return e(t,t,n)},t)},it.prototype.__iterator=function(e,t){var n=this._iter.__iterator(wn,t);return new w(function(){var t=n.next();return t.done?t:M(e,t.value,t.value,t)})},e(at,D),at.prototype.entrySeq=function(){return this._iter.toSeq()},at.prototype.__iterate=function(e,t){var n=this;return this._iter.__iterate(function(t){if(t){xt(t);var r=i(t);return e(r?t.get(1):t[1],r?t.get(0):t[0],n)}},t)},at.prototype.__iterator=function(e,t){var n=this._iter.__iterator(wn,t);return new w(function(){for(;;){var t=n.next();if(t.done)return t;var r=t.value;if(r){xt(r);var o=i(r);return M(e,o?r.get(0):r[0],o?r.get(1):r[1],t)}}})},ot.prototype.cacheResult=rt.prototype.cacheResult=it.prototype.cacheResult=at.prototype.cacheResult=Et,e(Pt,te),Pt.prototype.toString=function(){return this.__toString(Yt(this)+" {","}")},Pt.prototype.has=function(e){return this._defaultValues.hasOwnProperty(e)},Pt.prototype.get=function(e,t){if(!this.has(e))return t;var n=this._defaultValues[e];return this._map?this._map.get(e,n):n},Pt.prototype.clear=function(){if(this.__ownerID)return this._map&&this._map.clear(),this;var e=this.constructor;return e._empty||(e._empty=At(this,Me()))},Pt.prototype.set=function(e,t){if(!this.has(e))throw new Error('Cannot set unknown key "'+e+'" on '+Yt(this));if(this._map&&!this._map.has(e)){var n=this._defaultValues[e];if(t===n)return this}var r=this._map&&this._map.set(e,t);return this.__ownerID||r===this._map?this:At(this,r)},Pt.prototype.remove=function(e){if(!this.has(e))return this;var t=this._map&&this._map.remove(e);return this.__ownerID||t===this._map?this:At(this,t)},Pt.prototype.wasAltered=function(){return this._map.wasAltered()},Pt.prototype.__iterator=function(e,t){var r=this;return n(this._defaultValues).map(function(e,t){return r.get(t)}).__iterator(e,t)},Pt.prototype.__iterate=function(e,t){var r=this;return n(this._defaultValues).map(function(e,t){return r.get(t)}).__iterate(e,t)},Pt.prototype.__ensureOwner=function(e){if(e===this.__ownerID)return this;var t=this._map&&this._map.__ensureOwner(e);return e?At(this,t,e):(this.__ownerID=e,this._map=t,this)};var Qn=Pt.prototype;Qn[hn]=Qn.remove,Qn.deleteIn=Qn.removeIn=Wn.removeIn,Qn.merge=Wn.merge,Qn.mergeWith=Wn.mergeWith,Qn.mergeIn=Wn.mergeIn,Qn.mergeDeep=Wn.mergeDeep,Qn.mergeDeepWith=Wn.mergeDeepWith,Qn.mergeDeepIn=Wn.mergeDeepIn,Qn.setIn=Wn.setIn,Qn.update=Wn.update,Qn.updateIn=Wn.updateIn,Qn.withMutations=Wn.withMutations,Qn.asMutable=Wn.asMutable,Qn.asImmutable=Wn.asImmutable,e(jt,re),jt.of=function(){return this(arguments)},jt.fromKeys=function(e){return this(n(e).keySeq())},jt.prototype.toString=function(){return this.__toString("Set {","}")},jt.prototype.has=function(e){return this._map.has(e)},jt.prototype.add=function(e){return Nt(this,this._map.set(e,!0))},jt.prototype.remove=function(e){return Nt(this,this._map.remove(e))},jt.prototype.clear=function(){return Nt(this,this._map.clear())},jt.prototype.union=function(){var e=ln.call(arguments,0);return e=e.filter(function(e){return 0!==e.size}),0===e.length?this:0!==this.size||this.__ownerID||1!==e.length?this.withMutations(function(t){for(var n=0;n=0;n--)t={value:arguments[n],next:t};return this.__ownerID?(this.size=e,this._head=t,this.__hash=void 0,this.__altered=!0,this):Kt(e,t)},Gt.prototype.pushAll=function(e){if(e=r(e),0===e.size)return this;ce(e.size);var t=this.size,n=this._head;return e.reverse().forEach(function(e){t++,n={value:e,next:n}}),this.__ownerID?(this.size=t,this._head=n,this.__hash=void 0,this.__altered=!0,this):Kt(t,n)},Gt.prototype.pop=function(){return this.slice(1)},Gt.prototype.unshift=function(){return this.push.apply(this,arguments)},Gt.prototype.unshiftAll=function(e){return this.pushAll(e)},Gt.prototype.shift=function(){return this.pop.apply(this,arguments)},Gt.prototype.clear=function(){return 0===this.size?this:this.__ownerID?(this.size=0,this._head=void 0,this.__hash=void 0,this.__altered=!0,this):Jt()},Gt.prototype.slice=function(e,t){if(g(e,t,this.size))return this;var n=y(e,this.size),r=v(t,this.size);if(r!==this.size)return ne.prototype.slice.call(this,e,t);for(var o=this.size-n,i=this._head;n--;)i=i.next;return this.__ownerID?(this.size=o,this._head=i,this.__hash=void 0,this.__altered=!0,this):Kt(o,i)},Gt.prototype.__ensureOwner=function(e){return e===this.__ownerID?this:e?Kt(this.size,this._head,e,this.__hash):(this.__ownerID=e,this.__altered=!1,this)},Gt.prototype.__iterate=function(e,t){if(t)return this.reverse().__iterate(e);for(var n=0,r=this._head;r&&e(r.value,n++,this)!==!1;)r=r.next;return n},Gt.prototype.__iterator=function(e,t){if(t)return this.reverse().__iterator(e);var n=0,r=this._head;return new w(function(){if(r){var t=r.value;return r=r.next,M(e,n++,t)}return k()})},Gt.isStack=qt;var or="@@__IMMUTABLE_STACK__@@",ir=Gt.prototype;ir[or]=!0,ir.withMutations=Wn.withMutations,ir.asMutable=Wn.asMutable,ir.asImmutable=Wn.asImmutable,ir.wasAltered=Wn.wasAltered;var ar;t.Iterator=w,$t(t,{toArray:function(){ce(this.size);var e=new Array(this.size||0);return this.valueSeq().__iterate(function(t,n){e[n]=t}),e},toIndexedSeq:function(){return new ot(this)},toJS:function(){return this.toSeq().map(function(e){return e&&"function"==typeof e.toJS?e.toJS():e}).__toJS()},toJSON:function(){return this.toSeq().map(function(e){return e&&"function"==typeof e.toJSON?e.toJSON():e}).__toJS()},toKeyedSeq:function(){return new rt(this,!0)},toMap:function(){return de(this.toKeyedSeq())},toObject:function(){ce(this.size);var e={};return this.__iterate(function(t,n){e[n]=t}),e},toOrderedMap:function(){return Qe(this.toKeyedSeq())},toOrderedSet:function(){return Wt(a(this)?this.valueSeq():this)},toSet:function(){return jt(a(this)?this.valueSeq():this)},toSetSeq:function(){return new it(this)},toSeq:function(){return s(this)?this.toIndexedSeq():a(this)?this.toKeyedSeq():this.toSetSeq()},toStack:function(){return Gt(a(this)?this.valueSeq():this)},toList:function(){return Ne(a(this)?this.valueSeq():this)},toString:function(){return"[Iterable]"},__toString:function(e,t){return 0===this.size?e+t:e+" "+this.toSeq().map(this.__toStringMapper).join(", ")+" "+t},concat:function(){var e=ln.call(arguments,0);return St(this,_t(this,e))},includes:function(e){return this.some(function(t){return J(t,e)})},entries:function(){return this.__iterator(Mn)},every:function(e,t){ce(this.size);var n=!0;return this.__iterate(function(r,o,i){if(!e.call(t,r,o,i))return n=!1,!1}),n},filter:function(e,t){return St(this,ct(this,e,t,!0))},find:function(e,t,n){var r=this.findEntry(e,t);return r?r[1]:n},forEach:function(e,t){return ce(this.size),this.__iterate(t?e.bind(t):e)},join:function(e){ce(this.size),e=void 0!==e?""+e:",";var t="",n=!0;return this.__iterate(function(r){n?n=!1:t+=e,t+=null!==r&&void 0!==r?r.toString():""}),t},keys:function(){return this.__iterator(bn)},map:function(e,t){return St(this,lt(this,e,t))},reduce:function(e,t,n){ce(this.size);var r,o;return arguments.length<2?o=!0:r=t,this.__iterate(function(t,i,a){o?(o=!1, -r=t):r=e.call(n,r,t,i,a)}),r},reduceRight:function(e,t,n){var r=this.toKeyedSeq().reverse();return r.reduce.apply(r,arguments)},reverse:function(){return St(this,ut(this,!0))},slice:function(e,t){return St(this,ht(this,e,t,!0))},some:function(e,t){return!this.every(Zt(e),t)},sort:function(e){return St(this,bt(this,e))},values:function(){return this.__iterator(wn)},butLast:function(){return this.slice(0,-1)},isEmpty:function(){return void 0!==this.size?0===this.size:!this.some(function(){return!0})},count:function(e,t){return p(e?this.toSeq().filter(e,t):this)},countBy:function(e,t){return dt(this,e,t)},equals:function(e){return $(this,e)},entrySeq:function(){var e=this;if(e._cache)return new A(e._cache);var t=e.toSeq().map(Qt).toIndexedSeq();return t.fromEntrySeq=function(){return e.toSeq()},t},filterNot:function(e,t){return this.filter(Zt(e),t)},findEntry:function(e,t,n){var r=n;return this.__iterate(function(n,o,i){if(e.call(t,n,o,i))return r=[o,n],!1}),r},findKey:function(e,t){var n=this.findEntry(e,t);return n&&n[0]},findLast:function(e,t,n){return this.toKeyedSeq().reverse().find(e,t,n)},findLastEntry:function(e,t,n){return this.toKeyedSeq().reverse().findEntry(e,t,n)},findLastKey:function(e,t){return this.toKeyedSeq().reverse().findKey(e,t)},first:function(){return this.find(_)},flatMap:function(e,t){return St(this,yt(this,e,t))},flatten:function(e){return St(this,gt(this,e,!0))},fromEntrySeq:function(){return new at(this)},get:function(e,t){return this.find(function(t,n){return J(n,e)},void 0,t)},getIn:function(e,t){for(var n,r=this,o=Ot(e);!(n=o.next()).done;){var i=n.value;if(r=r&&r.get?r.get(i,gn):gn,r===gn)return t}return r},groupBy:function(e,t){return ft(this,e,t)},has:function(e){return this.get(e,gn)!==gn},hasIn:function(e){return this.getIn(e,gn)!==gn},isSubset:function(e){return e="function"==typeof e.includes?e:t(e),this.every(function(t){return e.includes(t)})},isSuperset:function(e){return e="function"==typeof e.isSubset?e:t(e),e.isSubset(this)},keyOf:function(e){return this.findKey(function(t){return J(t,e)})},keySeq:function(){return this.toSeq().map(Xt).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},lastKeyOf:function(e){return this.toKeyedSeq().reverse().keyOf(e)},max:function(e){return wt(this,e)},maxBy:function(e,t){return wt(this,t,e)},min:function(e){return wt(this,e?en(e):rn)},minBy:function(e,t){return wt(this,t?en(t):rn,e)},rest:function(){return this.slice(1)},skip:function(e){return this.slice(Math.max(0,e))},skipLast:function(e){return St(this,this.toSeq().reverse().skip(e).reverse())},skipWhile:function(e,t){return St(this,mt(this,e,t,!0))},skipUntil:function(e,t){return this.skipWhile(Zt(e),t)},sortBy:function(e,t){return St(this,bt(this,t,e))},take:function(e){return this.slice(0,Math.max(0,e))},takeLast:function(e){return St(this,this.toSeq().reverse().take(e).reverse())},takeWhile:function(e,t){return St(this,pt(this,e,t))},takeUntil:function(e,t){return this.takeWhile(Zt(e),t)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=on(this))}});var sr=t.prototype;sr[un]=!0,sr[xn]=sr.values,sr.__toJS=sr.toArray,sr.__toStringMapper=tn,sr.inspect=sr.toSource=function(){return this.toString()},sr.chain=sr.flatMap,sr.contains=sr.includes,$t(n,{flip:function(){return St(this,st(this))},mapEntries:function(e,t){var n=this,r=0;return St(this,this.toSeq().map(function(o,i){return e.call(t,[i,o],r++,n)}).fromEntrySeq())},mapKeys:function(e,t){var n=this;return St(this,this.toSeq().flip().map(function(r,o){return e.call(t,r,o,n)}).flip())}});var lr=n.prototype;lr[cn]=!0,lr[xn]=sr.entries,lr.__toJS=sr.toObject,lr.__toStringMapper=function(e,t){return JSON.stringify(t)+": "+tn(e)},$t(r,{toKeyedSeq:function(){return new rt(this,!1)},filter:function(e,t){return St(this,ct(this,e,t,!1))},findIndex:function(e,t){var n=this.findEntry(e,t);return n?n[0]:-1},indexOf:function(e){var t=this.keyOf(e);return void 0===t?-1:t},lastIndexOf:function(e){var t=this.lastKeyOf(e);return void 0===t?-1:t},reverse:function(){return St(this,ut(this,!1))},slice:function(e,t){return St(this,ht(this,e,t,!1))},splice:function(e,t){var n=arguments.length;if(t=Math.max(0|t,0),0===n||2===n&&!t)return this;e=y(e,e<0?this.count():this.size);var r=this.slice(0,e);return St(this,1===n?r:r.concat(h(arguments,2),this.slice(e+t)))},findLastIndex:function(e,t){var n=this.findLastEntry(e,t);return n?n[0]:-1},first:function(){return this.get(0)},flatten:function(e){return St(this,gt(this,e,!1))},get:function(e,t){return e=m(this,e),e<0||this.size===1/0||void 0!==this.size&&e>this.size?t:this.find(function(t,n){return n===e},void 0,t)},has:function(e){return e=m(this,e),e>=0&&(void 0!==this.size?this.size===1/0||e0&&this._events[e].length>o&&(this._events[e].warned=!0,console.error("(node) warning: possible EventEmitter memory leak detected. %d listeners added. Use emitter.setMaxListeners() to increase limit.",this._events[e].length),"function"==typeof console.trace&&console.trace())),this},n.prototype.on=n.prototype.addListener,n.prototype.once=function(e,t){function n(){this.removeListener(e,n),o||(o=!0,t.apply(this,arguments))}if(!r(t))throw TypeError("listener must be a function");var o=!1;return n.listener=t,this.on(e,n),this},n.prototype.removeListener=function(e,t){var n,o,a,s;if(!r(t))throw TypeError("listener must be a function");if(!this._events||!this._events[e])return this;if(n=this._events[e],a=n.length,o=-1,n===t||r(n.listener)&&n.listener===t)delete this._events[e],this._events.removeListener&&this.emit("removeListener",e,t);else if(i(n)){for(s=a;s-- >0;)if(n[s]===t||n[s].listener&&n[s].listener===t){o=s;break}if(o<0)return this;1===n.length?(n.length=0,delete this._events[e]):n.splice(o,1),this._events.removeListener&&this.emit("removeListener",e,t)}return this},n.prototype.removeAllListeners=function(e){var t,n;if(!this._events)return this;if(!this._events.removeListener)return 0===arguments.length?this._events={}:this._events[e]&&delete this._events[e],this;if(0===arguments.length){for(t in this._events)"removeListener"!==t&&this.removeAllListeners(t);return this.removeAllListeners("removeListener"),this._events={},this}if(n=this._events[e],r(n))this.removeListener(e,n);else if(n)for(;n.length;)this.removeListener(e,n[n.length-1]);return delete this._events[e],this},n.prototype.listeners=function(e){var t;return t=this._events&&this._events[e]?r(this._events[e])?[this._events[e]]:this._events[e].slice():[]},n.prototype.listenerCount=function(e){if(this._events){var t=this._events[e];if(r(t))return 1;if(t)return t.length}return 0},n.listenerCount=function(e,t){return e.listenerCount(t)}},function(e,t,n){function r(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t]/;e.exports=r},function(e,t,n){"use strict";var r,o=n(17),i=n(111),a=/^[ \r\n\t\f]/,s=/<(!--|link|noscript|meta|script|style)[ \r\n\t\f\/>]/,l=n(119),u=l(function(e,t){if(e.namespaceURI!==i.svg||"innerHTML"in e)e.innerHTML=t;else{r=r||document.createElement("div"),r.innerHTML=""+t+"";for(var n=r.firstChild;n.firstChild;)e.appendChild(n.firstChild)}});if(o.canUseDOM){var c=document.createElement("div");c.innerHTML=" ",""===c.innerHTML&&(u=function(e,t){if(e.parentNode&&e.parentNode.replaceChild(e,e),a.test(t)||"<"===t[0]&&s.test(t)){e.innerHTML=String.fromCharCode(65279)+t;var n=e.firstChild;1===n.data.length?e.removeChild(n):n.deleteData(0,1)}else e.innerHTML=t}),c=null}e.exports=u},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol?"symbol":typeof e},i=Object.assign||function(e){for(var t=1;t1?r-1:0),i=1;i0?t:20,order:"LAST_TO_FIRST"},authorization:r}).promise.then(function(t){t.hasOwnProperty("values")&&(e.data=t.values.reverse(),e.data.forEach(function(t){t.name=e.name,t.parent=e.parentPath,t.uuid=e.uuid}),l.dispatch({type:n,item:e}))}).catch(_.Error,function(e){a(e)})})}function o(e){var t=e,n=e.indexOf("devices/");return 0===n&&(t=e.replace("devices/","")),t}function i(e,t,n){var r=o(e.topic);new _.Exchange({method:"historian.query",params:{topic:r,count:20,order:"LAST_TO_FIRST"},authorization:n}).promise.then(function(n){if(n.hasOwnProperty("values")){e.data=n.values.reverse(),e.data.forEach(function(t){t.name=e.name,t.parent=e.parentPath,t.uuid=e.uuid}),l.dispatch({type:s.SHOW_CHARTS,emitChange:null===t||"undefined"==typeof t||t}),l.dispatch({type:s.ADD_TO_CHART,panelItem:e}),p.checkItem(e.path,!0);var o=c.getPinnedCharts(),i=o.find(function(t){return t.chartKey===e.name});i&&m.saveCharts(o)}else{var u="Unable to load chart: An unknown problem occurred.",d="center",h={},_=null;if(e.path&&e.path.length>1){var g=e.path.slice(0,1),y=e.path[1],v=f.getItem(g);u="Unable to load chart: No data was retrieved for "+r+". Check for proper configuration of any forwarder, master driver, and platform agents on platform '"+v[y].name+"'.",d="left",_=r}p.checkItem(e.path,!1),a(h,u,_,d)}}).catch(_.Error,function(t){var n,r="Unable to load chart: "+t.message;if(t.code===-32602)"historian unavailable"===t.message&&(r="Unable to load chart: The platform historian is unavailable on the VOLTTRON Central platform.",n="left");else{var o=d.getVcInstance();if(o){var i=d.getVcHistorianRunning(o);i||(r="Unable to load chart: The platform historian is unavailable on the VOLTTRON Central platform.",n="left")}else r="Unable to load chart: An unknown problem occurred.",n="left"}p.checkItem(e.path,!1),a(t,r,null,n)})}function a(e,t,n,r){e.code&&401===e.code||e.response&&401===e.response.status?(l.dispatch({type:s.RECEIVE_UNAUTHORIZED,error:e}),l.dispatch({type:s.CLEAR_AUTHORIZATION})):t&&h.openStatusIndicator("error",t,n,r)}var s=n(7),l=n(9),u=n(19),c=n(39),d=n(36),f=n(40),h=n(20),p=n(35),m=n(34),_=n(57),g={pinChart:function(e){l.dispatch({type:s.PIN_CHART,chartKey:e})},setType:function(e,t){l.dispatch({type:s.CHANGE_CHART_TYPE,chartKey:e,chartType:t})},changeRefreshRate:function(e,t){l.dispatch({type:s.CHANGE_CHART_REFRESH,rate:e,chartKey:t})},changeDataLength:function(e,t){l.dispatch({type:s.CHANGE_CHART_LENGTH,length:e,chartKey:t})},refreshChart:function(e,t){r(e,t,s.REFRESH_CHART)},addToChart:function(e,t){var n=u.getAuthorization();i(e,t,n)},addToCharts:function(e){var t=u.getAuthorization(),n=!1;e.forEach(function(e){i(e,n,t)})},removeFromChart:function(e){var t=c.getPinnedCharts(),n=t.find(function(t){return t.chartKey===e.name});l.dispatch({type:s.REMOVE_FROM_CHART,panelItem:e}),p.checkItem(e.path,!1),n&&m.saveCharts()},removeChart:function(e){l.dispatch({type:s.REMOVE_CHART,name:e})}};e.exports=g},function(e,t,n){"use strict";var r=n(417);e.exports={Error:n(155),openManagementWS:r.openManagementWS,openConfigureWS:r.openConfigureWS,openIAmWS:r.openIAmWS,setAuthorization:r.setAuthorization}},function(e,t,n){"use strict";function r(){var e,t=a.getAuthorization();try{e=JSON.parse(u)}catch(t){e={method:""}}t?e.authorization=t:delete e.authorization,u=JSON.stringify(e,null," ")}var o=n(7),i=n(9),a=n(19),s=n(22),l=Date.now(),u="",c=!1,d=[],f=new s;f.getComposerId=function(){return l},f.getComposerValue=function(){return u},f.getConsoleShown=function(){return c},f.getExchanges=function(){return d},r(),f.dispatchToken=i.register(function(e){switch(i.waitFor([a.dispatchToken]),e.type){case o.TOGGLE_CONSOLE:c=!c,f.emitChange();break;case o.UPDATE_COMPOSER_VALUE:u=e.value,f.emitChange();break;case o.RECEIVE_AUTHORIZATION:case o.RECEIVE_UNAUTHORIZED:case o.CLEAR_AUTHORIZATION:l=Date.now(),r(),f.emitChange();break;case o.MAKE_REQUEST:c&&(d.push(e.exchange),f.emitChange());break;case o.FAIL_REQUEST:case o.RECEIVE_RESPONSE:c&&f.emitChange()}}),e.exports=f},function(e,t,n){"use strict";var r=n(7),o=n(9),i=n(22),a=null,s=null,l=null,u=null,c=new i;c.getStatusMessage=function(){var e={statusMessage:a,status:s};return l&&(e.highlight=l),u&&(e.align=u),e},c.getStatus=function(){return s}, +r=t):r=e.call(n,r,t,i,a)}),r},reduceRight:function(e,t,n){var r=this.toKeyedSeq().reverse();return r.reduce.apply(r,arguments)},reverse:function(){return St(this,ut(this,!0))},slice:function(e,t){return St(this,ht(this,e,t,!0))},some:function(e,t){return!this.every(Zt(e),t)},sort:function(e){return St(this,bt(this,e))},values:function(){return this.__iterator(wn)},butLast:function(){return this.slice(0,-1)},isEmpty:function(){return void 0!==this.size?0===this.size:!this.some(function(){return!0})},count:function(e,t){return p(e?this.toSeq().filter(e,t):this)},countBy:function(e,t){return dt(this,e,t)},equals:function(e){return $(this,e)},entrySeq:function(){var e=this;if(e._cache)return new A(e._cache);var t=e.toSeq().map(Qt).toIndexedSeq();return t.fromEntrySeq=function(){return e.toSeq()},t},filterNot:function(e,t){return this.filter(Zt(e),t)},findEntry:function(e,t,n){var r=n;return this.__iterate(function(n,o,i){if(e.call(t,n,o,i))return r=[o,n],!1}),r},findKey:function(e,t){var n=this.findEntry(e,t);return n&&n[0]},findLast:function(e,t,n){return this.toKeyedSeq().reverse().find(e,t,n)},findLastEntry:function(e,t,n){return this.toKeyedSeq().reverse().findEntry(e,t,n)},findLastKey:function(e,t){return this.toKeyedSeq().reverse().findKey(e,t)},first:function(){return this.find(_)},flatMap:function(e,t){return St(this,yt(this,e,t))},flatten:function(e){return St(this,gt(this,e,!0))},fromEntrySeq:function(){return new at(this)},get:function(e,t){return this.find(function(t,n){return J(n,e)},void 0,t)},getIn:function(e,t){for(var n,r=this,o=Ot(e);!(n=o.next()).done;){var i=n.value;if(r=r&&r.get?r.get(i,gn):gn,r===gn)return t}return r},groupBy:function(e,t){return ft(this,e,t)},has:function(e){return this.get(e,gn)!==gn},hasIn:function(e){return this.getIn(e,gn)!==gn},isSubset:function(e){return e="function"==typeof e.includes?e:t(e),this.every(function(t){return e.includes(t)})},isSuperset:function(e){return e="function"==typeof e.isSubset?e:t(e),e.isSubset(this)},keyOf:function(e){return this.findKey(function(t){return J(t,e)})},keySeq:function(){return this.toSeq().map(Xt).toIndexedSeq()},last:function(){return this.toSeq().reverse().first()},lastKeyOf:function(e){return this.toKeyedSeq().reverse().keyOf(e)},max:function(e){return wt(this,e)},maxBy:function(e,t){return wt(this,t,e)},min:function(e){return wt(this,e?en(e):rn)},minBy:function(e,t){return wt(this,t?en(t):rn,e)},rest:function(){return this.slice(1)},skip:function(e){return this.slice(Math.max(0,e))},skipLast:function(e){return St(this,this.toSeq().reverse().skip(e).reverse())},skipWhile:function(e,t){return St(this,mt(this,e,t,!0))},skipUntil:function(e,t){return this.skipWhile(Zt(e),t)},sortBy:function(e,t){return St(this,bt(this,t,e))},take:function(e){return this.slice(0,Math.max(0,e))},takeLast:function(e){return St(this,this.toSeq().reverse().take(e).reverse())},takeWhile:function(e,t){return St(this,pt(this,e,t))},takeUntil:function(e,t){return this.takeWhile(Zt(e),t)},valueSeq:function(){return this.toIndexedSeq()},hashCode:function(){return this.__hash||(this.__hash=on(this))}});var sr=t.prototype;sr[un]=!0,sr[xn]=sr.values,sr.__toJS=sr.toArray,sr.__toStringMapper=tn,sr.inspect=sr.toSource=function(){return this.toString()},sr.chain=sr.flatMap,sr.contains=sr.includes,$t(n,{flip:function(){return St(this,st(this))},mapEntries:function(e,t){var n=this,r=0;return St(this,this.toSeq().map(function(o,i){return e.call(t,[i,o],r++,n)}).fromEntrySeq())},mapKeys:function(e,t){var n=this;return St(this,this.toSeq().flip().map(function(r,o){return e.call(t,r,o,n)}).flip())}});var lr=n.prototype;lr[cn]=!0,lr[xn]=sr.entries,lr.__toJS=sr.toObject,lr.__toStringMapper=function(e,t){return JSON.stringify(t)+": "+tn(e)},$t(r,{toKeyedSeq:function(){return new rt(this,!1)},filter:function(e,t){return St(this,ct(this,e,t,!1))},findIndex:function(e,t){var n=this.findEntry(e,t);return n?n[0]:-1},indexOf:function(e){var t=this.keyOf(e);return void 0===t?-1:t},lastIndexOf:function(e){var t=this.lastKeyOf(e);return void 0===t?-1:t},reverse:function(){return St(this,ut(this,!1))},slice:function(e,t){return St(this,ht(this,e,t,!1))},splice:function(e,t){var n=arguments.length;if(t=Math.max(0|t,0),0===n||2===n&&!t)return this;e=y(e,e<0?this.count():this.size);var r=this.slice(0,e);return St(this,1===n?r:r.concat(h(arguments,2),this.slice(e+t)))},findLastIndex:function(e,t){var n=this.findLastEntry(e,t);return n?n[0]:-1},first:function(){return this.get(0)},flatten:function(e){return St(this,gt(this,e,!1))},get:function(e,t){return e=m(this,e),e<0||this.size===1/0||void 0!==this.size&&e>this.size?t:this.find(function(t,n){return n===e},void 0,t)},has:function(e){return e=m(this,e),e>=0&&(void 0!==this.size?this.size===1/0||e0&&this._events[e].length>o&&(this._events[e].warned=!0,console.error("(node) warning: possible EventEmitter memory leak detected. %d listeners added. Use emitter.setMaxListeners() to increase limit.",this._events[e].length),"function"==typeof console.trace&&console.trace())),this},n.prototype.on=n.prototype.addListener,n.prototype.once=function(e,t){function n(){this.removeListener(e,n),o||(o=!0,t.apply(this,arguments))}if(!r(t))throw TypeError("listener must be a function");var o=!1;return n.listener=t,this.on(e,n),this},n.prototype.removeListener=function(e,t){var n,o,a,s;if(!r(t))throw TypeError("listener must be a function");if(!this._events||!this._events[e])return this;if(n=this._events[e],a=n.length,o=-1,n===t||r(n.listener)&&n.listener===t)delete this._events[e],this._events.removeListener&&this.emit("removeListener",e,t);else if(i(n)){for(s=a;s-- >0;)if(n[s]===t||n[s].listener&&n[s].listener===t){o=s;break}if(o<0)return this;1===n.length?(n.length=0,delete this._events[e]):n.splice(o,1),this._events.removeListener&&this.emit("removeListener",e,t)}return this},n.prototype.removeAllListeners=function(e){var t,n;if(!this._events)return this;if(!this._events.removeListener)return 0===arguments.length?this._events={}:this._events[e]&&delete this._events[e],this;if(0===arguments.length){for(t in this._events)"removeListener"!==t&&this.removeAllListeners(t);return this.removeAllListeners("removeListener"),this._events={},this}if(n=this._events[e],r(n))this.removeListener(e,n);else if(n)for(;n.length;)this.removeListener(e,n[n.length-1]);return delete this._events[e],this},n.prototype.listeners=function(e){var t;return t=this._events&&this._events[e]?r(this._events[e])?[this._events[e]]:this._events[e].slice():[]},n.prototype.listenerCount=function(e){if(this._events){var t=this._events[e];if(r(t))return 1;if(t)return t.length}return 0},n.listenerCount=function(e,t){return e.listenerCount(t)}},function(e,t,n){function r(e){var t=-1,n=null==e?0:e.length;for(this.clear();++t]/;e.exports=r},function(e,t,n){"use strict";var r,o=n(17),i=n(111),a=/^[ \r\n\t\f]/,s=/<(!--|link|noscript|meta|script|style)[ \r\n\t\f\/>]/,l=n(119),u=l(function(e,t){if(e.namespaceURI!==i.svg||"innerHTML"in e)e.innerHTML=t;else{r=r||document.createElement("div"),r.innerHTML=""+t+"";for(var n=r.firstChild;n.firstChild;)e.appendChild(n.firstChild)}});if(o.canUseDOM){var c=document.createElement("div");c.innerHTML=" ",""===c.innerHTML&&(u=function(e,t){if(e.parentNode&&e.parentNode.replaceChild(e,e),a.test(t)||"<"===t[0]&&s.test(t)){e.innerHTML=String.fromCharCode(65279)+t;var n=e.firstChild;1===n.data.length?e.removeChild(n):n.deleteData(0,1)}else e.innerHTML=t}),c=null}e.exports=u},function(e,t,n){"use strict";function r(e){return e&&e.__esModule?e:{default:e}}t.__esModule=!0;var o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(e){return typeof e}:function(e){return e&&"function"==typeof Symbol&&e.constructor===Symbol?"symbol":typeof e},i=Object.assign||function(e){for(var t=1;t1?r-1:0),i=1;i0?t:20,order:"LAST_TO_FIRST"},authorization:r}).promise.then(function(t){t.hasOwnProperty("values")&&(e.data=t.values.reverse(),e.data.forEach(function(t){t.name=e.name,t.parent=e.parentPath,t.uuid=e.uuid}),l.dispatch({type:n,item:e}))}).catch(_.Error,function(e){a(e)})})}function o(e){var t=e,n=e.indexOf("devices/");return 0===n&&(t=e.replace("devices/","")),t}function i(e,t,n){var r=o(e.topic);new _.Exchange({method:"historian.query",params:{topic:r,count:20,order:"LAST_TO_FIRST"},authorization:n}).promise.then(function(n){if(n.hasOwnProperty("values")){e.data=n.values.reverse(),e.data.forEach(function(t){t.name=e.name,t.parent=e.parentPath,t.uuid=e.uuid}),l.dispatch({type:s.SHOW_CHARTS,emitChange:null===t||"undefined"==typeof t||t}),l.dispatch({type:s.ADD_TO_CHART,panelItem:e}),p.checkItem(e.path,!0);var o=c.getPinnedCharts(),i=o.find(function(t){return t.chartKey===e.name});i&&m.saveCharts(o)}else{var u="Unable to load chart: An unknown problem occurred.",d="center",h={},_=null;if(e.path&&e.path.length>1){var g=e.path.slice(0,1),y=e.path[1],v=f.getItem(g);u="Unable to load chart: No data was retrieved for "+r+". Check for proper configuration of any forwarder, platform driver, and platform agents on platform '"+v[y].name+"'.",d="left",_=r}p.checkItem(e.path,!1),a(h,u,_,d)}}).catch(_.Error,function(t){var n,r="Unable to load chart: "+t.message;if(t.code===-32602)"historian unavailable"===t.message&&(r="Unable to load chart: The platform historian is unavailable on the VOLTTRON Central platform.",n="left");else{var o=d.getVcInstance();if(o){var i=d.getVcHistorianRunning(o);i||(r="Unable to load chart: The platform historian is unavailable on the VOLTTRON Central platform.",n="left")}else r="Unable to load chart: An unknown problem occurred.",n="left"}p.checkItem(e.path,!1),a(t,r,null,n)})}function a(e,t,n,r){e.code&&401===e.code||e.response&&401===e.response.status?(l.dispatch({type:s.RECEIVE_UNAUTHORIZED,error:e}),l.dispatch({type:s.CLEAR_AUTHORIZATION})):t&&h.openStatusIndicator("error",t,n,r)}var s=n(7),l=n(9),u=n(19),c=n(39),d=n(36),f=n(40),h=n(20),p=n(35),m=n(34),_=n(57),g={pinChart:function(e){l.dispatch({type:s.PIN_CHART,chartKey:e})},setType:function(e,t){l.dispatch({type:s.CHANGE_CHART_TYPE,chartKey:e,chartType:t})},changeRefreshRate:function(e,t){l.dispatch({type:s.CHANGE_CHART_REFRESH,rate:e,chartKey:t})},changeDataLength:function(e,t){l.dispatch({type:s.CHANGE_CHART_LENGTH,length:e,chartKey:t})},refreshChart:function(e,t){r(e,t,s.REFRESH_CHART)},addToChart:function(e,t){var n=u.getAuthorization();i(e,t,n)},addToCharts:function(e){var t=u.getAuthorization(),n=!1;e.forEach(function(e){i(e,n,t)})},removeFromChart:function(e){var t=c.getPinnedCharts(),n=t.find(function(t){return t.chartKey===e.name});l.dispatch({type:s.REMOVE_FROM_CHART,panelItem:e}),p.checkItem(e.path,!1),n&&m.saveCharts()},removeChart:function(e){l.dispatch({type:s.REMOVE_CHART,name:e})}};e.exports=g},function(e,t,n){"use strict";var r=n(417);e.exports={Error:n(155),openManagementWS:r.openManagementWS,openConfigureWS:r.openConfigureWS,openIAmWS:r.openIAmWS,setAuthorization:r.setAuthorization}},function(e,t,n){"use strict";function r(){var e,t=a.getAuthorization();try{e=JSON.parse(u)}catch(t){e={method:""}}t?e.authorization=t:delete e.authorization,u=JSON.stringify(e,null," ")}var o=n(7),i=n(9),a=n(19),s=n(22),l=Date.now(),u="",c=!1,d=[],f=new s;f.getComposerId=function(){return l},f.getComposerValue=function(){return u},f.getConsoleShown=function(){return c},f.getExchanges=function(){return d},r(),f.dispatchToken=i.register(function(e){switch(i.waitFor([a.dispatchToken]),e.type){case o.TOGGLE_CONSOLE:c=!c,f.emitChange();break;case o.UPDATE_COMPOSER_VALUE:u=e.value,f.emitChange();break;case o.RECEIVE_AUTHORIZATION:case o.RECEIVE_UNAUTHORIZED:case o.CLEAR_AUTHORIZATION:l=Date.now(),r(),f.emitChange();break;case o.MAKE_REQUEST:c&&(d.push(e.exchange),f.emitChange());break;case o.FAIL_REQUEST:case o.RECEIVE_RESPONSE:c&&f.emitChange()}}),e.exports=f},function(e,t,n){"use strict";var r=n(7),o=n(9),i=n(22),a=null,s=null,l=null,u=null,c=new i;c.getStatusMessage=function(){var e={statusMessage:a,status:s};return l&&(e.highlight=l),u&&(e.align=u),e},c.getStatus=function(){return s}, c.dispatchToken=o.register(function(e){switch(e.type){case r.OPEN_STATUS:null===a?(a=e.message,s=e.status,l=e.highlight,u=e.align):"success"!==s&&s!==e.status||a!==e.message&&(a=a+"; "+e.message),c.emitChange();break;case r.CLOSE_STATUS:a=null,s=null,c.emitChange()}}),e.exports=c},function(e,t,n){(function(e){function n(e){var t,n;return t=e>s||e<0?(n=Math.abs(e)%s,e<0?s-n:n):e}function r(e){var t,n,r;for(t=n=0,r=e.length;0<=r?nr;t=0<=r?++n:--n)e[t]=0;return!1}function o(){var e;this.SBOX=[],this.INV_SBOX=[],this.SUB_MIX=function(){var t,n;for(n=[],e=t=0;t<4;e=++t)n.push([]);return n}(),this.INV_SUB_MIX=function(){var t,n;for(n=[],e=t=0;t<4;e=++t)n.push([]);return n}(),this.init(),this.RCON=[0,1,2,4,8,16,32,64,128,27,54]}function i(e){for(var t=e.length/4,n=new Array(t),r=-1;++r>>8^255&n^99,this.SBOX[o]=n,this.INV_SBOX[n]=o,i=e[o],a=e[i],s=e[a],r=257*e[n]^16843008*n,this.SUB_MIX[0][o]=r<<24|r>>>8,this.SUB_MIX[1][o]=r<<16|r>>>16,this.SUB_MIX[2][o]=r<<8|r>>>24,this.SUB_MIX[3][o]=r,r=16843009*s^65537*a^257*i^16843008*o,this.INV_SUB_MIX[0][n]=r<<24|r>>>8,this.INV_SUB_MIX[1][n]=r<<16|r>>>16,this.INV_SUB_MIX[2][n]=r<<8|r>>>24,this.INV_SUB_MIX[3][n]=r,0===o?o=l=1:(o=i^e[e[e[s^i]]],l^=e[e[l]]);return!0};var l=new o;a.blockSize=16,a.prototype.blockSize=a.blockSize,a.keySize=32,a.prototype.keySize=a.keySize,a.ivSize=a.blockSize,a.prototype.ivSize=a.ivSize,a.prototype._doReset=function(){var e,t,n,r,o,i,a,s;for(n=this._key,t=n.length,this._nRounds=t+6,o=4*(this._nRounds+1),this._keySchedule=[],r=a=0;0<=o?ao;r=0<=o?++a:--a)this._keySchedule[r]=r>>24,i=l.SBOX[i>>>24]<<24|l.SBOX[i>>>16&255]<<16|l.SBOX[i>>>8&255]<<8|l.SBOX[255&i],i^=l.RCON[r/t|0]<<24):t>6&&r%t===4?i=l.SBOX[i>>>24]<<24|l.SBOX[i>>>16&255]<<16|l.SBOX[i>>>8&255]<<8|l.SBOX[255&i]:void 0,this._keySchedule[r-t]^i);for(this._invKeySchedule=[],e=s=0;0<=o?so;e=0<=o?++s:--s)r=o-e,i=this._keySchedule[r-(e%4?0:4)],this._invKeySchedule[e]=e<4||r<=4?i:l.INV_SUB_MIX[0][l.SBOX[i>>>24]]^l.INV_SUB_MIX[1][l.SBOX[i>>>16&255]]^l.INV_SUB_MIX[2][l.SBOX[i>>>8&255]]^l.INV_SUB_MIX[3][l.SBOX[255&i]];return!0},a.prototype.encryptBlock=function(t){t=i(new e(t));var n=this._doCryptBlock(t,this._keySchedule,l.SUB_MIX,l.SBOX),r=new e(16);return r.writeUInt32BE(n[0],0),r.writeUInt32BE(n[1],4),r.writeUInt32BE(n[2],8),r.writeUInt32BE(n[3],12),r},a.prototype.decryptBlock=function(t){t=i(new e(t));var n=[t[3],t[1]];t[1]=n[0],t[3]=n[1];var r=this._doCryptBlock(t,this._invKeySchedule,l.INV_SUB_MIX,l.INV_SBOX),o=new e(16);return o.writeUInt32BE(r[0],0),o.writeUInt32BE(r[3],4),o.writeUInt32BE(r[2],8),o.writeUInt32BE(r[1],12),o},a.prototype.scrub=function(){r(this._keySchedule),r(this._invKeySchedule),r(this._key)},a.prototype._doCryptBlock=function(e,t,r,o){var i,a,s,l,u,c,d,f,h,p,m,_;for(s=e[0]^t[0],l=e[1]^t[1],u=e[2]^t[2],c=e[3]^t[3],i=4,a=m=1,_=this._nRounds;1<=_?m<_:m>_;a=1<=_?++m:--m)d=r[0][s>>>24]^r[1][l>>>16&255]^r[2][u>>>8&255]^r[3][255&c]^t[i++],f=r[0][l>>>24]^r[1][u>>>16&255]^r[2][c>>>8&255]^r[3][255&s]^t[i++],h=r[0][u>>>24]^r[1][c>>>16&255]^r[2][s>>>8&255]^r[3][255&l]^t[i++],p=r[0][c>>>24]^r[1][s>>>16&255]^r[2][l>>>8&255]^r[3][255&u]^t[i++],s=d,l=f,u=h,c=p;return d=(o[s>>>24]<<24|o[l>>>16&255]<<16|o[u>>>8&255]<<8|o[255&c])^t[i++],f=(o[l>>>24]<<24|o[u>>>16&255]<<16|o[c>>>8&255]<<8|o[255&s])^t[i++],h=(o[u>>>24]<<24|o[c>>>16&255]<<16|o[s>>>8&255]<<8|o[255&l])^t[i++],p=(o[c>>>24]<<24|o[s>>>16&255]<<16|o[l>>>8&255]<<8|o[255&u])^t[i++],[n(d),n(f),n(h),n(p)]},t.AES=a}).call(t,n(11).Buffer)},function(e,t,n){(function(t){function r(){o.call(this)}var o=n(737).Transform,i=n(24);e.exports=r,i(r,o),r.prototype.update=function(e,n,r){this.write(e,n);for(var o,i=new t("");o=this.read();)i=t.concat([i,o]);return r&&(i=i.toString(r)),i},r.prototype.final=function(e){this.end();for(var n,r=new t("");n=this.read();)r=t.concat([r,n]);return e&&(r=r.toString(e)),r}}).call(t,n(11).Buffer)},function(e,t){t["aes-128-ecb"]={cipher:"AES",key:128,iv:0,mode:"ECB",type:"block"},t["aes-192-ecb"]={cipher:"AES",key:192,iv:0,mode:"ECB",type:"block"},t["aes-256-ecb"]={cipher:"AES",key:256,iv:0,mode:"ECB",type:"block"},t["aes-128-cbc"]={cipher:"AES",key:128,iv:16,mode:"CBC",type:"block"},t["aes-192-cbc"]={cipher:"AES",key:192,iv:16,mode:"CBC",type:"block"},t["aes-256-cbc"]={cipher:"AES",key:256,iv:16,mode:"CBC",type:"block"},t.aes128=t["aes-128-cbc"],t.aes192=t["aes-192-cbc"],t.aes256=t["aes-256-cbc"],t["aes-128-cfb"]={cipher:"AES",key:128,iv:16,mode:"CFB",type:"stream"},t["aes-192-cfb"]={cipher:"AES",key:192,iv:16,mode:"CFB",type:"stream"},t["aes-256-cfb"]={cipher:"AES",key:256,iv:16,mode:"CFB",type:"stream"},t["aes-128-ofb"]={cipher:"AES",key:128,iv:16,mode:"OFB",type:"stream"},t["aes-192-ofb"]={cipher:"AES",key:192,iv:16,mode:"OFB",type:"stream"},t["aes-256-ofb"]={cipher:"AES",key:256,iv:16,mode:"OFB",type:"stream"},t["aes-128-ctr"]={cipher:"AES",key:128,iv:16,mode:"CTR",type:"stream"},t["aes-192-ctr"]={cipher:"AES",key:192,iv:16,mode:"CTR",type:"stream"},t["aes-256-ctr"]={cipher:"AES",key:256,iv:16,mode:"CTR",type:"stream"}},function(e,t){e.exports=function(){var e=[];return e.toString=function(){for(var e=[],t=0;t>>5]|=e[n]<<24-r%32;return t},_=function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},g=function(e,t,l){for(var m=0;m<16;m++){var _=l+m,g=t[_];t[_]=16711935&(g<<8|g>>>24)|4278255360&(g<<24|g>>>8)}var y,v,b,w,M,k,S,x,L,C;k=y=e[0],S=v=e[1],x=b=e[2],L=w=e[3],C=M=e[4];for(var T,m=0;m<80;m+=1)T=y+t[l+u[m]]|0,T+=m<16?n(v,b,w)+h[0]:m<32?r(v,b,w)+h[1]:m<48?o(v,b,w)+h[2]:m<64?i(v,b,w)+h[3]:a(v,b,w)+h[4],T|=0,T=s(T,d[m]),T=T+M|0,y=M,M=w,w=s(b,10),b=v,v=T,T=k+t[l+c[m]]|0,T+=m<16?a(S,x,L)+p[0]:m<32?i(S,x,L)+p[1]:m<48?o(S,x,L)+p[2]:m<64?r(S,x,L)+p[3]:n(S,x,L)+p[4],T|=0,T=s(T,f[m]),T=T+C|0,k=C,C=L,L=s(x,10),x=S,S=T;T=e[1]+b+L|0,e[1]=e[2]+w+C|0,e[2]=e[3]+M+k|0,e[3]=e[4]+y+S|0,e[4]=e[0]+v+x|0,e[0]=T}}).call(t,n(11).Buffer)},function(e,t,n){(function(e,t){!function(e,n){"use strict";function r(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n=8*this._finalSize&&(this._update(this._block),this._block.fill(0)),this._block.writeInt32BE(t,this._blockSize-4);var n=this._update(this._block)||this._hash();return e?n.toString(e):n},t.prototype._update=function(){throw new Error("_update must be implemented by subclass")},t}},function(e,t,n){var t=e.exports=function(e){var n=t[e];if(!n)throw new Error(e+" is not supported (we accept pull requests)");return new n},r=n(11).Buffer,o=n(732)(r);t.sha1=n(734)(r,o),t.sha256=n(735)(r,o),t.sha512=n(736)(r,o)},function(e,t,n){var r=n(140).inherits;e.exports=function(e,t){function n(){return p.length?p.pop().init():this instanceof n?(this._w=h,t.call(this,64,56),this._h=null,void this.init()):new n}function o(e,t,n,r){return e<20?t&n|~t&r:e<40?t^n^r:e<60?t&n|t&r|n&r:t^n^r}function i(e){return e<20?1518500249:e<40?1859775393:e<60?-1894007588:-899497514}function a(e,t){return e+t|0}function s(e,t){return e<>>32-t}var l=0,u=4,c=8,d=12,f=16,h=new("undefined"==typeof Int32Array?Array:Int32Array)(80),p=[];return r(n,t),n.prototype.init=function(){return this._a=1732584193,this._b=4023233417,this._c=2562383102,this._d=271733878,this._e=3285377520,t.prototype.init.call(this),this},n.prototype._POOL=p,n.prototype._update=function(e){var t,n,r,l,u,c,d,f,h,p;t=c=this._a,n=d=this._b,r=f=this._c,l=h=this._d,u=p=this._e;for(var m=this._w,_=0;_<80;_++){var g=m[_]=_<16?e.readInt32BE(4*_):s(m[_-3]^m[_-8]^m[_-14]^m[_-16],1),y=a(a(s(t,5),o(_,n,r,l)),a(a(u,g),i(_)));u=l,l=r,r=s(n,30),n=t,t=y}this._a=a(t,c),this._b=a(n,d),this._c=a(r,f),this._d=a(l,h),this._e=a(u,p)},n.prototype._hash=function(){p.length<100&&p.push(this);var t=new e(20);return t.writeInt32BE(0|this._a,l),t.writeInt32BE(0|this._b,u),t.writeInt32BE(0|this._c,c),t.writeInt32BE(0|this._d,d),t.writeInt32BE(0|this._e,f),t},n}},function(e,t,n){var r=n(140).inherits;e.exports=function(e,t){function n(){this.init(),this._w=h,t.call(this,64,56)}function o(e,t){return e>>>t|e<<32-t}function i(e,t){return e>>>t}function a(e,t,n){return e&t^~e&n}function s(e,t,n){return e&t^e&n^t&n}function l(e){return o(e,2)^o(e,13)^o(e,22)}function u(e){return o(e,6)^o(e,11)^o(e,25)}function c(e){return o(e,7)^o(e,18)^i(e,3)}function d(e){return o(e,17)^o(e,19)^i(e,10)}var f=[1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298],h=new Array(64);return r(n,t),n.prototype.init=function(){return this._a=1779033703,this._b=-1150833019,this._c=1013904242,this._d=-1521486534,this._e=1359893119,this._f=-1694144372,this._g=528734635,this._h=1541459225,this._len=this._s=0,this},n.prototype._update=function(e){var t,n,r,o,i,h,p,m,_,g,y=this._w;t=0|this._a,n=0|this._b,r=0|this._c,o=0|this._d,i=0|this._e,h=0|this._f,p=0|this._g,m=0|this._h;for(var v=0;v<64;v++){var b=y[v]=v<16?e.readInt32BE(4*v):d(y[v-2])+y[v-7]+c(y[v-15])+y[v-16];_=m+u(i)+a(i,h,p)+f[v]+b,g=l(t)+s(t,n,r),m=p,p=h,h=i,i=o+_,o=r,r=n,n=t,t=_+g}this._a=t+this._a|0,this._b=n+this._b|0,this._c=r+this._c|0,this._d=o+this._d|0,this._e=i+this._e|0,this._f=h+this._f|0,this._g=p+this._g|0,this._h=m+this._h|0},n.prototype._hash=function(){var t=new e(32);return t.writeInt32BE(this._a,0),t.writeInt32BE(this._b,4),t.writeInt32BE(this._c,8),t.writeInt32BE(this._d,12),t.writeInt32BE(this._e,16),t.writeInt32BE(this._f,20),t.writeInt32BE(this._g,24),t.writeInt32BE(this._h,28),t},n}},function(e,t,n){var r=n(140).inherits;e.exports=function(e,t){function n(){this.init(),this._w=l,t.call(this,128,112)}function o(e,t,n){return e>>>n|t<<32-n}function i(e,t,n){return e&t^~e&n}function a(e,t,n){return e&t^e&n^t&n}var s=[1116352408,3609767458,1899447441,602891725,3049323471,3964484399,3921009573,2173295548,961987163,4081628472,1508970993,3053834265,2453635748,2937671579,2870763221,3664609560,3624381080,2734883394,310598401,1164996542,607225278,1323610764,1426881987,3590304994,1925078388,4068182383,2162078206,991336113,2614888103,633803317,3248222580,3479774868,3835390401,2666613458,4022224774,944711139,264347078,2341262773,604807628,2007800933,770255983,1495990901,1249150122,1856431235,1555081692,3175218132,1996064986,2198950837,2554220882,3999719339,2821834349,766784016,2952996808,2566594879,3210313671,3203337956,3336571891,1034457026,3584528711,2466948901,113926993,3758326383,338241895,168717936,666307205,1188179964,773529912,1546045734,1294757372,1522805485,1396182291,2643833823,1695183700,2343527390,1986661051,1014477480,2177026350,1206759142,2456956037,344077627,2730485921,1290863460,2820302411,3158454273,3259730800,3505952657,3345764771,106217008,3516065817,3606008344,3600352804,1432725776,4094571909,1467031594,275423344,851169720,430227734,3100823752,506948616,1363258195,659060556,3750685593,883997877,3785050280,958139571,3318307427,1322822218,3812723403,1537002063,2003034995,1747873779,3602036899,1955562222,1575990012,2024104815,1125592928,2227730452,2716904306,2361852424,442776044,2428436474,593698344,2756734187,3733110249,3204031479,2999351573,3329325298,3815920427,3391569614,3928383900,3515267271,566280711,3940187606,3454069534,4118630271,4000239992,116418474,1914138554,174292421,2731055270,289380356,3203993006,460393269,320620315,685471733,587496836,852142971,1086792851,1017036298,365543100,1126000580,2618297676,1288033470,3409855158,1501505948,4234509866,1607167915,987167468,1816402316,1246189591],l=new Array(160);return r(n,t),n.prototype.init=function(){return this._a=1779033703,this._b=-1150833019,this._c=1013904242,this._d=-1521486534,this._e=1359893119,this._f=-1694144372,this._g=528734635,this._h=1541459225,this._al=-205731576,this._bl=-2067093701,this._cl=-23791573,this._dl=1595750129,this._el=-1377402159,this._fl=725511199,this._gl=-79577749,this._hl=327033209,this._len=this._s=0,this},n.prototype._update=function(e){var t,n,r,l,u,c,d,f,h,p,m,_,g,y,v,b,w=this._w;t=0|this._a,n=0|this._b,r=0|this._c,l=0|this._d,u=0|this._e,c=0|this._f,d=0|this._g,f=0|this._h,h=0|this._al,p=0|this._bl,m=0|this._cl,_=0|this._dl,g=0|this._el,y=0|this._fl,v=0|this._gl,b=0|this._hl;for(var M=0;M<80;M++){var k,S,x=2*M;if(M<16)k=w[x]=e.readInt32BE(4*x),S=w[x+1]=e.readInt32BE(4*x+4);else{var L=w[x-30],C=w[x-30+1],T=o(L,C,1)^o(L,C,8)^L>>>7,E=o(C,L,1)^o(C,L,8)^o(C,L,7);L=w[x-4],C=w[x-4+1];var D=o(L,C,19)^o(C,L,29)^L>>>6,O=o(C,L,19)^o(L,C,29)^o(C,L,6),P=w[x-14],A=w[x-14+1],Y=w[x-32],I=w[x-32+1];S=E+A,k=T+P+(S>>>0>>0?1:0),S+=O,k=k+D+(S>>>0>>0?1:0),S+=I,k=k+Y+(S>>>0>>0?1:0),w[x]=k,w[x+1]=S}var R=a(t,n,r),j=a(h,p,m),F=o(t,h,28)^o(h,t,2)^o(h,t,7),N=o(h,t,28)^o(t,h,2)^o(t,h,7),H=o(u,g,14)^o(u,g,18)^o(g,u,9),z=o(g,u,14)^o(g,u,18)^o(u,g,9),W=s[x],B=s[x+1],U=i(u,c,d),V=i(g,y,v),G=b+z,q=f+H+(G>>>0>>0?1:0);G+=V,q=q+U+(G>>>0>>0?1:0),G+=B,q=q+W+(G>>>0>>0?1:0),G+=S,q=q+k+(G>>>0>>0?1:0);var K=N+j,J=F+R+(K>>>0>>0?1:0);f=d,b=v,d=c,v=y,c=u,y=g,g=_+G|0,u=l+q+(g>>>0<_>>>0?1:0)|0,l=r,_=m,r=n,m=p,n=t,p=h,h=G+K|0,t=q+J+(h>>>0>>0?1:0)|0}this._al=this._al+h|0,this._bl=this._bl+p|0,this._cl=this._cl+m|0,this._dl=this._dl+_|0,this._el=this._el+g|0,this._fl=this._fl+y|0,this._gl=this._gl+v|0,this._hl=this._hl+b|0,this._a=this._a+t+(this._al>>>0>>0?1:0)|0,this._b=this._b+n+(this._bl>>>0

>>0?1:0)|0,this._c=this._c+r+(this._cl>>>0>>0?1:0)|0,this._d=this._d+l+(this._dl>>>0<_>>>0?1:0)|0,this._e=this._e+u+(this._el>>>0>>0?1:0)|0,this._f=this._f+c+(this._fl>>>0>>0?1:0)|0,this._g=this._g+d+(this._gl>>>0>>0?1:0)|0,this._h=this._h+f+(this._hl>>>0>>0?1:0)|0},n.prototype._hash=function(){function t(e,t,r){n.writeInt32BE(e,r),n.writeInt32BE(t,r+4)}var n=new e(64);return t(this._a,this._al,0),t(this._b,this._bl,8),t(this._c,this._cl,16),t(this._d,this._dl,24),t(this._e,this._el,32),t(this._f,this._fl,40),t(this._g,this._gl,48),t(this._h,this._hl,56),n},n}},function(e,t,n){function r(){o.call(this)}e.exports=r;var o=n(72).EventEmitter,i=n(24);i(r,o),r.Readable=n(138),r.Writable=n(727),r.Duplex=n(721),r.Transform=n(726),r.PassThrough=n(725),r.Stream=r,r.prototype.pipe=function(e,t){function n(t){e.writable&&!1===e.write(t)&&u.pause&&u.pause()}function r(){u.readable&&u.resume&&u.resume()}function i(){c||(c=!0,e.end())}function a(){c||(c=!0,"function"==typeof e.destroy&&e.destroy())}function s(e){if(l(),0===o.listenerCount(this,"error"))throw e}function l(){u.removeListener("data",n),e.removeListener("drain",r),u.removeListener("end",i),u.removeListener("close",a),u.removeListener("error",s),e.removeListener("error",s),u.removeListener("end",l),u.removeListener("close",l),e.removeListener("close",l)}var u=this;u.on("data",n),e.on("drain",r),e._isStdio||t&&t.end===!1||(u.on("end",i),u.on("close",a));var c=!1;return u.on("error",s),e.on("error",s),u.on("end",l),u.on("close",l),e.on("close",l),e.emit("pipe",u),e}},function(e,t){"use strict";e.exports=function(e){return encodeURIComponent(e).replace(/[!'()*]/g,function(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()})}},function(e,t,n){var r=n(440);"string"==typeof r&&(r=[[e.id,r,""]]);n(139)(r,{});r.locals&&(e.exports=r.locals)},function(e,t,n){var r=n(441);"string"==typeof r&&(r=[[e.id,r,""]]);n(139)(r,{});r.locals&&(e.exports=r.locals)},function(e,t,n){var r=n(442);"string"==typeof r&&(r=[[e.id,r,""]]);n(139)(r,{});r.locals&&(e.exports=r.locals)},function(e,t,n){e.exports=n.p+"fonts/fontawesome-webfont-woff2"},function(e,t,n){e.exports=n.p+"fonts/fontawesome-webfont-woff"},function(e,t){(function(t){function n(e,t){function n(){if(!o){if(r("throwDeprecation"))throw new Error(t);r("traceDeprecation")?console.trace(t):console.warn(t),o=!0}return e.apply(this,arguments)}if(r("noDeprecation"))return e;var o=!1;return n}function r(e){try{if(!t.localStorage)return!1}catch(e){return!1}var n=t.localStorage[e];return null!=n&&"true"===String(n).toLowerCase()}e.exports=n}).call(t,function(){return this}())},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}},function(e,t){e.exports=function(e){return e&&"object"==typeof e&&"function"==typeof e.copy&&"function"==typeof e.fill&&"function"==typeof e.readUInt8}},26,function(e,t){},748,748,function(e,t,n,r,o){"use strict";var i=n(r),a=(n(o),function(e){var t=this;if(t.instancePool.length){var n=t.instancePool.pop();return t.call(n,e),n}return new t(e)}),s=function(e,t){var n=this;if(n.instancePool.length){var r=n.instancePool.pop();return n.call(r,e,t),r}return new n(e,t)},l=function(e,t,n){var r=this;if(r.instancePool.length){var o=r.instancePool.pop();return r.call(o,e,t,n),o}return new r(e,t,n)},u=function(e,t,n,r){var o=this;if(o.instancePool.length){var i=o.instancePool.pop();return o.call(i,e,t,n,r),i}return new o(e,t,n,r)},c=function(e){var t=this;e instanceof t?void 0:i("25"),e.destructor(),t.instancePool.length>>5]|=e[n]<<24-r%32;return t},_=function(e){for(var t=[],n=0;n<32*e.length;n+=8)t.push(e[n>>>5]>>>24-n%32&255);return t},g=function(e,t,l){for(var m=0;m<16;m++){var _=l+m,g=t[_];t[_]=16711935&(g<<8|g>>>24)|4278255360&(g<<24|g>>>8)}var y,v,b,w,M,k,S,x,L,C;k=y=e[0],S=v=e[1],x=b=e[2],L=w=e[3],C=M=e[4];for(var T,m=0;m<80;m+=1)T=y+t[l+u[m]]|0,T+=m<16?n(v,b,w)+h[0]:m<32?r(v,b,w)+h[1]:m<48?o(v,b,w)+h[2]:m<64?i(v,b,w)+h[3]:a(v,b,w)+h[4],T|=0,T=s(T,d[m]),T=T+M|0,y=M,M=w,w=s(b,10),b=v,v=T,T=k+t[l+c[m]]|0,T+=m<16?a(S,x,L)+p[0]:m<32?i(S,x,L)+p[1]:m<48?o(S,x,L)+p[2]:m<64?r(S,x,L)+p[3]:n(S,x,L)+p[4],T|=0,T=s(T,f[m]),T=T+C|0,k=C,C=L,L=s(x,10),x=S,S=T;T=e[1]+b+L|0,e[1]=e[2]+w+C|0,e[2]=e[3]+M+k|0,e[3]=e[4]+y+S|0,e[4]=e[0]+v+x|0,e[0]=T}}).call(t,n(11).Buffer)},function(e,t,n){(function(e,t){!function(e,n){"use strict";function r(e){"function"!=typeof e&&(e=new Function(""+e));for(var t=new Array(arguments.length-1),n=0;n=8*this._finalSize&&(this._update(this._block),this._block.fill(0)),this._block.writeInt32BE(t,this._blockSize-4);var n=this._update(this._block)||this._hash();return e?n.toString(e):n},t.prototype._update=function(){throw new Error("_update must be implemented by subclass")},t}},function(e,t,n){var t=e.exports=function(e){var n=t[e];if(!n)throw new Error(e+" is not supported (we accept pull requests)");return new n},r=n(11).Buffer,o=n(732)(r);t.sha1=n(734)(r,o),t.sha256=n(735)(r,o),t.sha512=n(736)(r,o)},function(e,t,n){var r=n(140).inherits;e.exports=function(e,t){function n(){return p.length?p.pop().init():this instanceof n?(this._w=h,t.call(this,64,56),this._h=null,void this.init()):new n}function o(e,t,n,r){return e<20?t&n|~t&r:e<40?t^n^r:e<60?t&n|t&r|n&r:t^n^r}function i(e){return e<20?1518500249:e<40?1859775393:e<60?-1894007588:-899497514}function a(e,t){return e+t|0}function s(e,t){return e<>>32-t}var l=0,u=4,c=8,d=12,f=16,h=new("undefined"==typeof Int32Array?Array:Int32Array)(80),p=[];return r(n,t),n.prototype.init=function(){return this._a=1732584193,this._b=4023233417,this._c=2562383102,this._d=271733878,this._e=3285377520,t.prototype.init.call(this),this},n.prototype._POOL=p,n.prototype._update=function(e){var t,n,r,l,u,c,d,f,h,p;t=c=this._a,n=d=this._b,r=f=this._c,l=h=this._d,u=p=this._e;for(var m=this._w,_=0;_<80;_++){var g=m[_]=_<16?e.readInt32BE(4*_):s(m[_-3]^m[_-8]^m[_-14]^m[_-16],1),y=a(a(s(t,5),o(_,n,r,l)),a(a(u,g),i(_)));u=l,l=r,r=s(n,30),n=t,t=y}this._a=a(t,c),this._b=a(n,d),this._c=a(r,f),this._d=a(l,h),this._e=a(u,p)},n.prototype._hash=function(){p.length<100&&p.push(this);var t=new e(20);return t.writeInt32BE(0|this._a,l),t.writeInt32BE(0|this._b,u),t.writeInt32BE(0|this._c,c),t.writeInt32BE(0|this._d,d),t.writeInt32BE(0|this._e,f),t},n}},function(e,t,n){var r=n(140).inherits;e.exports=function(e,t){function n(){this.init(),this._w=h,t.call(this,64,56)}function o(e,t){return e>>>t|e<<32-t}function i(e,t){return e>>>t}function a(e,t,n){return e&t^~e&n}function s(e,t,n){return e&t^e&n^t&n}function l(e){return o(e,2)^o(e,13)^o(e,22)}function u(e){return o(e,6)^o(e,11)^o(e,25)}function c(e){return o(e,7)^o(e,18)^i(e,3)}function d(e){return o(e,17)^o(e,19)^i(e,10)}var f=[1116352408,1899447441,3049323471,3921009573,961987163,1508970993,2453635748,2870763221,3624381080,310598401,607225278,1426881987,1925078388,2162078206,2614888103,3248222580,3835390401,4022224774,264347078,604807628,770255983,1249150122,1555081692,1996064986,2554220882,2821834349,2952996808,3210313671,3336571891,3584528711,113926993,338241895,666307205,773529912,1294757372,1396182291,1695183700,1986661051,2177026350,2456956037,2730485921,2820302411,3259730800,3345764771,3516065817,3600352804,4094571909,275423344,430227734,506948616,659060556,883997877,958139571,1322822218,1537002063,1747873779,1955562222,2024104815,2227730452,2361852424,2428436474,2756734187,3204031479,3329325298],h=new Array(64);return r(n,t),n.prototype.init=function(){return this._a=1779033703,this._b=-1150833019,this._c=1013904242,this._d=-1521486534,this._e=1359893119,this._f=-1694144372,this._g=528734635,this._h=1541459225,this._len=this._s=0,this},n.prototype._update=function(e){var t,n,r,o,i,h,p,m,_,g,y=this._w;t=0|this._a,n=0|this._b,r=0|this._c,o=0|this._d,i=0|this._e,h=0|this._f,p=0|this._g,m=0|this._h;for(var v=0;v<64;v++){var b=y[v]=v<16?e.readInt32BE(4*v):d(y[v-2])+y[v-7]+c(y[v-15])+y[v-16];_=m+u(i)+a(i,h,p)+f[v]+b,g=l(t)+s(t,n,r),m=p,p=h,h=i,i=o+_,o=r,r=n,n=t,t=_+g}this._a=t+this._a|0,this._b=n+this._b|0,this._c=r+this._c|0,this._d=o+this._d|0,this._e=i+this._e|0,this._f=h+this._f|0,this._g=p+this._g|0,this._h=m+this._h|0},n.prototype._hash=function(){var t=new e(32);return t.writeInt32BE(this._a,0),t.writeInt32BE(this._b,4),t.writeInt32BE(this._c,8),t.writeInt32BE(this._d,12),t.writeInt32BE(this._e,16),t.writeInt32BE(this._f,20),t.writeInt32BE(this._g,24),t.writeInt32BE(this._h,28),t},n}},function(e,t,n){var r=n(140).inherits;e.exports=function(e,t){function n(){this.init(),this._w=l,t.call(this,128,112)}function o(e,t,n){return e>>>n|t<<32-n}function i(e,t,n){return e&t^~e&n}function a(e,t,n){return e&t^e&n^t&n}var s=[1116352408,3609767458,1899447441,602891725,3049323471,3964484399,3921009573,2173295548,961987163,4081628472,1508970993,3053834265,2453635748,2937671579,2870763221,3664609560,3624381080,2734883394,310598401,1164996542,607225278,1323610764,1426881987,3590304994,1925078388,4068182383,2162078206,991336113,2614888103,633803317,3248222580,3479774868,3835390401,2666613458,4022224774,944711139,264347078,2341262773,604807628,2007800933,770255983,1495990901,1249150122,1856431235,1555081692,3175218132,1996064986,2198950837,2554220882,3999719339,2821834349,766784016,2952996808,2566594879,3210313671,3203337956,3336571891,1034457026,3584528711,2466948901,113926993,3758326383,338241895,168717936,666307205,1188179964,773529912,1546045734,1294757372,1522805485,1396182291,2643833823,1695183700,2343527390,1986661051,1014477480,2177026350,1206759142,2456956037,344077627,2730485921,1290863460,2820302411,3158454273,3259730800,3505952657,3345764771,106217008,3516065817,3606008344,3600352804,1432725776,4094571909,1467031594,275423344,851169720,430227734,3100823752,506948616,1363258195,659060556,3750685593,883997877,3785050280,958139571,3318307427,1322822218,3812723403,1537002063,2003034995,1747873779,3602036899,1955562222,1575990012,2024104815,1125592928,2227730452,2716904306,2361852424,442776044,2428436474,593698344,2756734187,3733110249,3204031479,2999351573,3329325298,3815920427,3391569614,3928383900,3515267271,566280711,3940187606,3454069534,4118630271,4000239992,116418474,1914138554,174292421,2731055270,289380356,3203993006,460393269,320620315,685471733,587496836,852142971,1086792851,1017036298,365543100,1126000580,2618297676,1288033470,3409855158,1501505948,4234509866,1607167915,987167468,1816402316,1246189591],l=new Array(160);return r(n,t),n.prototype.init=function(){return this._a=1779033703,this._b=-1150833019,this._c=1013904242,this._d=-1521486534,this._e=1359893119,this._f=-1694144372,this._g=528734635,this._h=1541459225,this._al=-205731576,this._bl=-2067093701,this._cl=-23791573,this._dl=1595750129,this._el=-1377402159,this._fl=725511199,this._gl=-79577749,this._hl=327033209,this._len=this._s=0,this},n.prototype._update=function(e){var t,n,r,l,u,c,d,f,h,p,m,_,g,y,v,b,w=this._w;t=0|this._a,n=0|this._b,r=0|this._c,l=0|this._d,u=0|this._e,c=0|this._f,d=0|this._g,f=0|this._h,h=0|this._al,p=0|this._bl,m=0|this._cl,_=0|this._dl,g=0|this._el,y=0|this._fl,v=0|this._gl,b=0|this._hl;for(var M=0;M<80;M++){var k,S,x=2*M;if(M<16)k=w[x]=e.readInt32BE(4*x),S=w[x+1]=e.readInt32BE(4*x+4);else{var L=w[x-30],C=w[x-30+1],T=o(L,C,1)^o(L,C,8)^L>>>7,E=o(C,L,1)^o(C,L,8)^o(C,L,7);L=w[x-4],C=w[x-4+1];var D=o(L,C,19)^o(C,L,29)^L>>>6,O=o(C,L,19)^o(L,C,29)^o(C,L,6),P=w[x-14],A=w[x-14+1],Y=w[x-32],I=w[x-32+1];S=E+A,k=T+P+(S>>>0>>0?1:0),S+=O,k=k+D+(S>>>0>>0?1:0),S+=I,k=k+Y+(S>>>0>>0?1:0),w[x]=k,w[x+1]=S}var R=a(t,n,r),j=a(h,p,m),F=o(t,h,28)^o(h,t,2)^o(h,t,7),N=o(h,t,28)^o(t,h,2)^o(t,h,7),H=o(u,g,14)^o(u,g,18)^o(g,u,9),z=o(g,u,14)^o(g,u,18)^o(u,g,9),W=s[x],B=s[x+1],U=i(u,c,d),V=i(g,y,v),G=b+z,q=f+H+(G>>>0>>0?1:0);G+=V,q=q+U+(G>>>0>>0?1:0),G+=B,q=q+W+(G>>>0>>0?1:0),G+=S,q=q+k+(G>>>0>>0?1:0);var K=N+j,J=F+R+(K>>>0>>0?1:0);f=d,b=v,d=c,v=y,c=u,y=g,g=_+G|0,u=l+q+(g>>>0<_>>>0?1:0)|0,l=r,_=m,r=n,m=p,n=t,p=h,h=G+K|0,t=q+J+(h>>>0>>0?1:0)|0}this._al=this._al+h|0,this._bl=this._bl+p|0,this._cl=this._cl+m|0,this._dl=this._dl+_|0,this._el=this._el+g|0,this._fl=this._fl+y|0,this._gl=this._gl+v|0,this._hl=this._hl+b|0,this._a=this._a+t+(this._al>>>0>>0?1:0)|0,this._b=this._b+n+(this._bl>>>0

>>0?1:0)|0,this._c=this._c+r+(this._cl>>>0>>0?1:0)|0,this._d=this._d+l+(this._dl>>>0<_>>>0?1:0)|0,this._e=this._e+u+(this._el>>>0>>0?1:0)|0,this._f=this._f+c+(this._fl>>>0>>0?1:0)|0,this._g=this._g+d+(this._gl>>>0>>0?1:0)|0,this._h=this._h+f+(this._hl>>>0>>0?1:0)|0},n.prototype._hash=function(){function t(e,t,r){n.writeInt32BE(e,r),n.writeInt32BE(t,r+4)}var n=new e(64);return t(this._a,this._al,0),t(this._b,this._bl,8),t(this._c,this._cl,16),t(this._d,this._dl,24),t(this._e,this._el,32),t(this._f,this._fl,40),t(this._g,this._gl,48),t(this._h,this._hl,56),n},n}},function(e,t,n){function r(){o.call(this)}e.exports=r;var o=n(72).EventEmitter,i=n(24);i(r,o),r.Readable=n(138),r.Writable=n(727),r.Duplex=n(721),r.Transform=n(726),r.PassThrough=n(725),r.Stream=r,r.prototype.pipe=function(e,t){function n(t){e.writable&&!1===e.write(t)&&u.pause&&u.pause()}function r(){u.readable&&u.resume&&u.resume()}function i(){c||(c=!0,e.end())}function a(){c||(c=!0,"function"==typeof e.destroy&&e.destroy())}function s(e){if(l(),0===o.listenerCount(this,"error"))throw e}function l(){u.removeListener("data",n),e.removeListener("drain",r),u.removeListener("end",i),u.removeListener("close",a),u.removeListener("error",s),e.removeListener("error",s),u.removeListener("end",l),u.removeListener("close",l),e.removeListener("close",l)}var u=this;u.on("data",n),e.on("drain",r),e._isStdio||t&&t.end===!1||(u.on("end",i),u.on("close",a));var c=!1;return u.on("error",s),e.on("error",s),u.on("end",l),u.on("close",l),e.on("close",l),e.emit("pipe",u),e}},function(e,t){"use strict";e.exports=function(e){return encodeURIComponent(e).replace(/[!'()*]/g,function(e){return"%"+e.charCodeAt(0).toString(16).toUpperCase()})}},function(e,t,n){var r=n(440);"string"==typeof r&&(r=[[e.id,r,""]]);n(139)(r,{});r.locals&&(e.exports=r.locals)},function(e,t,n){var r=n(441);"string"==typeof r&&(r=[[e.id,r,""]]);n(139)(r,{});r.locals&&(e.exports=r.locals)},function(e,t,n){var r=n(442);"string"==typeof r&&(r=[[e.id,r,""]]);n(139)(r,{});r.locals&&(e.exports=r.locals)},function(e,t,n){e.exports=n.p+"fonts/fontawesome-webfont-woff2"},function(e,t,n){e.exports=n.p+"fonts/fontawesome-webfont-woff"},function(e,t){(function(t){function n(e,t){function n(){if(!o){if(r("throwDeprecation"))throw new Error(t);r("traceDeprecation")?console.trace(t):console.warn(t),o=!0}return e.apply(this,arguments)}if(r("noDeprecation"))return e;var o=!1;return n}function r(e){try{if(!t.localStorage)return!1}catch(e){return!1}var n=t.localStorage[e];return null!=n&&"true"===String(n).toLowerCase()}e.exports=n}).call(t,function(){return this}())},function(e,t){"function"==typeof Object.create?e.exports=function(e,t){e.super_=t,e.prototype=Object.create(t.prototype,{constructor:{value:e,enumerable:!1,writable:!0,configurable:!0}})}:e.exports=function(e,t){e.super_=t;var n=function(){};n.prototype=t.prototype,e.prototype=new n,e.prototype.constructor=e}},function(e,t){e.exports=function(e){return e&&"object"==typeof e&&"function"==typeof e.copy&&"function"==typeof e.fill&&"function"==typeof e.readUInt8}},26,function(e,t){},748,748,function(e,t,n,r,o){"use strict";var i=n(r),a=(n(o),function(e){var t=this;if(t.instancePool.length){var n=t.instancePool.pop();return t.call(n,e),n}return new t(e)}),s=function(e,t){var n=this;if(n.instancePool.length){var r=n.instancePool.pop();return n.call(r,e,t),r}return new n(e,t)},l=function(e,t,n){var r=this;if(r.instancePool.length){var o=r.instancePool.pop();return r.call(o,e,t,n),o}return new r(e,t,n)},u=function(e,t,n,r){var o=this;if(o.instancePool.length){var i=o.instancePool.pop();return o.call(i,e,t,n,r),i}return new o(e,t,n,r)},c=function(e){var t=this;e instanceof t?void 0:i("25"),e.destructor(),t.instancePool.length:port" or "tcp://ip:port", + + # The serverkey of the VC agent's instance. + "volttron-central-serverkey" "VC agent's instance serverkey", + + # interval at which VCP will attempt to connect to the VC agent's + # instance when a disconnection occurs. + "volttron-central-reconnect-interval": 5, + + # The name of instance to be sent to volttron central for displaying + # on the interface. + "instance-name": "name of instances (VC agent's instance ip address as default)", + + # VCP will publish health statistics of the instance at a specified + # interval. + "stats-publish-interval": 30, + + # The VCP provides a topic/replace mapping for the platform. It is + # available via rpc function so that sensitive information won't be + # published through forwarding. + # + # The topic-replace-map is used to search/replace all of the topics + # published from ForwardHistorians and other agents that connect with + # external instances. + "topic-replace-map": { + "from": "to", + "from1": "to1" + } +} +``` diff --git a/services/core/VolttronCentralPlatform/setup.py b/services/core/VolttronCentralPlatform/setup.py index f09851e5f4..43400be0db 100644 --- a/services/core/VolttronCentralPlatform/setup.py +++ b/services/core/VolttronCentralPlatform/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/VolttronCentralPlatform/tests/test_platform_agent_rpc.py b/services/core/VolttronCentralPlatform/tests/test_platform_agent_rpc.py index a7447c216f..f52594a398 100644 --- a/services/core/VolttronCentralPlatform/tests/test_platform_agent_rpc.py +++ b/services/core/VolttronCentralPlatform/tests/test_platform_agent_rpc.py @@ -1,18 +1,53 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json import logging import pytest - +import subprocess import gevent -from volttron.platform import get_volttron_root, jsonapi -from volttron.platform.agent.known_identities import VOLTTRON_CENTRAL_PLATFORM, \ - CONFIGURATION_STORE -from volttron.platform.jsonrpc import RemoteError, UNAUTHORIZED +from volttron.platform import get_volttron_root, jsonapi, get_services_core +from volttron.platform.agent.known_identities import VOLTTRON_CENTRAL_PLATFORM, CONFIGURATION_STORE from volttron.platform.messaging.health import STATUS_GOOD - -from volttrontesting.utils.agent_additions import add_volttron_central, \ - add_volttron_central_platform, add_listener, add_sqlhistorian -from volttrontesting.utils.platformwrapper import start_wrapper_platform, \ - PlatformWrapper +from volttrontesting.utils.agent_additions import add_volttron_central_platform, add_listener, add_volttron_central +from volttrontesting.utils.platformwrapper import start_wrapper_platform, PlatformWrapper SQLITE_HISTORIAN_CONFIG = { "connection": { @@ -27,35 +62,26 @@ STANDARD_GET_TIMEOUT = 30 _log = logging.getLogger(__name__) -pytest.skip("Needs to be updated based on 6.0 changes", allow_module_level=True) +# pytest.skip("Needs to be updated based on 6.0 changes", allow_module_level=True) -@pytest.fixture(scope="module", - params=[("zmq", False), - ("rmq", True) - ]) -def setup_platform(request): - """ - Creates a single instance of VOLTTRON with a VOLTTRON Central Platform, - a listener agent, and a sqlite historian that is a platform.historian. - The VOLTTRON Central Platform agent is not registered with a VOLTTRON - Central Platform. +@pytest.fixture(scope="module") +def setup_platform(volttron_instance_web): """ - vcp = PlatformWrapper(messagebus=request.param[0], ssl_auth=request.param[1]) - - start_wrapper_platform(vcp, with_http=True, - add_local_vc_address=True) + Creates a single instance of VOLTTRON with a VOLTTRON Central Platform, a listener agent, and a sqlite historian + that is a platform.historian. + The VOLTTRON Central Platform agent is not registered with a VOLTTRON Central Platform. + """ + vcp = volttron_instance_web assert vcp assert vcp.is_running() vcp_uuid = add_volttron_central_platform(vcp) print("VCP uuid: {}".format(vcp_uuid)) # historian_config = SQLITE_HISTORIAN_CONFIG.copy() - # historian_config['connection']['params']['database'] = \ - # vcp.volttron_home + "/data/platform.historian.sqlite" + # historian_config['connection']['params']['database'] = vcp.volttron_home + "/data/platform.historian.sqlite" # - # historian_uuid = add_sqlhistorian(vcp, config=historian_config, - # vip_identity='platform.historian') + # historian_uuid = add_sqlhistorian(vcp, config=historian_config, vip_identity='platform.historian') # listeneer_uuid = add_listener(vcp, vip_identity="platform.listener") assert vcp_uuid, "Invalid vcp uuid returned" @@ -69,22 +95,15 @@ def setup_platform(request): yield vcp - print('Shutting down instance: {}'.format(vcp.volttron_home)) - if vcp.is_running(): - vcp.remove_all_agents() - # Shutdown handles case where the platform hasn't started. - vcp.shutdown_platform() + vcp.remove_agent(vcp_uuid) @pytest.fixture(scope="module") def vc_agent(setup_platform): """ Gets the a volttron central proxy agent to test with. - - The return value is a tuple with the 0th position the instances of the - proxy vc agent. The 1st position will be the identity of the vcp agent - that vc should use as its identity for talking with the vcp instances. - + The return value is a tuple with the 0th position the instances of the proxy vc agent. The 1st position will be the + identity of the vcp agent that vc should use as its identity for talking with the vcp instances. . note:: Please note that the agent is merely a proxy (mock) type of vc agent. @@ -93,9 +112,10 @@ def vc_agent(setup_platform): :return: """ assert setup_platform.instance_name is not None - agent = setup_platform.build_agent(identity='volttron.central') - capabilities = [{'edit_config_store': {'identity': VOLTTRON_CENTRAL_PLATFORM}}] - setup_platform.add_capabilities(agent.core.publickey, capabilities=capabilities) + setup_platform.allow_all_connections() + gevent.sleep(5) + add_volttron_central(setup_platform) + agent = setup_platform.dynamic_agent vcp_identity = None look_for_identity = setup_platform.instance_name + ".platform.agent" @@ -109,27 +129,21 @@ def vc_agent(setup_platform): break if vcp_identity is None: pytest.fail("vcp_identity was not connected to the instance.") - + gevent.sleep(5) yield agent, vcp_identity - agent.core.stop(timeout=STANDARD_GET_TIMEOUT) - @pytest.mark.vcp def test_list_agents(setup_platform, vc_agent, caplog): - # split vc_agent into it's respective parts. vc, vcp_identity = vc_agent - - agent_list = vc.vip.rpc.call(vcp_identity, - "list_agents").get(timeout=2) - assert agent_list and len(agent_list) == 3 - + agent_list = vc.vip.rpc.call(vcp_identity, "list_agents").get(timeout=2) + assert agent_list and len(agent_list) == 2 + listener_uuid = None try: listener_uuid = add_listener(setup_platform) - agent_list = vc.vip.rpc.call(vcp_identity, - "list_agents").get(timeout=2) - assert agent_list and len(agent_list) == 4 + agent_list = vc.vip.rpc.call(vcp_identity, "list_agents").get(timeout=2) + assert agent_list and len(agent_list) == 3 except Exception as e: _log.debug("EXCEPTION: {}".format(e.args)) finally: @@ -139,12 +153,10 @@ def test_list_agents(setup_platform, vc_agent, caplog): @pytest.mark.vcp def test_can_inspect_agent(setup_platform, vc_agent, caplog): - # split vc_agent into it's respective parts. vc, vcp_identity = vc_agent - output = vc.vip.rpc.call(vcp_identity, - 'inspect').get(timeout=3) + output = vc.vip.rpc.call(vcp_identity, 'inspect').get(timeout=3) methods = output['methods'] print("rpc methods are:") @@ -171,8 +183,7 @@ def test_can_call_rpc_method(setup_platform, vc_agent): # split vc_agent into it's respective parts. vc, vcp_identity = vc_agent - health = vc.vip.rpc.call(vcp_identity, - 'get_health').get(timeout=STANDARD_GET_TIMEOUT) + health = vc.vip.rpc.call(vcp_identity, 'get_health').get(timeout=STANDARD_GET_TIMEOUT) assert health['status'] == STATUS_GOOD @@ -181,13 +192,10 @@ def test_can_get_version(setup_platform, vc_agent): # split vc_agent into it's respective parts. vc, vcp_identity = vc_agent - import subprocess, os script = "scripts/get_versions.py" python = "python" - args = [python, script] - response = subprocess.check_output(args=[python, script], - cwd=get_volttron_root(), universal_newlines=True) + response = subprocess.check_output(args=[python, script], cwd=get_volttron_root(), universal_newlines=True) expected_version = None for line in response.split("\n"): agent, version = line.strip().split(',') @@ -195,10 +203,8 @@ def test_can_get_version(setup_platform, vc_agent): expected_version = version break - # Note this is using vcp because it has the version info not the - # vcp_identity - version = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, - 'agent.version').get(timeout=STANDARD_GET_TIMEOUT) + # Note this is using vcp because it has the version info not the vcp_identity + version = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, 'agent.version').get(timeout=STANDARD_GET_TIMEOUT) # version = setup_platform.call('agent.version', timeout=2) assert version is not None assert version == expected_version @@ -208,8 +214,7 @@ def test_can_get_version(setup_platform, vc_agent): def test_can_change_topic_map(setup_platform, vc_agent): vc, vcp_identity = vc_agent - topic_map = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, - 'get_replace_map').get(timeout=STANDARD_GET_TIMEOUT) + topic_map = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, 'get_replace_map').get(timeout=STANDARD_GET_TIMEOUT) assert topic_map == {} @@ -229,8 +234,7 @@ def test_can_change_topic_map(setup_platform, vc_agent): gevent.sleep(2) - topic_map = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, - 'get_replace_map').get(timeout=STANDARD_GET_TIMEOUT) + topic_map = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, 'get_replace_map').get(timeout=STANDARD_GET_TIMEOUT) assert 'fudge' in topic_map assert topic_map['fudge'] == 'ball' @@ -251,10 +255,28 @@ def test_can_change_topic_map(setup_platform, vc_agent): gevent.sleep(2) - topic_map = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, - 'get_replace_map').get( - timeout=STANDARD_GET_TIMEOUT) + topic_map = vc.vip.rpc.call(VOLTTRON_CENTRAL_PLATFORM, 'get_replace_map').get(timeout=STANDARD_GET_TIMEOUT) assert 'fudge' not in topic_map assert 'map2' in topic_map assert topic_map['map2'] == 'it' + + +@pytest.mark.vcp +def test_default_config(volttron_instance): + """ + Test the default configuration file included with the agent + """ + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("VolttronCentralPlatform"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance.install_agent( + agent_dir=get_services_core("VolttronCentralPlatform"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD diff --git a/services/core/VolttronCentralPlatform/tests/test_platformagent.py b/services/core/VolttronCentralPlatform/tests/test_platformagent.py index 61584fae77..52638ca906 100644 --- a/services/core/VolttronCentralPlatform/tests/test_platformagent.py +++ b/services/core/VolttronCentralPlatform/tests/test_platformagent.py @@ -100,6 +100,9 @@ def add_method_response(self, method_name, response): pass +pytest.skip("Deprecated", allow_module_level=True) + + @pytest.fixture(scope="module") def vcp_simulated_vc(request): """ @@ -126,7 +129,7 @@ def vcp_simulated_vc(request): @pytest.mark.pa -@pytest.mark.skip(reason="4.1 fixing tests") +# @pytest.mark.skip(reason="4.1 fixing tests") def test_pa_uses_correct_address_hash(vcp_simulated_vc): p, vc = vcp_simulated_vc @@ -135,7 +138,7 @@ def test_pa_uses_correct_address_hash(vcp_simulated_vc): @pytest.mark.pa -@pytest.mark.skip(reason="4.1 fixing tests") +# @pytest.mark.skip(reason="4.1 fixing tests") def test_get_health(vcp_simulated_vc): p, vc = vcp_simulated_vc @@ -159,7 +162,7 @@ def test_get_health(vcp_simulated_vc): assert health['context'] == 'Let the good-times role' @pytest.mark.pa -@pytest.mark.skip(reason="4.1 fixing tests") +# @pytest.mark.skip(reason="4.1 fixing tests") def test_listagents(vcp_simulated_vc): try: wrapper, vc = vcp_simulated_vc @@ -183,7 +186,7 @@ def test_listagents(vcp_simulated_vc): os.environ.pop('VOLTTRON_HOME') @pytest.mark.pa -@pytest.mark.skip(reason="4.1 fixing tests") +# @pytest.mark.skip(reason="4.1 fixing tests") def test_manage_agent(vcp_instance): """ Test that we can manage a `VolttronCentralPlatform`. @@ -208,7 +211,7 @@ def test_manage_agent(vcp_instance): @pytest.mark.pa -@pytest.mark.xfail(reason="Need to upgrade") +# @pytest.mark.xfail(reason="Need to upgrade") def test_can_get_agentlist(vcp_instance): """ Test that we can retrieve an agent list from an agent. @@ -247,7 +250,7 @@ def test_can_get_agentlist(vcp_instance): @pytest.mark.pa -@pytest.mark.skip(reason="4.1 fixing tests") +# @pytest.mark.skip(reason="4.1 fixing tests") def test_agent_can_be_managed(vcp_instance): wrapper = vcp_instance[0] publickey, secretkey = get_new_keypair() @@ -271,7 +274,7 @@ def test_agent_can_be_managed(vcp_instance): @pytest.mark.pa -@pytest.mark.skip(reason="4.1 fixing tests") +# @pytest.mark.skip(reason="4.1 fixing tests") def test_status_good_when_agent_starts(vcp_instance): wrapper = vcp_instance[0] connection = wrapper.build_connection(peer=VOLTTRON_CENTRAL_PLATFORM) diff --git a/services/core/VolttronCentralPlatform/tests/test_vcp_units.py b/services/core/VolttronCentralPlatform/tests/test_vcp_units.py index e346d1fa0a..9ee2c46aaf 100644 --- a/services/core/VolttronCentralPlatform/tests/test_vcp_units.py +++ b/services/core/VolttronCentralPlatform/tests/test_vcp_units.py @@ -4,7 +4,7 @@ # Patch the VolttronCentralPlatform so the underlying Agent interfaces are mocked -# so we can just test the things that the MasterWebService is responsible for. +# so we can just test the things that the PlatformWebService is responsible for. VolttronCentralPlatform.__bases__ = (AgentMock.imitate(Agent, Agent()),) diff --git a/services/core/VolttronCentralPlatform/vcplatform/agent.py b/services/core/VolttronCentralPlatform/vcplatform/agent.py index 22526d3899..761af6747a 100644 --- a/services/core/VolttronCentralPlatform/vcplatform/agent.py +++ b/services/core/VolttronCentralPlatform/vcplatform/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -189,8 +189,8 @@ def __init__(self, reconnect_interval, vc_address, self._platform_driver_ids = None self._device_publishes = {} self._devices = {} - # master driver config store stat times - self._master_driver_stat_times = {} + # platform driver config store stat times + self._platform_driver_stat_times = {} # instance id is the vip identity of this agent on the remote platform. self._instance_id = None @@ -539,10 +539,10 @@ def enable_connection_heartbeat(): return self._vc_connection if self._vc_address is None or self._vc_serverkey is None: - _log.warn('volttron_central_address is None in config store ' - 'and volttron.central is not a peer.') - _log.warn('Recommend adding volttron.central address or adding a ' - '"config" file to the config store.') + _log.warning('volttron_central_address is None in config store ' + 'and volttron.central is not a peer.') + _log.warning('Recommend adding volttron.central address or adding a ' + '"config" file to the config store.') return None self._vc_connection = build_agent( @@ -952,13 +952,13 @@ def get_devices(self): for platform_driver_id in self._platform_driver_ids: fname = os.path.join(os.environ['VOLTTRON_HOME'], "configuration_store/{}.store".format(platform_driver_id)) stat_time = os.stat(fname).st_mtime if os.path.exists(fname) else None - if self._master_driver_stat_times.get(platform_driver_id, None) != stat_time: + if self._platform_driver_stat_times.get(platform_driver_id, None) != stat_time: config_changed = True found_a_platform_driver = found_a_platform_driver or stat_time - self._master_driver_stat_times[platform_driver_id] = stat_time + self._platform_driver_stat_times[platform_driver_id] = stat_time if not found_a_platform_driver: - _log.debug("No master driver currently on this platform.") + _log.debug("No platform driver currently on this platform.") return {} if not config_changed: diff --git a/services/core/VolttronCentralPlatform/vcplatform/vcconnection.py b/services/core/VolttronCentralPlatform/vcplatform/vcconnection.py index df74b586aa..0be5199cff 100644 --- a/services/core/VolttronCentralPlatform/vcplatform/vcconnection.py +++ b/services/core/VolttronCentralPlatform/vcplatform/vcconnection.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/WeatherDotGov/README.md b/services/core/WeatherDotGov/README.md new file mode 100644 index 0000000000..a6c6ba1024 --- /dev/null +++ b/services/core/WeatherDotGov/README.md @@ -0,0 +1,74 @@ +# WeatherDotgov Agent + +This agent provides the ability to query for current and forecast +weather data from NOAA. The agent extends BaseWeatherAgent that provides +caching of recently requested data, as well as mapping of weather point +names from NOAA\'s naming scheme to the standardized CF-conventions +scheme. + +## Requirements + +The WeatherDotgov agent requires the Pint package. This package can be +installed in an activated environment with: + + pip install pint + +## Configuration + +The following is an example configuration for the Weather.gov agent. All +configuration parameters are optional. + +**Parameters** + +1. \"database_file\" - sqlite database file for weather data caching. + Defaults to \"weather.sqlite\" in the agent\'s data directory +2. \"max_size_gb\" - maximum size of cache database. When cache + exceeds this size, data will get purged from cache until the cache + is within the configured size. +3. \"poll_locations\" - list of locations to periodically poll for + current data +4. \"poll_interval\" - polling frequency or the number of seconds + between each poll. + + ``` + { + "database_file": "weather.sqlite", + "max_size_gb": 1, + "poll_locations": [{"station": "KLAX"}, {"station": "KPHX"}], + "poll_interval": 60 + } + ``` + +# Registry Configuration + +The registry configuration file for this agent can be found in agent\'s +data directory. This configuration provides the point name mapping from +NOAA\'s point scheme to the CF-conventions scheme by default. The file +leaves the unit name columns for each point blank, as this agent does +not include unit conversion. Points that do not specify +\'Standard_Point_Name\' were found to not have a logical match to any +point found in the CF-Conventions. For these points NOAA point names +(Service_Point_Name) will be used. + + |Service_Point_Name | Standard_Point_Name | Service_Units | Standard_Units | + |--------------------|--------------------------------|----------------|------------------| + |heatIndex | | | | + |presentWeather | | | | + |seaLevelPressure |air_pressure_at_mean_sea_level | | | + |temperature |air_temperature | | | + + +## Notes + +The WeatherDotGov agent does not utilize an API key, as NOAA allows +users to gather weather data for free, and does not provide nor require +keys. + +This implementation of the weather agent does not include historical +weather data, as NOAA does not provide an accessible endpoint from which +historical data may be obtained. + +Data provided by NOAA is in a nested dictionary format. The base weather +agent does not handle unit conversion for arbitrary nested dictionary +format and hence this agent does not support unit conversion at this +time. diff --git a/services/core/WeatherDotGov/README.rst b/services/core/WeatherDotGov/README.rst deleted file mode 100644 index cfe53b4392..0000000000 --- a/services/core/WeatherDotGov/README.rst +++ /dev/null @@ -1,71 +0,0 @@ -.. _WeatherDotgov Agent: - -================= -WeatherDotgov Agent -================= - -This agent provides the ability to query for current and forecast weather -data from NOAA. The agent extends BaseWeatherAgent that provides caching of -recently requested data, as well as mapping of weather point names from NOAA's -naming scheme to the standardized CF-conventions scheme. - -Requirements ------------- -The WeatherDotgov agent requires the Pint package. This package can be installed in an -activated environment with: - -:: - - pip install pint - -Configuration -------------- -The following is an example configuration for the Weather.gov agent. All -configuration parameters are optional. - -**Parameters** - - 1. "database_file" - sqlite database file for weather data caching. Defaults to "weather.sqlite" in the agent's data directory - 2. "max_size_gb" - maximum size of cache database. When cache exceeds this size, data will get purged from cache until the cache is within the configured size. - 3. "poll_locations" - list of locations to periodically poll for current data - 4. "poll_interval" - polling frequency or the number of seconds between each poll. - -:: - - { - "database_file": "weather.sqlite", - "max_size_gb": 1, - "poll_locations": [{"station": "KLAX"}, {"station": "KPHX"}], - "poll_interval": 60 - } - -Registry Configuration ----------------------- -The registry configuration file for this agent can be found in agent's data -directory. This configuration provides the point name mapping from NOAA's point -scheme to the CF-conventions scheme by default. The file leaves the unit name -columns for each point blank, as this agent does not include unit conversion. -Points that do not specify 'Standard_Point_Name' were found to not have a -logical match to any point found in the CF-Conventions. For these points NOAA -point names (Service_Point_Name) will be used. - -.. csv-table:: Registry Configuration - :header: Service_Point_Name,Standard_Point_Name,Service_Units,Standard_Units - - heatIndex,,, - presentWeather,,, - seaLevelPressure,air_pressure_at_mean_sea_level,, - temperature,air_temperature,, - -Notes -~~~~~ -The WeatherDotGov agent does not utilize an API key, as NOAA allows users to -gather weather data for free, and does not provide nor require keys. - -This implementation of the weather agent does not include historical weather -data, as NOAA does not provide an accessible endpoint from which historical -data may be obtained. - -Data provided by NOAA is in a nested dictionary format. The base weather agent -does not handle unit conversion for arbitrary nested dictionary format and hence -this agent does not support unit conversion at this time. diff --git a/services/core/WeatherDotGov/setup.py b/services/core/WeatherDotGov/setup.py index 2a15943424..fa3c80d063 100644 --- a/services/core/WeatherDotGov/setup.py +++ b/services/core/WeatherDotGov/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/core/WeatherDotGov/tests/test_weatherdotgov.py b/services/core/WeatherDotGov/tests/test_weatherdotgov.py index e293f01990..2498e53f10 100644 --- a/services/core/WeatherDotGov/tests/test_weatherdotgov.py +++ b/services/core/WeatherDotGov/tests/test_weatherdotgov.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,17 +35,19 @@ # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} -import copy +import os +import json +import copy import pytest import gevent -from mock import MagicMock import sqlite3 import logging -from volttron.platform.agent import utils from datetime import datetime -from volttron.platform.messaging.health import STATUS_GOOD +from mock import MagicMock +from volttron.platform.messaging.health import STATUS_GOOD +from volttron.platform.agent import utils from volttron.platform import get_services_core __version__ = "0.1.0" @@ -76,9 +78,8 @@ def cleanup_cache(volttron_instance, query_agent, weather): tables = ["get_current_weather", "get_hourly_forecast"] version = query_agent.vip.rpc.call(identity, 'get_version').get(timeout=3) cwd = volttron_instance.volttron_home - database_file = "/".join([cwd, "agents", weather, "weatherdotgovagent-" + - version, "weatherdotgovagent-" + version + - ".agent-data", "weather.sqlite"]) + database_file = "/".join([cwd, "agents", weather, "weatherdotgovagent-" + version, "weatherdotgovagent-" + version + + ".agent-data", "weather.sqlite"]) _log.debug(database_file) sqlite_connection = sqlite3.connect(database_file) cursor = sqlite_connection.cursor() @@ -100,8 +101,7 @@ def query_agent(request, volttron_instance): prefix="weather/poll/current", callback=agent.poll_callback).get() - # 2: add a tear down method to stop the fake - # agent that published to message bus + # 2: add a tear down method to stop the fake agent that published to message bus def stop_agent(): print("In teardown method of query_agent") agent.core.stop() @@ -120,10 +120,9 @@ def weather(request, volttron_instance): agent = volttron_instance.install_agent( vip_identity=identity, agent_dir=source, - start=False, + start=True, config_file=config) - volttron_instance.start_agent(agent) gevent.sleep(3) def stop_agent(): @@ -146,8 +145,7 @@ def test_success_current(cleanup_cache, weather, query_agent, locations): :param weather: instance of weather service to be tested :param query_agent: agent to leverage to use RPC calls """ - query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) print(query_data) assert len(query_data) == len(locations) for record in query_data: @@ -162,18 +160,14 @@ def test_success_current(cleanup_cache, weather, query_agent, locations): else: results = record.get("weather_error") if results.startswith("Remote API returned no data") or \ - results.startswith("Remote API redirected request, " - "but redirect failed") \ - or results.startswith("Remote API returned invalid " - "response")\ - or results.startswith("API request failed with unexpected " - "response"): + results.startswith("Remote API redirected request, but redirect failed") \ + or results.startswith("Remote API returned invalid response")\ + or results.startswith("API request failed with unexpected response"): assert True else: assert False - cache_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) # check names returned are valid assert len(cache_data) == len(cache_data) @@ -189,12 +183,10 @@ def test_success_current(cleanup_cache, weather, query_agent, locations): () ]) def test_current_fail(weather, query_agent, locations): - query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_current_weather', locations).get(timeout=30) for record in query_data: error = record.get("weather_error") - assert error.startswith("Invalid location format.") or error.startswith( - "Invalid location") + assert error.startswith("Invalid location format.") or error.startswith("Invalid location") assert record.get("weather_results") is None @@ -212,25 +204,20 @@ def test_success_forecast(cleanup_cache, weather, query_agent, locations): :param weather: instance of weather service to be tested :param query_agent: agent to leverage to use RPC calls """ - query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', - locations, hours=2).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', locations, hours=2).get(timeout=30) # print(query_data) assert len(query_data) == len(locations) for x in range(0, len(query_data)): location_data = query_data[x] assert (location_data.get("lat") and location_data.get("long")) or \ - (location_data.get("wfo") and location_data.get( - "x") and location_data.get("y")) + (location_data.get("wfo") and location_data.get("x") and location_data.get("y")) results = location_data.get("weather_results") error = location_data.get("weather_error") if error and not results: if error.startswith("Remote API returned no data") \ - or error.startswith("Remote API redirected request, but " - "redirect failed") \ - or error.startswith("Remote API returned invalid " - "response") \ - or error.startswith("API request failed with " - "unexpected response"): + or error.startswith("Remote API redirected request, but redirect failed") \ + or error.startswith("Remote API returned invalid response") \ + or error.startswith("API request failed with unexpected response"): assert True else: assert False @@ -240,9 +227,7 @@ def test_success_forecast(cleanup_cache, weather, query_agent, locations): forecast_time = utils.parse_timestamp_string(record[0]) assert isinstance(forecast_time, datetime) - cache_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', - locations, - hours=2).get(timeout=30) + cache_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', locations, hours=2).get(timeout=30) assert len(cache_data) == len(query_data) for x in range(0, len(cache_data)): query_location_data = query_data[x] @@ -250,14 +235,10 @@ def test_success_forecast(cleanup_cache, weather, query_agent, locations): assert cache_location_data.get( "generation_time") == query_location_data.get("generation_time") if cache_location_data.get("lat") and cache_location_data.get("long"): - assert cache_location_data.get("lat") == query_location_data.get( - "lat") - assert cache_location_data.get("long") == query_location_data.get( - "long") - elif cache_location_data.get("wfo") and cache_location_data.get( - "x") and cache_location_data.get("y"): - assert cache_location_data.get("wfo") == query_location_data.get( - "wfo") + assert cache_location_data.get("lat") == query_location_data.get("lat") + assert cache_location_data.get("long") == query_location_data.get("long") + elif cache_location_data.get("wfo") and cache_location_data.get("x") and cache_location_data.get("y"): + assert cache_location_data.get("wfo") == query_location_data.get("wfo") assert cache_location_data.get("x") == query_location_data.get("x") assert cache_location_data.get("y") == query_location_data.get("y") else: @@ -277,12 +258,9 @@ def test_success_forecast(cleanup_cache, weather, query_agent, locations): else: results = cache_location_data.get("weather_error") if results.startswith("Remote API returned no data") \ - or results.startswith("Remote API redirected request, but " - "redirect failed") \ - or results.startswith("Remote API returned invalid " - "response") \ - or results.startswith("API request failed with unexpected " - "response"): + or results.startswith("Remote API redirected request, but redirect failed") \ + or results.startswith("Remote API returned invalid response") \ + or results.startswith("API request failed with unexpected response"): assert True else: assert False @@ -297,8 +275,7 @@ def test_success_forecast(cleanup_cache, weather, query_agent, locations): ]) def test_hourly_forecast_fail(weather, query_agent, locations): - query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', - locations).get(timeout=30) + query_data = query_agent.vip.rpc.call(identity, 'get_hourly_forecast', locations).get(timeout=30) for record in query_data: error = record.get("weather_error") if error.startswith("Invalid location format."): @@ -322,18 +299,24 @@ def test_hourly_forecast_fail(weather, query_agent, locations): ['weather/poll/current/KLAX', 'weather/poll/current/KABQ']), ]) -def test_polling_locations_valid_config(volttron_instance, query_agent, config, - result_topics): +def test_polling_locations_valid_config(volttron_instance, query_agent, cleanup_cache, config, result_topics): agent_uuid = None - query_agent.poll_callback.reset_mock() try: agent_uuid = volttron_instance.install_agent( vip_identity="poll.weather", agent_dir=get_services_core("WeatherDotGov"), - start=False, + start=True, config_file=config) - volttron_instance.start_agent(agent_uuid) - gevent.sleep(3) + + # wait for the agent to start up + gevent.sleep(1) + + # make sure we don't have any existing callback args + query_agent.poll_callback.reset_mock() + + # wait for the duration of the update interval + gevent.sleep(config.get("poll_interval")) + print(query_agent.poll_callback.call_args_list) assert len(result_topics) == query_agent.poll_callback.call_count assert "poll.weather" == query_agent.poll_callback.call_args[0][1] @@ -353,9 +336,39 @@ def test_polling_locations_valid_config(volttron_instance, query_agent, config, assert len(results1) == len(config["poll_locations"]) i = i + 1 assert query_agent.vip.rpc.call( - "poll.weather", "health.get_status").get(timeout=10).get( - 'status') == STATUS_GOOD + "poll.weather", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD finally: if agent_uuid: volttron_instance.stop_agent(agent_uuid) volttron_instance.remove_agent(agent_uuid) + + +@pytest.mark.weather2 +def test_default_config(volttron_instance, cleanup_cache, query_agent): + """ + Test the default configuration file included with the agent + """ + locations = [{"station": "KLAX"}] + publish_agent = volttron_instance.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_services_core("WeatherDotGov"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + volttron_instance.install_agent( + agent_dir=get_services_core("WeatherDotGov"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + query_data = query_agent.vip.rpc.call("health_test", 'get_current_weather', locations).get(timeout=30) + print(query_data) + assert len(query_data) == len(locations) + for record in query_data: + # check format here + assert record.get("observation_time") + assert record.get("station") + # check weather error message + results = record.get("weather_results") diff --git a/services/core/WeatherDotGov/weatherdotgov/agent.py b/services/core/WeatherDotGov/weatherdotgov/agent.py index 91b8b7a6df..902cd82ec2 100644 --- a/services/core/WeatherDotGov/weatherdotgov/agent.py +++ b/services/core/WeatherDotGov/weatherdotgov/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/AgentWatcher/README.md b/services/ops/AgentWatcher/README.md new file mode 100644 index 0000000000..f0dcbabd76 --- /dev/null +++ b/services/ops/AgentWatcher/README.md @@ -0,0 +1,37 @@ +## Agent Watcher + +The Agent Watcher is used to monitor agents running on a VOLTTRON instance. Specifically it monitors whether a set of +VIP identities (peers) are connected to the instance. If any of the peers in the set are not present then an alert will +be sent. + +### Configuration + +The agent has two configuration values: + +* watchlist: a list of VIP identities to watch on the platform instance +* check-period: interval in seconds between the agent watcher checking the platform peerlist and publishing alerts + +``` +{ + "watchlist": [ + "platform.driver", + "platform.actuator" + ], + "check-period": 10 +} +``` + + +### Example Publish + +The following is an example publish from a platform with an instance of the Platform Driver installed but not running. + +``` +Peer: pubsub +Sender: watcheragent-0.1_1 +Bus: +Topic: alerts/AgentWatcher/james_watcheragent-0_1_1 +Headers: {'alert_key': 'AgentWatcher', 'min_compatible_version': '3.0', 'max_compatible_version': ''} +Message: ('{"status": "BAD", "context": "Agent(s) expected but but not running ' + '[\'platform.driver\']", "last_updated": "2021-01-25T23:25:43.065109+00:00"}') +``` diff --git a/services/ops/AgentWatcher/README.rst b/services/ops/AgentWatcher/README.rst deleted file mode 100644 index 81fa841c05..0000000000 --- a/services/ops/AgentWatcher/README.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. _Agent_Watcher - -============= -Agent Watcher -============= - -The Agent Watcher is used to monitor agents running on a VOLTTRON instance. -Specifically it monitors whether a set of VIP identities (peers) are connected -to the instance. If any of the peers in the set are not present then an alert -will be sent. \ No newline at end of file diff --git a/services/ops/AgentWatcher/config b/services/ops/AgentWatcher/config index 6b360ba94b..66a4b62024 100644 --- a/services/ops/AgentWatcher/config +++ b/services/ops/AgentWatcher/config @@ -1,12 +1,7 @@ { - # AgentWatcher will send an alert if any of the following - # VIP identities are not running on the platform "watchlist": [ "platform.driver", - "platform.historian" - ] - - # Time to wait between agent running checks - # defaults to 10 seconds. - # "check-period": 10 + "platform.actuator" + ], + "check-period": 10 } diff --git a/services/ops/AgentWatcher/setup.py b/services/ops/AgentWatcher/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/ops/AgentWatcher/setup.py +++ b/services/ops/AgentWatcher/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/AgentWatcher/tests/test_agent_watcher.py b/services/ops/AgentWatcher/tests/test_agent_watcher.py index 5c8ff17244..daa4c3cb36 100644 --- a/services/ops/AgentWatcher/tests/test_agent_watcher.py +++ b/services/ops/AgentWatcher/tests/test_agent_watcher.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,11 +36,13 @@ # under Contract DE-AC05-76RL01830 # }}} +import os +import json import pytest import gevent - from volttron.platform import get_ops, get_examples, jsonapi +from volttron.platform.messaging.health import STATUS_GOOD WATCHER_CONFIG = { "watchlist": ["listener"], @@ -50,6 +52,7 @@ alert_messages = {} listener_uuid = None + @pytest.fixture(scope='module') def platform(request, volttron_instance): global listener_uuid @@ -84,6 +87,10 @@ def onmessage(peer, sender, bus, topic, headers, message): def stop(): volttron_instance.stop_agent(listener_uuid) volttron_instance.stop_agent(watcher_uuid) + + volttron_instance.remove_agent(listener_uuid) + volttron_instance.remove_agent(watcher_uuid) + agent.core.stop() alert_messages.clear() @@ -108,3 +115,43 @@ def test_agent_watcher(platform): gevent.sleep(2) assert not alert_messages + + +def test_default_config(platform): + """ + Test the default configuration file included with the agent + """ + publish_agent = platform.build_agent(identity="test_agent") + gevent.sleep(1) + + config_path = os.path.join(get_ops("AgentWatcher"), "config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + + assert 'watchlist' in config_json and 'check-period' in config_json + assert isinstance(config_json.get('watchlist'), list) and ( + isinstance(config_json.get('check-period'), int) or isinstance(config_json.get('check-period'), float)) + if len(config_json.get('watchlist')) > 0: + for watch in config_json.get('watchlist'): + assert isinstance(watch, str) + + platform.install_agent( + agent_dir=get_ops("AgentWatcher"), + config_file=config_json, + start=True, + vip_identity="health_test") + + gevent.sleep(2) + + if len(config_json.get('watchlist')) > 0: + assert f"Agent(s) expected but but not running {config_json.get('watchlist')}" in alert_messages + else: + assert not alert_messages + + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + + publish_agent.core.stop() + + gevent.sleep(2) + assert alert_messages diff --git a/services/ops/AgentWatcher/watcher/agent.py b/services/ops/AgentWatcher/watcher/agent.py index 5e23da85b1..e9675ff5ab 100644 --- a/services/ops/AgentWatcher/watcher/agent.py +++ b/services/ops/AgentWatcher/watcher/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/EmailerAgent/README.md b/services/ops/EmailerAgent/README.md new file mode 100644 index 0000000000..9989166d5a --- /dev/null +++ b/services/ops/EmailerAgent/README.md @@ -0,0 +1,105 @@ +Emailer +======= + +The Emailer agent allows an instance of the VOLTTRON platform to send +email. When used in combination with the Alert agent, alerts from +unpublished configured devices will automatically be sent. In addition, +agents are able to send emails directly through the pub/sub interface. + +Agents needing to send an email through the instance can do so by +sending the following header and message to the `platform/send_email` topic +which is monitored by the Emailer agent. The following +is the expected payload for the message body and the optional header. + + +Optional Headers +---------------- + +Emails by default will be sent to the initial configured email +addresses. The below headers will overwrite those properties for the +current email being sent. + +``` json +{ + "from-address": 'foo@bar.com', + "to-addresses": ['alpha.beta@foo.com', 'bob-and-joe@bar.com'] +} +``` + + +Required Message Body +--------------------- + +``` json +{ + "subject": "I am a happy camper", + "message": "This is a big long string message that I am sending" +} +``` + + +Example Sending of Email +------------------------ + +``` python +headers = { + "from-address": 'foo@bar.com', + "to-addresses": ['alpha.beta@foo.com', 'bob-and-joe@bar.com'] +} + +message = { + "subject": "I am a happy camper", + "message": "This is a big long string message that I am sending" +} + +self.vip.pubsub.publish('pubsub', topic='platform/send_email', + headers=headers, message=message) +``` + + +Configuration Options +--------------------- + +The following JSON configuration file shows all the options currently +supported by the Forward Historian agent. + +``` python +{ + # The smtp-address (Simple Mail Transfer Protocol) to ship the email + # from (the "from-address" to each of the recipients). + "smtp-address": "smtp.example.com", + + # The smtp-username is to provide the username of the SMTP server + # which is being used for sending the messages. + "smtp-username":"", + + # The smtp-password is to provide the password of the SMTP server + # corresponding to the username which is being used for sending the messages. + "smtp-password":"", + + # The smtp-port is to provide the port of the SMTP server. + "smtp-port":"", + + # The smtp-tls yes or no if we want to use TLS. + "smtp-tls":, + + # The sending address of the email. This value will be listed in the + # FROM attributed of the message envelop. It will also be show in the + # reply of the message when a recipient chooses reply from their + # email client. + "from-address": "no-reply@example.com", + + # A list of default email addresses for sending alerts to. Each + # address will be sent a copy of the email as if from a mailing list. + "to-addresses": [ + "admin1@example.com" + ], + + # When an alert is sent typically it can have the effect of being + # sent many times. This setting throttles the sending of email only + # after a specific number of minutes. + # + # DEFAULT: "allow-frequency-minutes": 60 + "allow-frequency-minutes": 120 +} +``` diff --git a/services/ops/EmailerAgent/README.rst b/services/ops/EmailerAgent/README.rst deleted file mode 100644 index f309ec93ee..0000000000 --- a/services/ops/EmailerAgent/README.rst +++ /dev/null @@ -1,103 +0,0 @@ -.. _Emailer - -======= -Emailer -======= - -The Emailer agent allows an instance of the VOLTTRON platform to send email. -When used in combination with the AlertAgent agent alerts from unpublished -configured devices will automatically be sent. In addition, agents -are able to send emails directly through the pubsub interface. - -Agents needing to send an email through the instance can do so by sending the -following header and message to the emailer topic. The emailer monitors the -'platform/send_email' topic. The following is the expected payload for the -message body and the optional header. - -Optional Headers -~~~~~~~~~~~~~~~ - -Emails by default will be sent to the initial configured email addresses. The -below headers will overwrite those properties for the current email being sent. - -.. code-block:: python - - { - "from-address": 'foo@bar.com', - "to-addresses": ['alpha.beta@foo.com', 'bob-and-joe@bar.com'] - } - -Required Message Body -~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - { - "subject": "I am a happy camper", - "message": "This is a big long string message that I am sending" - } - -Example Sending of Email -~~~~~~~~~~~~~~~~~~~~~~~ - -.. code-block:: python - - headers = { - "from-address": 'foo@bar.com', - "to-addresses": ['alpha.beta@foo.com', 'bob-and-joe@bar.com'] - } - - message = { - "subject": "I am a happy camper", - "message": "This is a big long string message that I am sending" - } - - self.vip.pubsub.publish('pubsub', topic='platform/send_email', - headers=headers, message=message) - -Configuration Options ---------------------- - -The following JSON configuration file shows all the options currently supported -by the ForwardHistorian agent. - -.. code-block:: python - - { - # The smtp-address (Simple Mail Transfer Protocol) to ship the email - # from the "from-address" to each of the recipients. - "smtp-address": "smtp.example.com", - - # The smtp-username is to provide the username of the SMTP server - # which is being used for sending the messages. - "smtp-username":"", - - # The smtp-password is to provide the password of the SMTP server - # corresponding to the username which is being used for sending the messages. - "smtp-password":"", - - # The smtp-port is to provide the port of the SMTP server. - "smtp-port":"", - - # The smtp-tls yes or no if we want to use TLS. - "smtp-tls":, - - # The sending address of the email. This value will be listed in the - # FROM attributed of the message envelop. It will also be show in the - # reply of the message when a recipient chooses reply from their - # email client. - "from-address": "no-reply@example.com", - - # A list of default email addresses for sending alerts to. Each - # address will be sent a copy of the email as if from a mailing list. - "to-addresses": [ - "admin1@example.com" - ], - - # When an alert is sent typically it can have the effect of being - # sent many times. This setting throttles the sending of email only - # after a specific number of minutes. - # - # DEFAULT: "allow-frequency-minutes": 60 - "allow-frequency-minutes": 120 - } \ No newline at end of file diff --git a/services/ops/EmailerAgent/emailer/agent.py b/services/ops/EmailerAgent/emailer/agent.py index c02f53dc8e..6dab6ef69c 100644 --- a/services/ops/EmailerAgent/emailer/agent.py +++ b/services/ops/EmailerAgent/emailer/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -319,8 +319,7 @@ def on_alert_message(self, peer, sender, bus, topic, headers, message): :param message: """ if not self.current_config.get('send_alerts_enabled'): - _log.warn('Alert message found but not sent enable alerts ' - 'enable by setting send_alerts_enabled to True') + _log.warning('Alert message found but not sent enable alerts enable by setting send_alerts_enabled to True') return mailkey = headers.get(ALERT_KEY, None) diff --git a/services/ops/EmailerAgent/setup.py b/services/ops/EmailerAgent/setup.py index 74e2c8eafa..c309604c50 100644 --- a/services/ops/EmailerAgent/setup.py +++ b/services/ops/EmailerAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/FailoverAgent/README.md b/services/ops/FailoverAgent/README.md new file mode 100644 index 0000000000..418304ab64 --- /dev/null +++ b/services/ops/FailoverAgent/README.md @@ -0,0 +1,68 @@ +## Failover Agent + +The Failover agent provides a generic high availability option to VOLTTRON. When the primary platform becomes +inactive the secondary platform will start an installed agent. + + +### Standard Failover + +There are two behavior patterns implemented in the agent. In the default +configuration, the secondary instance will ask Volttron Central to verify +that the primary instance is down. This helps to avoid a split brain scenario. +If neither Volttron Central nor the other Failover instance is reachable +then the Failover agent will stop the agent it is managing. + +The following tables show the expected state of the configured agent on corresponding platform instance, given the state +of the agent on the other platform instance. + +**Behavior of Primary Instance** + +| | VC Up | VC Down | +|-----------------|-------|---------| +| Secondary Up | start | start | +| Secondary Down | start | stop | + +**Behavior of Secondary Instance** + +| | VC Up | VC Down | +|--------------|-----------------|---------| +| Primary Up | stop | stop | +| Primary Down | Verify with VC before starting | stop | + + +### Simple Failover + +There is also a *simple* configuration available that does not involve +coordination with Volttron Central. The secondary agent will start its managed +agent if believes the primary to be inactive. The simple primary always has its +managed agent started. + + +### Configuration + +Failover behavior is set in the failover agent's configuration file. Example +primary and secondary configuration files are shown below. + +``` +{ | { + "agent_id": "primary", | "agent_id": "secondary", + "simple_behavior": true, | "simple_behavior": true, + | + "remote_vip": "tcp://127.0.0.1:8001", | "remote_vip": "tcp://127.0.0.1:8000", + "remote_serverkey": "", | "remote_serverkey": "", + | + "agent_vip_identity": "platform.driver",| "agent_vip_identity": "platform.driver", + | + "heartbeat_period": 10, | "heartbeat_period": 10, + | + "timeout": 120 | "timeout": 120 +} | } +``` + +- **agent_id** - primary **or** secondary +- **simple_behavior** - Switch to turn on or off simple behavior. Both instances should match. +- **remote_vip** - Address where *remote_id* can be reached. +- **remote_serverkey** - The public key of the platform where *remote_id* lives. +- **agent_vip_identity** - The :term:`VIP Identity` of the agent that we want to manage. +- **heartbeat_period** - Send a message to *remote_id* with this period. Measured in seconds. +- **timeout** - Consider a platform inactive if a heartbeat has not been received for *timeout* seconds. diff --git a/services/ops/FailoverAgent/failover/agent.py b/services/ops/FailoverAgent/failover/agent.py index 5ef50858c9..a5d233baf4 100644 --- a/services/ops/FailoverAgent/failover/agent.py +++ b/services/ops/FailoverAgent/failover/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -226,7 +226,7 @@ def simple_primary_state_machine(self, current_state): if current_state != self._state: context = 'Starting agent {}'.format(self.agent_vip_identity) self._state = current_state - _log.warn(context) + _log.warning(context) status = Status.build(STATUS_GOOD, context=context) self.vip.health.send_alert(alert_key, status) @@ -234,8 +234,8 @@ def simple_primary_state_machine(self, current_state): 'agent_status', self.agent_uuid).get() - is_running = proc_info[0] > 0 and proc_info[1] == None - if not is_running: + is_not_running = proc_info[0] is None and proc_info[1] is None + if is_not_running: self._agent_control('start_agent') def simple_secondary_state_machine(self, current_state): @@ -256,7 +256,7 @@ def simple_secondary_state_machine(self, current_state): self.agent_vip_identity) if current_state != self._state: self._state = current_state - _log.warn(context) + _log.warning(context) status = Status.build(STATUS_GOOD, context=context) self.vip.health.send_alert(alert_key, status) @@ -267,15 +267,26 @@ def simple_secondary_state_machine(self, current_state): self.agent_vip_identity) if current_state != self._state: self._state = current_state - _log.warn(context) + _log.warning(context) status = Status.build(STATUS_BAD, context=context) self.vip.health.send_alert(alert_key, status) + agents = self.vip.rpc.call(CONTROL, 'list_agents').get() + _log.info(f"simple_secondary_state_machine List agents: {self.agent_uuid}, {agents}") + + agents_stats = self.vip.rpc.call(CONTROL, 'status_agents').get() + _log.info(f"simple_secondary_state_machine Agent stats: {self.agent_uuid}, {agents_stats}") + proc_info = self.vip.rpc.call(CONTROL, 'agent_status', self.agent_uuid).get() - is_running = proc_info[0] > 0 and proc_info[1] == None - if not is_running: + + _log.info(f"simple_secondary_state_machine: {self.agent_uuid}, {proc_info}") + + is_not_running = proc_info[0] is None and proc_info[1] is None + + if is_not_running: + _log.info(f"simple_secondary_state_machine, starting agent: {self.agent_uuid}") self._agent_control('start_agent') diff --git a/services/ops/FailoverAgent/setup.py b/services/ops/FailoverAgent/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/ops/FailoverAgent/setup.py +++ b/services/ops/FailoverAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/FailoverAgent/tests/test_simple_failover.py b/services/ops/FailoverAgent/tests/test_simple_failover.py index 74538b1a00..0100230d19 100644 --- a/services/ops/FailoverAgent/tests/test_simple_failover.py +++ b/services/ops/FailoverAgent/tests/test_simple_failover.py @@ -31,7 +31,7 @@ uuid_primary = None uuid_secondary = None listener_primary = None - +listener_secondary = None def all_agents_running(instance): agents = instance.list_agents() @@ -46,6 +46,8 @@ def simple_failover(request, get_volttron_instances): global uuid_primary global uuid_secondary global listener_primary + global listener_secondary + primary, secondary = get_volttron_instances(2) @@ -79,8 +81,10 @@ def simple_failover(request, get_volttron_instances): config_file=simple_secondary_config) gevent.sleep(SLEEP_TIME) + assert all_agents_running(primary) assert not all_agents_running(secondary) + assert not secondary.is_agent_running(listener_secondary) def cleanup(): primary.stop_agent(uuid_primary) @@ -97,6 +101,7 @@ def cleanup(): def test_simple_failover(simple_failover): global uuid_primary + global listener_secondary alert_messages = {} primary, secondary = simple_failover @@ -110,6 +115,7 @@ def onmessage(peer, sender, bus, topic, headers, message): except KeyError: alert_messages[alert] = 1 + assert not secondary.is_agent_running(listener_secondary) listen1 = primary.build_agent() listen1.vip.pubsub.subscribe(peer='pubsub', prefix='alert', @@ -120,6 +126,7 @@ def onmessage(peer, sender, bus, topic, headers, message): prefix='alert', callback=onmessage).get() + assert not secondary.is_agent_running(listener_secondary) # make sure the secondary will take over primary.stop_agent(uuid_primary) gevent.sleep(SLEEP_TIME) @@ -180,7 +187,7 @@ def test_secondary_on_primary_crash(simple_failover): def test_can_handle_agent_upgrade(simple_failover): global listener_primary primary, secondary = simple_failover - + primary.remove_agent(listener_primary) listener_primary = primary.install_agent(agent_dir=get_examples("ListenerAgent"), vip_identity="listener", diff --git a/services/ops/FileWatchPublisher/README.md b/services/ops/FileWatchPublisher/README.md new file mode 100644 index 0000000000..9cf682c762 --- /dev/null +++ b/services/ops/FileWatchPublisher/README.md @@ -0,0 +1,50 @@ +## File Watch Publisher Agent + +The File Watch Publisher agent watches files listed in its configuration for changes. The agent will detect changes to +those files and publish those changes line-by-line on the topic the user has associated with the file in the +configuration. + +The user should be careful about what files are being watched, and which historians are being used with the +File Watch Publisher. Very long lines being output in individual messages on the message bus can result in some +performance degradation. Some configurations of the File Watch Publisher can affect the system (such as using I/O +resources when a fast-moving log is being captured in a SQLite Historian), so the user should be intentional about which +files the agent is configured to watch and the topics used for publishes. + + +### Example Usage + +The user wants to record logging information from the "myservice" service into a historian agent. + +The user can configure the File Watch Publisher to point at the "myservice.log" file with a corresponding "record" +topic - for example "record/myservice/logs". As "myservice" adds logging entries to its log file, the File Watch +Publisher will capture each new log message and publish it to the "record/myservice/logs" topics on the message bus. + +Below is a File Watch Publisher example configuration to match the above scenario. + + +#### Configuration + +```json +{ + "files": [ + { + "file": "/opt/myservice/logs/myservice.log", + "topic": "record/myservice/logs" + } + ] +} +``` + + +### Example Publish + +The following is an example publish by the File Watch Publisher installed with the above configuration. + +``` +Peer: pubsub +Sender: platform.filewatchpublisher1 +Bus: +Topic: record/myservice/logs +Headers: {'min_compatible_version': '3.0', 'max_compatible_version': ''} +Message: {'line': 'test text', 'timestamp': '2021-01-25T22:54:43.474352Z'} +``` diff --git a/services/ops/FileWatchPublisher/Tests/test_file_watcher.py b/services/ops/FileWatchPublisher/Tests/test_file_watcher.py new file mode 100644 index 0000000000..8e5493c5f3 --- /dev/null +++ b/services/ops/FileWatchPublisher/Tests/test_file_watcher.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import gevent +import json +import pytest +from mock import MagicMock + +from volttron.platform.messaging.health import STATUS_GOOD +from volttron.platform import get_ops, get_home +from volttron.platform.vip.agent import Agent + +test_path = os.path.join(get_home(), "test.txt") + +test_config = { + "files": [ + { + "file": test_path, + "topic": "platform/test_topic" + } + ] +} + + +@pytest.fixture(scope="module") +def publish_agent(request, volttron_instance): + # 1: Start a fake agent to publish to message bus + agent = volttron_instance.build_agent(identity='test-agent') + + with open(test_path, "w") as textfile: + textfile.write("test_data") + + agent.callback = MagicMock(name="callback") + agent.callback.reset_mock() + + agent.vip.pubsub.subscribe(peer='pubsub', prefix="platform/test_topic", callback=agent.callback).get() + + def stop_agent(): + print("In teardown method of publish_agent") + if isinstance(agent, Agent): + agent.core.stop() + os.remove(test_path) + + request.addfinalizer(stop_agent) + return agent + + +def test_default_config(volttron_instance, publish_agent): + """ + Test the default configuration file included with the agent + """ + config_path = os.path.join(get_ops("FileWatchPublisher"), "filewatchpublisher.config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + watcher_uuid = volttron_instance.install_agent( + agent_dir=get_ops("FileWatchPublisher"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + volttron_instance.remove_agent(watcher_uuid) + + +# def test_file_watcher(volttron_instance, publish_agent): +# watcher_uuid = volttron_instance.install_agent( +# agent_dir=get_ops("FileWatchPublisher"), +# config_file=test_config, +# start=True, +# vip_identity="health_test") +# +# with open(test_path, "w+") as textfile: +# textfile.write("more test_data") +# +# gevent.sleep(1) +# +# assert publish_agent.callback.call_count == 1 +# print(publish_agent.callback.call_args) +# volttron_instance.remove_agent(watcher_uuid) diff --git a/services/ops/FileWatchPublisher/conftest.py b/services/ops/FileWatchPublisher/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/ops/FileWatchPublisher/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/ops/FileWatchPublisher/filewatchpublisher.config b/services/ops/FileWatchPublisher/filewatchpublisher.config index 320a2f856e..f52095037e 100644 --- a/services/ops/FileWatchPublisher/filewatchpublisher.config +++ b/services/ops/FileWatchPublisher/filewatchpublisher.config @@ -1,10 +1,12 @@ -[ - { - "file": "/var/log/syslog", - "topic": "platform/syslog" - }, - { - "file": "/home/volttron/tempfile.txt", - "topic": "temp/filepublisher" - } -] +{ + "files": [ + { + "file": "/opt/myservice/logs/myservice.log", + "topic": "record/myservice/logs" + }, + { + "file": "/home/volttron/tempfile.txt", + "topic": "temp/filepublisher" + } + ] +} diff --git a/services/ops/FileWatchPublisher/filewatchpublisher/agent.py b/services/ops/FileWatchPublisher/filewatchpublisher/agent.py index 018076dff5..ef990f53b4 100644 --- a/services/ops/FileWatchPublisher/filewatchpublisher/agent.py +++ b/services/ops/FileWatchPublisher/filewatchpublisher/agent.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -47,7 +47,7 @@ from datetime import datetime from volttron.platform.agent.utils import watch_file_with_fullpath -from volttron.platform.vip.agent import Agent, RPC, Core +from volttron.platform.vip.agent import Agent, Core from volttron.platform.agent import utils @@ -57,11 +57,10 @@ def file_watch_publisher(config_path, **kwargs): - """Load the FileWatchPublisher agent configuration and returns and instance + """ + Load the FileWatchPublisher agent configuration and returns and instance of the agent created using that configuration. - :param config_path: Path to a configuration file. - :type config_path: str :returns: FileWatchPublisher agent instance :rtype: FileWatchPublisher agent @@ -71,12 +70,10 @@ def file_watch_publisher(config_path, **kwargs): class FileWatchPublisher(Agent): - """Monitors files from configuration for changes and - publishes added lines on corresponding topics. - Ignores if a file does not exist and move to next file - in configuration with an error message. + """ + Monitors files from configuration for changes and publishes added lines on corresponding topics. + Ignores if a file does not exist and move to next file in configuration with an error message. Exists if all files does not exist. - :param config: Configuration dict :type config: dict @@ -85,7 +82,7 @@ class FileWatchPublisher(Agent): .. code-block:: python { - "publish_file": [ + "publish_file": [ { "file": "/var/log/syslog", "topic": "platform/syslog", @@ -94,17 +91,18 @@ class FileWatchPublisher(Agent): "file": "/home/volttron/tempfile.txt", "topic": "temp/filepublisher", } - ] + ] } """ def __init__(self, config, **kwargs): super(FileWatchPublisher, self).__init__(**kwargs) self.config = config - items = config[:] + items = config.get("files") + assert isinstance(items, list) self.file_topic = {} self.file_end_position = {} - for item in self.config: - file = item["file"] + for item in self.config.get("files"): + file = item["file"] self.file_topic[file] = item["topic"] if os.path.isfile(file): with open(file, 'r') as f: @@ -112,20 +110,20 @@ def __init__(self, config, **kwargs): else: _log.error("File " + file + " does not exists. Ignoring this file.") items.remove(item) - self.config = items + self.files_to_watch = items @Core.receiver('onstart') def starting(self, sender, **kwargs): _log.info("Starting "+self.__class__.__name__+" agent") - if len(self.config) == 0 : + if len(self.files_to_watch) == 0: _log.error("No file to watch and publish. Stopping "+self.__class__.__name__+" agent.") gevent.spawn_later(3, self.core.stop) else: - for item in self.config: + for item in self.files_to_watch: file = item["file"] self.core.spawn(watch_file_with_fullpath, file, self.read_file) - def read_file(self,file): + def read_file(self, file): _log.debug('loading file %s', file) with open(file, 'r') as f: f.seek(self.file_end_position[file]) @@ -137,18 +135,18 @@ def publish_file(self, line, topic): message = {'timestamp': datetime.utcnow().isoformat() + 'Z', 'line': line} _log.debug('publishing message {} on topic {}'.format(message, topic)) - self.vip.pubsub.publish(peer="pubsub", topic=topic, - message=message) + self.vip.pubsub.publish(peer="pubsub", topic=topic, message=message) def get_end_position(self, f): - f.seek(0,2) + f.seek(0, 2) return f.tell() def main(argv=sys.argv): - """Main method called by the platform.""" - utils.vip_main(file_watch_publisher, identity='platform.filewatchpublisher' - , version=__version__) + """ + Main method called by the platform. + """ + utils.vip_main(file_watch_publisher, identity='platform.filewatchpublisher', version=__version__) if __name__ == '__main__': @@ -156,4 +154,4 @@ def main(argv=sys.argv): try: sys.exit(main()) except KeyboardInterrupt: - pass \ No newline at end of file + pass diff --git a/services/ops/FileWatchPublisher/setup.py b/services/ops/FileWatchPublisher/setup.py index df14937761..8567648200 100644 --- a/services/ops/FileWatchPublisher/setup.py +++ b/services/ops/FileWatchPublisher/setup.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/LogStatisticsAgent/README.md b/services/ops/LogStatisticsAgent/README.md new file mode 100644 index 0000000000..b663023012 --- /dev/null +++ b/services/ops/LogStatisticsAgent/README.md @@ -0,0 +1,45 @@ +## Log Statistics Agent + +The Log Statistics agent periodically reads the "volttron.log" file based on the configured interval, computes the size +delta from the previous hour and publishes the difference in bytes with a timestamp. It also publishes standard +deviation of the size delta every 24 hours. This agent can be useful for detecting unexpected changes to the system +which may be an indication of some sort of failure or breach. + + +### Configuration + +The Log Statistics agent has 4 required configuration values: + +- `file_path`: This should be the path to the "volttron.log" file +- `analysis_interval_secs`: The interval in seconds between publishing the size delta statistic to the message bus +- `publish_topic`: Can be used to specify a topic to publish log statistics to which does not get captured by the + historian framework (topics not prefixed by any of: "datalogger", "record", "analysis", "devices") +- `historian_topic`: Can be used to specify a topic to publish log statistics to which gets captured by the + historian framework ("datalogger", "record", "analysis", "devices") + +The following is an example configuration file: + +```json +{ + "file_path" : "~/volttron/volttron.log", + "analysis_interval_min" : 60, + "publish_topic" : "platform/log_statistics", + "historian_topic" : "record/log_statistics" +} +``` + + +### Periodic Publish + +The Log Statistics agent will run statistics publishes automatically based on the configured intervals. + +The following is an example of a periodic size delta publish: + +``` +Peer: pubsub +Sender: platform.logstatisticsagent1 +Bus: +Topic: platform/log_statistics +Headers: {'min_compatible_version': '3.0', 'max_compatible_version': ''} +Message: {'log_size_delta': 902, 'timestamp': '2021-01-25T22:48:16.924135Z'} +``` diff --git a/services/ops/LogStatisticsAgent/Tests/test_log_statistics.py b/services/ops/LogStatisticsAgent/Tests/test_log_statistics.py new file mode 100644 index 0000000000..8aeeeeee65 --- /dev/null +++ b/services/ops/LogStatisticsAgent/Tests/test_log_statistics.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import os +import json +import pytest +from mock import MagicMock + +from volttron.platform.messaging.health import STATUS_GOOD +from volttron.platform.vip.agent import Agent +from volttron.platform import get_ops, get_home + +# TODO fix finding test logs +test_config = { + "file_path": os.path.join(get_home(), "volttron.log"), + "analysis_interval_sec": 2, + "publish_topic": "platform/log_statistics", + "historian_topic": "analysis/log_statistics" +} + + +@pytest.fixture(scope="module") +def publish_agent(request, volttron_instance): + # 1: Start a fake agent to publish to message bus + agent = volttron_instance.build_agent() + + publish_agent.callback = MagicMock(name="callback") + publish_agent.callback.reset_mock() + + agent.vip.pubsub.subscribe(peer='pubsub', prefix=test_config.get("publish_topic"), + callback=publish_agent.callback).get() + + def stop_agent(): + print("In teardown method of publish_agent") + if isinstance(agent, Agent): + agent.core.stop() + + request.addfinalizer(stop_agent) + return agent + + +def test_default_config(volttron_instance, publish_agent): + """ + Test the default configuration file included with the agent + """ + config_path = os.path.join(get_ops("LogStatisticsAgent"), "logstatisticsagent.config") + with open(config_path, "r") as config_file: + config_json = json.load(config_file) + assert isinstance(config_json, dict) + stats_uuid = volttron_instance.install_agent( + agent_dir=get_ops("LogStatisticsAgent"), + config_file=config_json, + start=True, + vip_identity="health_test") + assert publish_agent.vip.rpc.call("health_test", "health.get_status").get(timeout=10).get('status') == STATUS_GOOD + volttron_instance.remove_agent(stats_uuid) + + +# def test_log_stats(volttron_instance, publish_agent): +# stats_uuid = volttron_instance.install_agent( +# agent_dir=get_ops("LogStatisticsAgent"), +# config_file=test_config, +# start=True, +# vip_identity="health_test") +# +# gevent.sleep(1) +# +# # building another agent should populate the logs +# volttron_instance.build_agent(identity="log_populate") +# +# gevent.sleep(2) +# +# # TODO do mock asserts +# +# volttron_instance.remove_agent(stats_uuid) diff --git a/services/ops/LogStatisticsAgent/conftest.py b/services/ops/LogStatisticsAgent/conftest.py new file mode 100644 index 0000000000..68e5e611b1 --- /dev/null +++ b/services/ops/LogStatisticsAgent/conftest.py @@ -0,0 +1,6 @@ +import sys + +from volttrontesting.fixtures.volttron_platform_fixtures import * + +# Add system path of the agent's directory +sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/ops/LogStatisticsAgent/logstatisticsagent.config b/services/ops/LogStatisticsAgent/logstatisticsagent.config index 959105b2c8..176d45ed88 100644 --- a/services/ops/LogStatisticsAgent/logstatisticsagent.config +++ b/services/ops/LogStatisticsAgent/logstatisticsagent.config @@ -1,5 +1,5 @@ { - "file_path" : "/home/volttron/volttron.log", + "file_path" : "~/volttron/volttron.log", "analysis_interval_sec" : 60, "publish_topic" : "platform/log_statistics", "historian_topic" : "analysis/log_statistics" diff --git a/services/ops/LogStatisticsAgent/logstatisticsagent/agent.py b/services/ops/LogStatisticsAgent/logstatisticsagent/agent.py index fe8041c5c5..71e45913a1 100644 --- a/services/ops/LogStatisticsAgent/logstatisticsagent/agent.py +++ b/services/ops/LogStatisticsAgent/logstatisticsagent/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -52,11 +52,10 @@ def log_statistics(config_path, **kwargs): - """Load the LogStatisticsAgent agent configuration and returns and instance - of the agent created using that configuration. - + """ + Load the LogStatisticsAgent agent configuration and returns and instance + of the agent created using that configuration. :param config_path: Path to a configuration file. - :type config_path: str :returns: LogStatisticsAgent agent instance :rtype: LogStatisticsAgent agent @@ -67,20 +66,18 @@ def log_statistics(config_path, **kwargs): class LogStatisticsAgent(Agent): """ - LogStatisticsAgent reads volttron.log file size every hour, - compute the size delta from previous hour and publish the difference - with timestamp. It also publishes standard deviation every 24 hours. + LogStatisticsAgent reads volttron.log file size every hour, compute the size delta from previous hour and publish + the difference with timestamp. It also publishes standard deviation every 24 hours. :param config: Configuration dict :type config: dict - Example configuration: .. code-block:: python - { - "file_path" : "/home/volttron/volttron.log", - "analysis_interval_sec" : 60, - "publish_topic" : "platform/log_statistics", - "historian_topic" : "analysis/log_statistics" - } + { + "file_path" : "/home/volttron/volttron.log", + "analysis_interval_sec" : 60, + "publish_topic" : "platform/log_statistics", + "historian_topic" : "analysis/log_statistics" + } """ def __init__(self, config, **kwargs): @@ -101,10 +98,8 @@ def starting(self, sender, **kwargs): def publish_analysis(self): """ - Publishes file's size increment in previous time interval (60 minutes) - with timestamp. - Also publishes standard deviation of file's hourly size differences - every 24 hour. + Publishes file's size increment in previous time interval (60 minutes) with timestamp. + Also publishes standard deviation of file's hourly size differences every 24 hour. """ if self._scheduled_event is not None: self._scheduled_event.cancel() @@ -137,22 +132,19 @@ def publish_analysis(self): _log.debug('publishing message {} with header {} on historian topic {}' .format(historian_message, headers, self.historian_topic)) - self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers = headers, + self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers=headers, message=historian_message) self.size_delta_list = [] _log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic)) - self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic, - message=publish_message) + self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic, message=publish_message) _log.debug('Scheduling next periodic call') now = get_aware_utc_now() - next_update_time = now + datetime.timedelta( - seconds=self.analysis_interval_sec) + next_update_time = now + datetime.timedelta(seconds=self.analysis_interval_sec) - self._scheduled_event = self.core.schedule( - next_update_time, self.publish_analysis) + self._scheduled_event = self.core.schedule(next_update_time, self.publish_analysis) def get_file_size(self): try: @@ -162,7 +154,9 @@ def get_file_size(self): def main(argv=sys.argv): - """Main method called by the platform.""" + """ + Main method called by the platform. + """ utils.vip_main(log_statistics, identity='platform.logstatisticsagent') diff --git a/services/ops/LogStatisticsAgent/setup.py b/services/ops/LogStatisticsAgent/setup.py index 66195f016d..17c11f1f2e 100644 --- a/services/ops/LogStatisticsAgent/setup.py +++ b/services/ops/LogStatisticsAgent/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/MessageDebuggerAgent/README.md b/services/ops/MessageDebuggerAgent/README.md new file mode 100644 index 0000000000..c5a7560f3d --- /dev/null +++ b/services/ops/MessageDebuggerAgent/README.md @@ -0,0 +1,640 @@ +## Message Debugger Agent + +VOLTTRON agent messages are routed over the VOLTTRON message bus. The Message Debugger Agent provides enhanced +examination of this message stream's contents as an aid to debugging and troubleshooting agents and drivers. + +This feature is implemented to provide visibility into the ZeroMQ message bus. The RabbitMQ message bus includes +methods for message debugging by default in the [RabbitMQ management UI](https://www.rabbitmq.com/management.html). + + +### Enabling the Message Debugger + +In order to use the Message Debugger, two steps are required: + +- VOLTTRON must have been started with a ``--msgdebug`` command line option. +- The Message Debugger Agent must be running. + +When VOLTTRON has been started with ``--msgdebug``, its Router publishes each message +to an IPC socket for which the Message Debugger Agent is a subscriber. This is kept disabled +by default because it consumes a significant quantity of CPU and memory resources, potentially +affecting VOLTTRON timing and performance. So as a general rule, the ``--msgdebug`` option +should be employed during development/debugging only, and should not be left enabled in +a production environment. + +Example of starting VOLTTRON with the ``--msgdebug`` command line option: + + (volttron) ./start-volttron --msgdebug + +If VOLTTRON is running in this mode, the stream of routed messages is available to +a subscribing Message Debugger Agent. It can be started from volttron-ctl in the same +fashion as other agents, for example: + +``` +(volttron) $ vctl status + AGENT IDENTITY TAG STATUS +fd listeneragent-3.2 listener listener +08 messagedebuggeragent-0.1 platform.messagedebugger platform.messagedebugger +e1 vcplatformagent-3.5.4 platform.agent vcp +47 volttroncentralagent-3.5.5 volttron.central vc + +(volttron) $ vctl start 08 +Starting 089c53f0-f225-4608-aecb-3e86e0df30eb messagedebuggeragent-0.1 + +(volttron) $ vctl status + AGENT IDENTITY TAG STATUS +fd listeneragent-3.2 listener listener +08 messagedebuggeragent-0.1 platform.messagedebugger platform.messagedebugger running [43498] +e1 vcplatformagent-3.5.4 platform.agent vcp +47 volttroncentralagent-3.5.5 volttron.central vc +``` + + +### Message Viewer + +The Message Viewer is a separate process that interacts with the Message Debugger Agent +primarily via VOLTTRON RPC calls. These calls allow it to request and report on filtered sets +of message data. + + +The [Message Viewer](https://github.com/VOLTTRON/volttron/blob/main/services/ops/MessageDebuggerAgent/messageviewer/viewer.py) +implements a command-line UI, subclassing Python's ``Cmd`` class. Most of the command-line options that it displays +result in a MessageDebuggerAgent RPC request. The Message Viewer formats and displays the results. + +In Linux, the Message Viewer can be started as follows, and displays the following menu: + +``` +(volttron) $ cd services/ops/MessageDebuggerAgent/messageviewer +(volttron) $ python viewer.py +Welcome to the MessageViewer command line. Supported commands include: + display_message_stream + display_messages + display_exchanges + display_exchange_details + display_session_details_by_agent + display_session_details_by_topic + + list_sessions + set_verbosity + list_filters + set_filter + clear_filters + clear_filter + + start_streaming + stop_streaming + start_session + stop_session + delete_session + delete_database + + help + quit +Please enter a command. +Viewer> +``` + + +### Debug Sessions + +The Message Debugger Agent tags each message with a debug session ID (a serial number), +which groups a set of messages that are bounded by a start time and an end time. The ``list_sessions`` +command describes each session in the database: + +``` +Viewer> list_sessions + rowid start_time end_time num_messages + 1 2017-03-20 17:07:13.867951 - 2243 + 2 2017-03-20 17:17:35.725224 - 1320 + 3 2017-03-20 17:33:35.103204 2017-03-20 17:46:15.657487 12388 +``` + +A new session is started by default when the Agent is started. After that, the ``stop_session`` +and ``start_session`` commands can be used to create new session boundaries. If the Agent is running +but no session is active (i.e., because ``stop_session`` was used to stop it), messages are +still written to the database, but they have no session ID. + + +#### Filtered Display + +The ``set_filter `` command enables filtered display of messages. A variety +of properties can be filtered. + +In the following example, message filters are defined by session_id and sender, and the ``display_messages`` +command displays the results: + +``` +Viewer> set_filter session_id 4 +Set filters to {'session_id': '4'} +Viewer> set_filter sender testagent +Set filters to {'sender': 'testagent', 'session_id': '4'} +Viewer> display_messages + timestamp direction sender recipient request_id subsystem method topic device point result + 11:51:00 incoming testagent messageviewer.connection - RPC pubsub.sync - - - - + 11:51:00 outgoing testagent pubsub - RPC pubsub.push - - - - + 11:51:00 incoming testagent platform.driver 1197886248649056372.284581685 RPC get_point - chargepoint1 Status - + 11:51:01 outgoing testagent platform.driver 1197886248649056372.284581685 RPC - - - - AVAILABLE + 11:51:01 incoming testagent pubsub 1197886248649056373.284581649 RPC pubsub.publish test_topic/test_subtopic - - - + 11:51:01 outgoing testagent pubsub 1197886248649056373.284581649 RPC - - - - None +``` + + +### Debug Message Exchanges + +A VOLTTRON message's request ID is not unique to a single message. A group of messages in an "exchange" +(essentially a small conversation among agents) will often share a common request ID, for instance during RPC +request/response exchanges. + +The following example uses the same filters as above, and then uses ``display_exchanges`` +to display a single line for each message exchange, reducing the number of displayed rows from 6 to 2. +Note that not all messages have a request ID; messages with no ID are absent from the responses to +exchange queries. + +``` +Viewer> list_filters +{'sender': 'testagent', 'session_id': '4'} +Viewer> display_exchanges + sender recipient sender_time topic device point result + testagent platform.driver 11:51:00 - chargepoint1 Status AVAILABLE + testagent pubsub 11:51:01 test_topic/test_subtopic - - None +``` + + +#### Special Filters + +Most filters that can be set with the ``set_filter`` command are simple string matches on +one or another property of a message. Some filters have special characteristics, though. +The ``set_filter starttime `` and ``set_filter endtime `` filters are +inequalities that test for messages after a start time or before an end time. + +In the following example, note the use of quotes in the ``endtime`` value supplied to +`set_filter`. Any filter value can be delimited with quotes. Quotes must be +used when a value contains embedded spaces, as is the case here: + +``` +Viewer> list_sessions + rowid start_time end_time num_messages + 1 2017-03-20 17:07:13.867951 - - + 2 2017-03-20 17:17:35.725224 - - + 3 2017-03-21 11:48:33.803288 2017-03-21 11:50:57.181136 6436 + 4 2017-03-21 11:50:59.656693 2017-03-21 11:51:05.934895 450 + 5 2017-03-21 11:51:08.431871 - 74872 + 6 2017-03-21 12:17:30.568260 - 2331 +Viewer> set_filter session_id 5 +Set filters to {'session_id': '5'} +Viewer> set_filter sender testagent +Set filters to {'sender': 'testagent', 'session_id': '5'} +Viewer> set_filter endtime '2017-03-21 11:51:30' +Set filters to {'endtime': '2017-03-21 11:51:30', 'sender': 'testagent', 'session_id': '5'} +Viewer> display_exchanges + sender recipient sender_time topic device point result + testagent platform.driver 11:51:11 - chargepoint1 Status AVAILABLE + testagent pubsub 11:51:11 test_topic/test_subtopic - - None + testagent platform.driver 11:51:25 - chargepoint1 Status AVAILABLE + testagent pubsub 11:51:25 test_topic/test_subtopic - - None + testagent platform.driver 11:51:26 - chargepoint1 Status AVAILABLE + testagent pubsub 11:51:26 test_topic/test_subtopic - - None +``` + +Another filter type with special behavior is ``set_filter topic ``. Ordinarily, filters do an exact +match on a message property. Since message topics are often expressed as hierarchical substrings, +though, the ``topic`` filter does a substring match on the left edge of a message's topic, +as in the following example: + +``` +Viewer> set_filter topic test_topic +Set filters to {'topic': 'test_topic', 'endtime': '2017-03-21 11:51:30', 'sender': 'testagent', 'session_id': '5'} +Viewer> display_exchanges + sender recipient sender_time topic device point result + testagent pubsub 11:51:11 test_topic/test_subtopic - - None + testagent pubsub 11:51:25 test_topic/test_subtopic - - None + testagent pubsub 11:51:26 test_topic/test_subtopic - - None +Viewer> +``` + +Another filter type with special behavior is ``set_filter results_only 1``. In the JSON representation of a +response to an RPC call, for example an RPC call to a Platform Driver interface, the response to the +RPC request typically appears as the value of a 'result' tag. The ``results_only`` filter matches +only those messages that have a non-empty value for this tag. + +In the following example, note that when the ``results_only`` filter is set, it is given a value +of '1'. This is actually a meaningless value that gets ignored. It must be supplied because the +set_filter command syntax requires that a value be supplied as a parameter. + +In the following example, note the use of ``clear_filter `` to remove a single +named filter from the list of filters that are currently in effect. There is also a ``clear_filters`` +command, which clears all current filters. + +``` +Viewer> clear_filter topic +Set filters to {'endtime': '2017-03-21 11:51:30', 'sender': 'testagent', 'session_id': '5'} +Viewer> set_filter results_only 1 +Set filters to {'endtime': '2017-03-21 11:51:30', 'sender': 'testagent', 'session_id': '5', 'results_only': '1'} +Viewer> display_exchanges + sender recipient sender_time topic device point result + testagent platform.driver 11:51:11 - chargepoint1 Status AVAILABLE + testagent platform.driver 11:51:25 - chargepoint1 Status AVAILABLE + testagent platform.driver 11:51:26 - chargepoint1 Status AVAILABLE +``` + + +#### Streamed Display + +In addition to exposing a set of RPC calls that allow other agents (like the Message Viewer) +to query the Message Debugger Agent's SQLite database of recent messages, the Agent can also +publish messages in real time as it receives them. + +This feature is disabled by default due to the large quantity of data that it might need to +handle. When it is enabled, the Agent applies the filters currently in effect to each message as +it is received, and re-publishes the transformed, ready-for-debugging message to a socket +if it meets the filter criteria. The Message Viewer can listen on that socket and display +the message stream as it arrives. + +In the following ``display_message_stream`` example, the Message Viewer displays all messages +sent by the agent named 'testagent', as they arrive. It continues to display messages until +execution is interrupted with ctrl-C: + +``` +Viewer> clear_filters +Set filters to {} +Viewer> set_filter sender testagent +Set filters to {'sender': 'testagent'} +Viewer> display_message_stream +Streaming debug messages + timestamp direction sender recipient request_id subsystem method topic device point result + 12:28:58 outgoing testagent pubsub - RPC pubsub.push - - - - + 12:28:58 incoming testagent platform.dr 11978862486 RPC get_point - chargepoint Status - + iver 49056826.28 1 + 4581713 + 12:28:58 outgoing testagent platform.dr 11978862486 RPC - - - - AVAILABLE + iver 49056826.28 + 4581713 + 12:28:58 incoming testagent pubsub 11978862486 RPC pubsub.publ test_topic/ - - - + 49056827.28 ish test_subtop + 4581685 ic + 12:28:58 outgoing testagent pubsub 11978862486 RPC - - - - None + 49056827.28 + 4581685 + 12:28:58 outgoing testagent pubsub - RPC pubsub.push - - - - +^CViewer> stop_streaming +Stopped streaming debug messages +``` + +(Note the use of wrapping in the column formatting. Since these messages aren't known in advance, the +Message Viewer has incomplete information about how wide to make each column. Instead, it must +make guesses based on header widths, data widths in the first row received, and min/max values, +and then wrap the data when it overflows the column boundaries.) + + +#### Single-Line Display + +Another filter with special behavior is ``set_filter freq ``. This filter, which takes a number N +as its value, displays only one row, the most recently captured row that satisfies the filter criteria. +(Like other filters, this filter can be used with either ``display_messages`` or ``display_exchanges``.) +It then waits N seconds, reissues the query, and overwrites the old row with the new one. +It continues this periodic single-line overwritten display until it is interrupted with ctrl-C: + +``` +Viewer> list_filters +{'sender': 'testagent'} +Viewer> set_filter freq 10 +Set filters to {'freq': '10', 'sender': 'testagent'} +Viewer> display_exchanges + sender recipient sender_time topic device point result + testagent pubsub 12:31:28 test_topic/test_subtopic - - None +``` + +(Again, the data isn't known in advance, so the Message Viewer has to guess the best +width of each column. In this single-line display format, data gets truncated if it doesn't fit, +because no wrapping can be performed -- only one display line is available.) + + +#### Displaying Exchange Details + +The ``display_exchange_details `` command provides a way to get more specific details +about an exchange, i.e. about all messages that share a common request ID. At low or medium +verbosity, when this command is used (supplying the relevant request ID, which can be obtained +from the output of other commands), it displays one row for each message: + +``` +Viewer> set_filter sender testagent +Set filters to {'sender': 'testagent', 'session_id': '4'} +Viewer> display_messages + timestamp direction sender recipient request_id subsystem method topic device point result + 11:51:00 incoming testagent messageviewer.connection - RPC pubsub.sync - - - - + 11:51:00 outgoing testagent pubsub - RPC pubsub.push - - - - + 11:51:00 incoming testagent platform.driver 1197886248649056372.284581685 RPC get_point - chargepoint1 Status - + 11:51:01 outgoing testagent platform.driver 1197886248649056372.284581685 RPC - - - - AVAILABLE + 11:51:01 incoming testagent pubsub 1197886248649056373.284581649 RPC pubsub.publish test_topic/test_subtopic - - - + 11:51:01 outgoing testagent pubsub 1197886248649056373.284581649 RPC - - - - None +Viewer> display_exchange_details 1197886248649056373.284581649 + timestamp direction sender recipient request_id subsystem method topic device point result + 11:51:01 incoming testagent pubsub 1197886248649056373.284581649 RPC pubsub.publish test_topic/test_subtopic - - - + 11:51:01 outgoing testagent pubsub 1197886248649056373.284581649 RPC - - - - None +``` + +At high verbosity, ``display_exchange_details`` switches display formats, showing all properties for +each message in a json-like dictionary format: + +``` +Viewer> set_verbosity high +Set verbosity to high +Viewer> display_exchange_details 1197886248649056373.284581649 + +{ + "data": "{\"params\":{\"topic\":\"test_topic/test_subtopic\",\"headers\":{\"Date\":\"2017-03-21T11:50:56.293830\",\"max_compatible_version\":\"\",\"min_compatible_version\":\"3.0\"},\"message\":[{\"property_1\":1,\"property_2\":2},{\"property_3\":3,\"property_4\":4}],\"bus\":\"\"},\"jsonrpc\":\"2.0\",\"method\":\"pubsub.publish\",\"id\":\"15828311332408898779.284581649\"}", + "device": "", + "direction": "incoming", + "frame7": "", + "frame8": "", + "frame9": "", + "headers": "{u'Date': u'2017-03-21T11:50:56.293830', u'max_compatible_version': u'', u'min_compatible_version': u'3.0'}", + "message": "[{u'property_1': 1, u'property_2': 2}, {u'property_3': 3, u'property_4': 4}]", + "message_size": 374, + "message_value": "{u'property_1': 1, u'property_2': 2}", + "method": "pubsub.publish", + "params": "{u'topic': u'test_topic/test_subtopic', u'headers': {u'Date': u'2017-03-21T11:50:56.293830', u'max_compatible_version': u'', u'min_compatible_version': u'3.0'}, u'message': [{u'property_1': 1, u'property_2': 2}, {u'property_3': 3, u'property_4': 4}], u'bus': u''}", + "point": "", + "point_value": "", + "recipient": "pubsub", + "request_id": "1197886248649056373.284581649", + "result": "", + "sender": "testagent", + "session_id": 4, + "subsystem": "RPC", + "timestamp": "2017-03-21 11:51:01.027623", + "topic": "test_topic/test_subtopic", + "user_id": "", + "vip_signature": "VIP1" +} + +{ + "data": "{\"params\":{\"topic\":\"test_topic/test_subtopic\",\"headers\":{\"Date\":\"2017-03-21T11:50:56.293830\",\"max_compatible_version\":\"\",\"min_compatible_version\":\"3.0\"},\"message\":[{\"property_1\":1,\"property_2\":2},{\"property_3\":3,\"property_4\":4}],\"bus\":\"\"},\"jsonrpc\":\"2.0\",\"method\":\"pubsub.publish\",\"id\":\"15828311332408898779.284581649\"}", + "device": "", + "direction": "outgoing", + "frame7": "", + "frame8": "", + "frame9": "", + "headers": "{u'Date': u'2017-03-21T11:50:56.293830', u'max_compatible_version': u'', u'min_compatible_version': u'3.0'}", + "message": "[{u'property_1': 1, u'property_2': 2}, {u'property_3': 3, u'property_4': 4}]", + "message_size": 383, + "message_value": "{u'property_1': 1, u'property_2': 2}", + "method": "pubsub.publish", + "params": "{u'topic': u'test_topic/test_subtopic', u'headers': {u'Date': u'2017-03-21T11:50:56.293830', u'max_compatible_version': u'', u'min_compatible_version': u'3.0'}, u'message': [{u'property_1': 1, u'property_2': 2}, {u'property_3': 3, u'property_4': 4}], u'bus': u''}", + "point": "", + "point_value": "", + "recipient": "testagent", + "request_id": "1197886248649056373.284581649", + "result": "", + "sender": "pubsub", + "session_id": 4, + "subsystem": "RPC", + "timestamp": "2017-03-21 11:51:01.031183", + "topic": "test_topic/test_subtopic", + "user_id": "testagent", + "vip_signature": "VIP1" +} +``` + + +### Verbosity + +As mentioned in the previous section, Agent and Viewer behavior can be adjusted by changing +the current verbosity with the ``set_verbosity `` command. The default verbosity is low. +low, medium and high levels are available: + +``` +Viewer> set_verbosity high +Set verbosity to high +Viewer> set_verbosity none +Invalid verbosity choice none; valid choices are ['low', 'medium', 'high'] +``` + +At high verbosity, the following query formatting rules are in effect: + +- When displaying timestamps, display the full date and time (including microseconds), not just HH:MM:SS. +- In responses to display_message_exchanges, use dictionary format (see example in previous section). +- Display all columns, not just "interesting" columns (see the list below). +- Don't exclude messages/exchanges based on excluded senders/receivers (see the list below). + +At medium or low verbosity: + +- When displaying timestamps, display HH:MM:SS only. +- In responses to display_message_exchanges, use table format. +- Display "interesting" columns only (see the list below). +- Exclude messages/exchanges for certain senders/receivers (see the list below). + +At low verbosity: + +- If > 1000 objects are returned by a query, display the count only. + +The following "interesting" columns are displayed at low and medium verbosity levels +(at high verbosity levels, all properties are displayed): + +``` +Debug Message Debug Message Exchange Debug Session + +timestamp sender_time rowid +direction start_time +sender sender end_time +recipient recipient num_messages +request_id +subsystem +method +topic topic +device device +point point +result result +``` + +Messages from the following senders, or to the following receivers, are excluded at +low and medium verbosity levels: + +``` +Sender Receiver + +(empty) (empty) +None +control control +config.store config.store +pubsub +control.connection +messageviewer.connection +platform.messagedebugger +platform.messagedebugger.loopback_rpc +``` + +These choices about which columns are "interesting" and which senders/receivers are excluded +are defined as parameters in Message Viewer, and can be adjusted as necessary by changing +global value lists in `viewer.py`. + +### Session Statistics + +One useful tactic for starting at a summary level and drilling down is to capture a set +of messages for a session and then examine the counts of sending and receiving agents, +or sending agents and topics. This gives hints on which values might serve as useful filters +for more specific queries. + +The ``display_session_details_by_agent `` command displays statistics by sending and +receiving agent. Sending agents are table columns, and receiving agents are table rows. +This query also applies whatever filters are currently in effect; the filters can reduce +the counts and can also reduce the number of columns and rows. + +The following example shows the command being used to list all senders and receivers for +messages sent during debug session 7: + +``` +Viewer> list_sessions + rowid start_time end_time num_messages + 1 2017-03-20 17:07:13.867951 - - + 2 2017-03-20 17:17:35.725224 - - + 3 2017-03-21 11:48:33.803288 2017-03-21 11:50:57.181136 6436 + 4 2017-03-21 11:50:59.656693 2017-03-21 11:51:05.934895 450 + 5 2017-03-21 11:51:08.431871 - 74872 + 6 2017-03-21 12:17:30.568260 2017-03-21 12:38:29.070000 60384 + 7 2017-03-21 12:38:31.617099 2017-03-21 12:39:53.174712 3966 +Viewer> clear_filters +Set filters to {} +Viewer> display_session_details_by_agent 7 + Receiving Agent control listener messageviewer.connection platform.driver platform.messagedebugger pubsub testagent + (No Receiving Agent) - - 2 - - - - + control - - - - - 2 - + listener - - - - - 679 - + messageviewer.connection - - - - 3 - - + platform.driver - - - - - 1249 16 + platform.messagedebugger - - 3 - - - - + pubsub 2 679 - 1249 - 4 31 + testagent - - - 16 - 31 - +``` + +The ``display_session_details_by_topic `` command is similar to ``display_session_details_by_agent``, +but each row contains statistics for a topic instead of for a receiving agent: + +``` +Viewer> display_session_details_by_topic 7 + Topic control listener messageviewer.connection platform.driver platform.messagedebugger pubsub testagent + (No Topic) 1 664 5 640 3 1314 39 + devices/chargepoint1/Address - - - 6 - 6 - + devices/chargepoint1/City - - - 6 - 6 - + devices/chargepoint1/Connector - - - 5 - 5 - + devices/chargepoint1/Country - - - 5 - 5 - + devices/chargepoint1/Current - - - 6 - 6 - + devices/chargepoint1/Description - - - 6 - 6 - + devices/chargepoint1/Energy - - - 5 - 5 - + devices/chargepoint1/Lat - - - 6 - 6 - + devices/chargepoint1/Level - - - 5 - 5 - + devices/chargepoint1/Long - - - 6 - 6 - + devices/chargepoint1/Mode - - - 5 - 5 - + devices/chargepoint1/Power - - - 6 - 6 - + devices/chargepoint1/Reservable - - - 5 - 5 - + devices/chargepoint1/State - - - 6 - 6 - + devices/chargepoint1/Status - - - 5 - 5 - + devices/chargepoint1/Status.TimeSta - - - 6 - 6 - + mp + devices/chargepoint1/Type - - - 6 - 6 - + devices/chargepoint1/Voltage - - - 5 - 5 - + devices/chargepoint1/alarmTime - - - 6 - 6 - + devices/chargepoint1/alarmType - - - 6 - 6 - + devices/chargepoint1/all - - - 5 - 5 - + devices/chargepoint1/allowedLoad - - - 6 - 6 - + devices/chargepoint1/clearAlarms - - - 6 - 6 - + devices/chargepoint1/currencyCode - - - 6 - 6 - + devices/chargepoint1/driverAccountN - - - 5 - 5 - + umber + devices/chargepoint1/driverName - - - 5 - 5 - + devices/chargepoint1/endTime - - - 5 - 5 - + devices/chargepoint1/mainPhone - - - 6 - 6 - + devices/chargepoint1/maxPrice - - - 5 - 5 - + devices/chargepoint1/minPrice - - - 5 - 5 - + devices/chargepoint1/numPorts - - - 6 - 6 - + devices/chargepoint1/orgID - - - 5 - 5 - + devices/chargepoint1/organizationNa - - - 5 - 5 - + me + devices/chargepoint1/percentShed - - - 6 - 6 - + devices/chargepoint1/portLoad - - - 6 - 6 - + devices/chargepoint1/portNumber - - - 6 - 6 - + devices/chargepoint1/sessionID - - - 5 - 5 - + devices/chargepoint1/sessionTime - - - 6 - 6 - + devices/chargepoint1/sgID - - - 6 - 6 - + devices/chargepoint1/sgName - - - 6 - 6 - + devices/chargepoint1/shedState - - - 5 - 5 - + devices/chargepoint1/startTime - - - 6 - 6 - + devices/chargepoint1/stationID - - - 5 - 5 - + devices/chargepoint1/stationMacAddr - - - 6 - 6 - + devices/chargepoint1/stationManufac - - - 5 - 5 - + turer + devices/chargepoint1/stationModel - - - 6 - 6 - + devices/chargepoint1/stationName - - - 5 - 5 - + devices/chargepoint1/stationRightsP - - - 6 - 6 - + rofile + devices/chargepoint1/stationSerialN - - - 6 - 6 - + um + heartbeat/control 1 - - - - 1 - + heartbeat/listener - 15 - - - 15 - + heartbeat/platform.driver - - - 1 - 1 - + heartbeat/pubsub - - - - - 2 - + test_topic/test_subtopic - - - - - 8 8 +``` + + +### Database Administration + +The Message Debugger Agent stores message data in a SQLite database's DebugMessage, +DebugMessageExchange and DebugSession tables. If the database isn't present already +when the Agent is started, it is created automatically. + +The SQLite database can consume a lot of disk space in a relatively short time, +so the Message Viewer has command-line options that recover that space by +deleting the database or by deleting all messages belonging to a given debug session. + +The ``delete_session `` command deletes the database's DebugSession row +with the indicated ID, and also deletes all DebugMessage and DebugMessageExchange rows +with that session ID. In the following example, ``delete_session`` deletes the 60,000 +DebugMessages that were captured during a 20-minute period as session 6: + +``` +Viewer> list_sessions + rowid start_time end_time num_messages + 1 2017-03-20 17:07:13.867951 - - + 2 2017-03-20 17:17:35.725224 - - + 3 2017-03-21 11:48:33.803288 2017-03-21 11:50:57.181136 6436 + 4 2017-03-21 11:50:59.656693 2017-03-21 11:51:05.934895 450 + 5 2017-03-21 11:51:08.431871 - 74872 + 6 2017-03-21 12:17:30.568260 2017-03-21 12:38:29.070000 60384 + 7 2017-03-21 12:38:31.617099 2017-03-21 12:39:53.174712 3966 + 8 2017-03-21 12:42:08.482936 - 3427 +Viewer> delete_session 6 +Deleted debug session 6 +Viewer> list_sessions + rowid start_time end_time num_messages + 1 2017-03-20 17:07:13.867951 - - + 2 2017-03-20 17:17:35.725224 - - + 3 2017-03-21 11:48:33.803288 2017-03-21 11:50:57.181136 6436 + 4 2017-03-21 11:50:59.656693 2017-03-21 11:51:05.934895 450 + 5 2017-03-21 11:51:08.431871 - 74872 + 7 2017-03-21 12:38:31.617099 2017-03-21 12:39:53.174712 3966 + 8 2017-03-21 12:42:08.482936 - 4370 +``` + +The ``delete_database`` command deletes the entire SQLite database, removing all records +of previously-captured DebugMessages, DebugMessageExchanges and DebugSessions. +The database will be re-created the next time a debug session is started. + +``` +Viewer> delete_database +Database deleted +Viewer> list_sessions +No query results +Viewer> start_session +Message debugger session 1 started +Viewer> list_sessions + rowid start_time end_time num_messages + 1 2017-03-22 12:39:40.320252 - 180 +``` + +It's recommended that the database be deleted if changes are made to the DebugMessage, +DebugMessageExchange or DebugSession object structures that are defined in agent.py. +A skew between these data structures in Python code vs. the ones in the database can +cause instability in the Message Debugger Agent, perhaps causing it to fail. If a failure +of this kind prevents use of the Message Viewer's ``delete_database`` command, the +database can be deleted directly from the filesystem. By default, it is located +in $VOLTTRON_HOME's ``run`` directory. diff --git a/services/ops/MessageDebuggerAgent/messagedebugger/agent.py b/services/ops/MessageDebuggerAgent/messagedebugger/agent.py index b574f4fd73..df113672a5 100644 --- a/services/ops/MessageDebuggerAgent/messagedebugger/agent.py +++ b/services/ops/MessageDebuggerAgent/messagedebugger/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -673,7 +673,7 @@ def __init__(self, msg_elements, session_id): self.frame9 = bytes(msg_elements[10]) if len(msg_elements) > 10 else '' self.method = '' self.params = '' - self.topic = self.frame7 # MasterDriverAgent device topics go in routed message's frame 7 + self.topic = self.frame7 # PlatformDriverAgent device topics go in routed message's frame 7 self.headers = '' self.message = '' self.message_value = '' diff --git a/services/ops/MessageDebuggerAgent/messageviewer/viewer.py b/services/ops/MessageDebuggerAgent/messageviewer/viewer.py index b97a43eb19..0051595e00 100644 --- a/services/ops/MessageDebuggerAgent/messageviewer/viewer.py +++ b/services/ops/MessageDebuggerAgent/messageviewer/viewer.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/MessageDebuggerAgent/tests/test_message_debugging.py b/services/ops/MessageDebuggerAgent/tests/test_message_debugging.py index 5cc2004064..6785acb561 100644 --- a/services/ops/MessageDebuggerAgent/tests/test_message_debugging.py +++ b/services/ops/MessageDebuggerAgent/tests/test_message_debugging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -61,7 +61,7 @@ @pytest.fixture(scope='module') def agent(request, volttron_instance_msgdebug): - master_uuid = volttron_instance_msgdebug.install_agent(agent_dir=get_ops("MessageDebuggerAgent"), + platform_uuid = volttron_instance_msgdebug.install_agent(agent_dir=get_ops("MessageDebuggerAgent"), config_file=DEBUGGER_CONFIG, start=True) gevent.sleep(2) @@ -69,7 +69,7 @@ def agent(request, volttron_instance_msgdebug): gevent.sleep(20) # wait for the agent to start def stop(): - volttron_instance_msgdebug.stop_agent(master_uuid) + volttron_instance_msgdebug.stop_agent(platform_uuid) msg_debugger_agent.core.stop() request.addfinalizer(stop) diff --git a/services/ops/SysMonAgent/README.md b/services/ops/SysMonAgent/README.md new file mode 100644 index 0000000000..6f2b326c16 --- /dev/null +++ b/services/ops/SysMonAgent/README.md @@ -0,0 +1,54 @@ +## System Monitoring (SysMon) Agent + +The System Monitoring Agent (colloquially “SysMon”) can be installed on the platform to monitor system resource metrics, +including percent CPU utilization, percent system memory (RAM) utilization, and percent storage (disk) utilization based +on disk path. + +### Configuration + +The SysMon agent has 5 configuration values, all of which are optional: + +- "base_topic": Topic prefix used to publish all system metric points, is formatted with the metric function name in + publishes (i.e. "base/topic/prefix/cpu_percent") - default "datalogger/log/platform" +- "cpu_check_interval": Interval in seconds between publishes of % all core CPU utilization - default 5 +- "memory_check_interval": Interval in seconds between publishes of % system memory (RAM) utilization - default 5 +- "disk_check_interval": Interval in seconds between publishes of % disk utilization for the configured disk - + default 5 +- "disk_path": Directory path used as the root directory for a mounted disk (Currently, the SysMon agent supports + collecting metrics for only 1 disk at a time) - default "/" + +```json +{ + "base_topic": "datalogger/log/platform", + "cpu_check_interval": 5, + "memory_check_interval": 5, + "disk_check_interval": 5, + "disk_path": "/" +} +``` + + +### Periodic Publish + +At the interval specified by the configuration option for each resource, the agent will automatically query the system +for the resource utilization statistics and publish it to the message bus using the topic as previously described. The +message content for each publish will contain only a single numeric value for that specific topic. Currently, +“scrape_all” style publishes are not supported. + +The following are example publishes as captured by the Listener agent into the VOLTTRON log: + +``` +2020-03-10 11:20:33,755 (listeneragent-3.3 7993) listener.agent INFO: Peer: pubsub, Sender: platform.sysmon:, Bus: , Topic: datalogger/log/platform/cpu_percent, Headers: {'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: +4.8 +2020-03-10 11:20:33,804 (listeneragent-3.3 7993) listener.agent INFO: Peer: pubsub, Sender: platform.sysmon:, Bus: , Topic: datalogger/log/platform/memory_percent, Headers: {'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: +35.6 +2020-03-10 11:20:33,809 (listeneragent-3.3 7993) listener.agent INFO: Peer: pubsub, Sender: platform.sysmon:, Bus: , Topic: datalogger/log/platform/disk_percent, Headers: {'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: +65.6 +``` + + +### JSON RPC Methods + +- cpu_percent: Returns current % all core CPU utilization, takes no parameters +- memory_percent: Returns current % system memory (RAM) utilization, takes no parameters +- disk_percent: Returns current % disk (ROM) utilization for the configured disk, takes no parameters diff --git a/services/ops/SysMonAgent/setup.py b/services/ops/SysMonAgent/setup.py index df14937761..8567648200 100644 --- a/services/ops/SysMonAgent/setup.py +++ b/services/ops/SysMonAgent/setup.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/SysMonAgent/sysmon/agent.py b/services/ops/SysMonAgent/sysmon/agent.py index 852545be10..f5b7e59f4e 100644 --- a/services/ops/SysMonAgent/sysmon/agent.py +++ b/services/ops/SysMonAgent/sysmon/agent.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/SysMonAgent/tests/test_sysmonagent.py b/services/ops/SysMonAgent/tests/test_sysmonagent.py index 682364b54e..be405c835e 100644 --- a/services/ops/SysMonAgent/tests/test_sysmonagent.py +++ b/services/ops/SysMonAgent/tests/test_sysmonagent.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,11 +44,11 @@ Pytest test cases for SysMonAgent """ +import os import pytest -from volttron.platform import jsonapi +from volttron.platform import jsonapi, get_ops from volttrontesting.utils.utils import poll_gevent_sleep -from volttron.platform import get_ops _test_config = { "base_topic": "test1/sysmon", @@ -58,6 +58,11 @@ "disk_path": "/" } +config_path = os.path.join(get_ops("SysMonAgent"), "sysmonagent.config") +with open(config_path, "r") as config_file: + default_config_json = jsonapi.load(config_file) +assert isinstance(default_config_json, dict) + @pytest.fixture() def sysmon_tester_agent(request, volttron_instance, tmpdir): @@ -67,7 +72,10 @@ def sysmon_tester_agent(request, volttron_instance, tmpdir): config = tmpdir.mkdir('config').join('config') config.write(jsonapi.dumps(_test_config)) - sysmon_uuid = volttron_instance.install_agent(agent_dir=get_ops("SysMonAgent"), config_file=str(config), start=True) + sysmon_uuid = volttron_instance.install_agent( + agent_dir=get_ops("SysMonAgent"), + config_file=_test_config, + start=True) agent = volttron_instance.build_agent() @@ -81,7 +89,9 @@ def stop_agent(): def listen(agent, config): - """Assert all SysMonAgent topics have been heard""" + """ + Assert all SysMonAgent topics have been heard + """ base_topic = config['base_topic'] short_topics = ['cpu_percent', 'memory_percent', 'disk_percent'] topics = [base_topic + '/' + x for x in short_topics] @@ -98,13 +108,25 @@ def add_topic(peer, sender, bus, topic, headers, messages): def test_listen(sysmon_tester_agent): - """Test that data is published to expected topics""" + """ + Test that data is published to expected topics + """ listen(sysmon_tester_agent, _test_config) def test_reconfigure_then_listen(sysmon_tester_agent): - """Test that the topic can be reconfigured""" + """ + Test that the topic can be reconfigured + """ new_config = _test_config.copy() new_config['base_topic'] = 'test2/sysmon' sysmon_tester_agent.vip.rpc.call('platform.sysmon', 'reconfigure', **new_config) listen(sysmon_tester_agent, new_config) + + +def test_default_config(sysmon_tester_agent): + """ + Test that the topic can be reconfigured + """ + sysmon_tester_agent.vip.rpc.call('platform.sysmon', 'reconfigure', **default_config_json) + listen(sysmon_tester_agent, default_config_json) diff --git a/services/ops/ThresholdDetectionAgent/README.md b/services/ops/ThresholdDetectionAgent/README.md new file mode 100644 index 0000000000..1ae7318738 --- /dev/null +++ b/services/ops/ThresholdDetectionAgent/README.md @@ -0,0 +1,91 @@ +## Threshold Detection Agent + +The ThresholdDetectionAgent will publish an alert when a value published to a topic exceeds or falls below a configured +value. + +The agent subscribes to the topics listed in the configuration file and publishes alerts when the callback receives a +value for the point above the max (if configured) or below the min (if configured) corresponding to the point in the +configuration file. + + +### Configuration + +The Threshold Detection agent supports observing individual point values from their respective topics or from a device's +all publish. Points to watch are configured as JSON key-value pairs as follows: + +* Key: The key is the point topic for the point to watch, or the device's "all" topic if watching points from the all +publish (i.e. "devices/campus/building/device/point" or "devices/campus/building/device/all" if using the all topic) + +* Value: Using point topic: JSON object specifying the min ('threshold_min') and max ('threshold_max) threshold values +for the point. Only one of the thresholds are required, but both may be used. + +Example: + +```json +{ + "point0": { + "threshold_max": 10, + "threshold_min": 0 + }, + "point1": { + "threshold_max": 42 + } +} +``` + +Using device "all" topic: JSON object with the key as the point name and value being the threshold object described +above + +Example + +```json +{ + "devices/some/device/all": { + "point0": { + "threshold_max": 10, + "threshold_min": 0 + }, + "point1": { + "threshold_max": 42 + } + } +} +``` + +Example configuration: + +```json +{ + "datalogger/log/platform/cpu_percent": { + "threshold_max": 99 + }, + "datalogger/log/platform/memory_percent": { + "threshold_max": 99 + }, + "datalogger/log/platform/disk_percent": { + "threshold_max": 97 + }, + "devices/campus/building/fake/all": { + "EKG_Sin": { + "threshold_max": 0.1, + "threshold_min": -0.1 + } + } +} +``` + + +### Example Publish + +This example publish uses the example config above along with a fake driver running on the platform. + +``` +Peer: pubsub +Sender: platform.thresholddetection +Bus: +Topic: alerts/ThresholdDetectionAgent/james_platform_thresholddetection +Headers: {'alert_key': 'devices/campus/building/fake/all', 'min_compatible_version': '3.0', 'max_compatible_version': ''} +Message: ('{"status": "BAD", "context": "devices/campus/building/fake/all(EKG_Sin) ' + 'value (-0.4999999999999997)is below acceptable limit (-0.1)", ' + '"last_updated": "2021-01-25T22:39:35.035606+00:00"}') +``` diff --git a/services/ops/ThresholdDetectionAgent/setup.py b/services/ops/ThresholdDetectionAgent/setup.py index df14937761..8567648200 100644 --- a/services/ops/ThresholdDetectionAgent/setup.py +++ b/services/ops/ThresholdDetectionAgent/setup.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/ThresholdDetectionAgent/tests/test_threshold_detection.py b/services/ops/ThresholdDetectionAgent/tests/test_threshold_detection.py index cdff520e03..ec99ac41df 100644 --- a/services/ops/ThresholdDetectionAgent/tests/test_threshold_detection.py +++ b/services/ops/ThresholdDetectionAgent/tests/test_threshold_detection.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/ThresholdDetectionAgent/thresholddetection/agent.py b/services/ops/ThresholdDetectionAgent/thresholddetection/agent.py index b22541291d..d7f4dd0def 100644 --- a/services/ops/ThresholdDetectionAgent/thresholddetection/agent.py +++ b/services/ops/ThresholdDetectionAgent/thresholddetection/agent.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/TopicWatcher/README.md b/services/ops/TopicWatcher/README.md new file mode 100644 index 0000000000..997252141f --- /dev/null +++ b/services/ops/TopicWatcher/README.md @@ -0,0 +1,70 @@ +## Topic Watcher Agent + +The Topic Watcher Agent listens to a set of configured topics and publishes an alert if they are not published within +some time limit. In addition to for individual messages or data points, the Topic Watcher Agent supports inspecting +device "all" topics. This can be useful when a device contains volatile points that may not be published. + + +### Configuration + +Topics are organized by groups in a JSON structure with the group's identifier as the key. Any alerts raised will +summarize all missing topics in the group. + +There are two configuration options for watching topics. For single message topics (such as a single +device point), configuration consists of a key value pair of the topic to its time limit. + +``` +{ + "groupname: { + "devices/campus/building/point": 10 + } +} +``` + +For points published in an "all" style publish, configuration consts of a key mapping to an object as follows: +A `seconds` key for the time limit in seconds, and a `points` key consisting of a list of individual points in the +`all` publish. + +The following is an example "all" publish configuration which configures the Topic Watcher to check for the `temperature` +and `PowerState` points which are expected to be inside the "all" publishes. + +``` +{ + "groupname": { + "devices/fakedriver1/all": { + "seconds": 10, + "points": ["temperature", "PowerState"] + } + } +} +``` + +It is possible to configure the Topic Watcher to handle both "all" topics and single point topics for the same group: + +``` +{ + "groupname": { + "devices/fakedriver0/all": 10, + "devices/fakedriver1/all": { + "seconds": 10, + "points": ["temperature", "PowerState"] + } + } +} +``` + + +### Example Publish + +The following is an example publish from the Topic Watcher Agent using the above configuration. + +``` +Peer: pubsub +Sender: platform.topic_watcher +Bus: +Topic: alerts/AlertAgent/james_platform_topic_watcher +Headers: {'alert_key': 'AlertAgent Timeout for group group1', 'min_compatible_version': '3.0', 'max_compatible_version': ''} +Message: ('{"status": "BAD", "context": "Topic(s) not published within time limit: ' + '[\'devices/fakedriver0/all\']", "last_updated": ' + '"2021-01-25T23:10:07.905633+00:00"}') +``` diff --git a/services/ops/TopicWatcher/setup.py b/services/ops/TopicWatcher/setup.py index eaf3948ea7..cc64769bff 100644 --- a/services/ops/TopicWatcher/setup.py +++ b/services/ops/TopicWatcher/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/services/ops/TopicWatcher/tests/test_remote_topic_watcher.py b/services/ops/TopicWatcher/tests/test_remote_topic_watcher.py index b985082c7b..84c0704b99 100644 --- a/services/ops/TopicWatcher/tests/test_remote_topic_watcher.py +++ b/services/ops/TopicWatcher/tests/test_remote_topic_watcher.py @@ -2,7 +2,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -108,6 +108,7 @@ def onmessage(peer, sender, bus, topic, headers, message): assert alert_messages alert_messages.clear() + @pytest.mark.alert def test_alert_multi_messagebus_publish(volttron_multi_messagebus): """ @@ -168,7 +169,17 @@ def onmessage(peer, sender, bus, topic, headers, message): ) gevent.sleep(6) + assert u"Topic(s) not published within time limit: ['fakedevice', " \ u"'fakedevice2/all', ('fakedevice2/all', 'point')]" in \ + alert_messages or \ + u"Topic(s) not published within time limit: ['fakedevice', " \ + u"('fakedevice2/all', 'point'), 'fakedevice2/all']" in \ + alert_messages or \ + u"Topic(s) not published within time limit: ['fakedevice2/all', " \ + u"('fakedevice2/all', 'point'), 'fakedevice']" in \ + alert_messages or \ + u"Topic(s) not published within time limit: [('fakedevice2/all', 'point'), " \ + u"'fakedevice2/all', 'fakedevice']" in \ alert_messages - alert_messages.clear() \ No newline at end of file + alert_messages.clear() diff --git a/services/ops/TopicWatcher/tests/test_topic_watcher.py b/services/ops/TopicWatcher/tests/test_topic_watcher.py index 36621d5bd2..3c7f446d79 100644 --- a/services/ops/TopicWatcher/tests/test_topic_watcher.py +++ b/services/ops/TopicWatcher/tests/test_topic_watcher.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -298,10 +298,12 @@ def test_watch_topic_new_group(volttron_instance, agent, cleanup_db): gevent.sleep(6) assert len(alert_messages) == 2 + assert "Topic(s) not published within time limit: ['fakedevice', " \ "'fakedevice2/all', ('fakedevice2/all', 'point')]" in alert_messages or \ - "Topic(s) not published within time limit: ['fakedevice', " \ - "('fakedevice2/all', 'point')], 'fakedevice2/all'" in alert_messages + "Topic(s) not published within time limit: ['fakedevice', ('fakedevice2/all', 'point'), " \ + "'fakedevice2/all']" in alert_messages + assert "Topic(s) not published within time limit: ['newtopic']" in \ alert_messages diff --git a/services/ops/TopicWatcher/topic_watcher/agent.py b/services/ops/TopicWatcher/topic_watcher/agent.py index a9596e6ee4..27f4a96e1a 100644 --- a/services/ops/TopicWatcher/topic_watcher/agent.py +++ b/services/ops/TopicWatcher/topic_watcher/agent.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/setup.py b/setup.py index 23b0d3dd9e..2ec369bd8b 100644 --- a/setup.py +++ b/setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/__init__.py b/volttron/lint/__init__.py index 6238b7aba0..e56fc543b9 100644 --- a/volttron/lint/__init__.py +++ b/volttron/lint/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/bacpypes.debugging.py b/volttron/lint/bacpypes.debugging.py index 53c5739ddc..023422792f 100644 --- a/volttron/lint/bacpypes.debugging.py +++ b/volttron/lint/bacpypes.debugging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/clock.py b/volttron/lint/clock.py index 1b49e33a78..e5d76462e8 100644 --- a/volttron/lint/clock.py +++ b/volttron/lint/clock.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/gevent.socket.py b/volttron/lint/gevent.socket.py index 221f0620e0..1b7076c7ed 100644 --- a/volttron/lint/gevent.socket.py +++ b/volttron/lint/gevent.socket.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/greenlet.py b/volttron/lint/greenlet.py index 294de9ef2b..9ac82c9ffe 100644 --- a/volttron/lint/greenlet.py +++ b/volttron/lint/greenlet.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/twisted.internet.reactor.py b/volttron/lint/twisted.internet.reactor.py index 9b7aa306bb..5401721c9a 100644 --- a/volttron/lint/twisted.internet.reactor.py +++ b/volttron/lint/twisted.internet.reactor.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/lint/zmq.py b/volttron/lint/zmq.py index 7487d414cb..176cc21db5 100644 --- a/volttron/lint/zmq.py +++ b/volttron/lint/zmq.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/__init__.py b/volttron/platform/__init__.py index 88f678f8f8..38097b710c 100644 --- a/volttron/platform/__init__.py +++ b/volttron/platform/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,13 +41,15 @@ import logging import os +import traceback + import psutil import sys from configparser import ConfigParser -from ..utils.frozendict import FrozenDict from urllib.parse import urlparse -__version__ = '7.0' +from ..utils.frozendict import FrozenDict +__version__ = '8.0' _log = logging.getLogger(__name__) @@ -77,7 +79,7 @@ def get_home(): vhome = vhome[:-1] if os.environ.get('VOLTTRON_HOME') is not None: log = logging.getLogger('volttron') - log.warn("Removing / from the end of VOLTTRON_HOME") + log.warning("Removing / from the end of VOLTTRON_HOME") os.environ['VOLTTRON_HOME'] = vhome return vhome @@ -91,15 +93,43 @@ def get_config_path() -> str: return os.path.join(get_home(), "config") -def get_address(): +def get_address(verify_listening=False): """Return the VIP address of the platform - If the VOLTTRON_VIP_ADDR environment variable is set, it used. + If the VOLTTRON_VIP_ADDR environment variable is set, it is used to connect to. Otherwise, it is derived from get_home().""" address = os.environ.get('VOLTTRON_VIP_ADDR') if not address: + # Connect via virtual unix socket if linux platform (mac doesn't have @ in it) abstract = '@' if sys.platform.startswith('linux') else '' address = 'ipc://%s%s/run/vip.socket' % (abstract, get_home()) + import zmq.green as zmqgreen + import zmq + # The following block checks to make sure that we can + # connect to the zmq based upon the ipc address. + # + # The zmq.sock.bind() will raise an error because the + # address is already bound (therefore volttron is running there) + sock = None + try: + # TODO: We should not just do the connection test when verfiy_listening is True but always + # Though we leave this here because we have backward compatible unit tests that require + # the get_address to not have somethiing bound to the address. + if verify_listening: + ctx = zmqgreen.Context.instance() + sock = ctx.socket(zmq.PUB) # or SUB - does not make any difference + sock.bind(address) + raise ValueError("Unable to connect to vip address " + f"make sure VOLTTRON_HOME: {get_home()} " + "is set properly") + except zmq.error.ZMQError as e: + print(f"Zmq error was {e}\n{traceback.format_exc()}") + finally: + try: + sock.close() + except AttributeError as e: # Raised when sock is None type + pass + return address @@ -254,3 +284,19 @@ def build_vip_address_string(vip_root, serverkey, publickey, secretkey): raise ValueError('Invalid vip root specified!') return root + + +def update_volttron_script_path(path: str) -> str: + """ + Assumes that path's current working directory is in the root directory of the volttron codebase. + + Prepend 'VOLTTRON_ROOT' to internal volttron script if 'VOLTTRON_ROOT' is set and return new path; + otherwise, return original path + :param path: relative path to the internal volttron script + :return: updated path to volttron script + """ + if os.environ['VOLTTRON_ROOT']: + args = path.split("/") + path = f"{os.path.join(os.environ['VOLTTRON_ROOT'], *args)}" + _log.debug(f"Path to script: {path}") + return path diff --git a/volttron/platform/agent/__init__.py b/volttron/platform/agent/__init__.py index 63c3a0d308..e23a02d27b 100644 --- a/volttron/platform/agent/__init__.py +++ b/volttron/platform/agent/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/bacnet_proxy_reader.py b/volttron/platform/agent/bacnet_proxy_reader.py index b8dd2d98f8..35da88500c 100644 --- a/volttron/platform/agent/bacnet_proxy_reader.py +++ b/volttron/platform/agent/bacnet_proxy_reader.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -313,7 +313,7 @@ def _build_query_map_for_type(self, object_type, index): key = '{}-{}'.format(index, 'stateText') query_map[key] = [object_type, index, "stateText"] - if object_type != 'multiSTateInput': + if object_type != 'multiStateInput': key = '{}-{}'.format(index, "relinquishDefault") query_map[key] = [object_type, index, "relinquishDefault"] elif object_type == 'loop': diff --git a/volttron/platform/agent/base.py b/volttron/platform/agent/base.py index 31cc7a236c..e92e097df7 100644 --- a/volttron/platform/agent/base.py +++ b/volttron/platform/agent/base.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/base_aggregate_historian.py b/volttron/platform/agent/base_aggregate_historian.py index 40e3e8eaae..cedca1bf1b 100644 --- a/volttron/platform/agent/base_aggregate_historian.py +++ b/volttron/platform/agent/base_aggregate_historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -334,13 +334,13 @@ def collect_aggregate_data(self, collection_time, agg_time_period, data['aggregation_type'].lower(), agg_time_period)) if not aggregate_topic_id: - _log.warn("Name:{} Type: {} Aggregation Period: {} --" - "No such aggregate topic found. This could have happened if the " - "configuration of the agent changed after the last schedule for data collection" - " Stopping collection for the outdated configuration".format( - data['aggregation_topic_name'].lower(), - data['aggregation_type'].lower(), - agg_time_period)) + _log.warning("Name:{} Type: {} Aggregation Period: {} --" + "No such aggregate topic found. This could have happened if the " + "configuration of the agent changed after the last schedule for data collection" + " Stopping collection for the outdated configuration".format( + data['aggregation_topic_name'].lower(), + data['aggregation_type'].lower(), + agg_time_period)) schedule_next = False break # break out of for loop and move to finally block @@ -355,12 +355,11 @@ def collect_aggregate_data(self, collection_time, agg_time_period, topic_ids = list(topic_map.values()) _log.debug("topic ids loaded {} ".format(topic_ids)) else: - _log.warn( - "Skipping recording of aggregate data for {topic} " - "between {start_time} and {end_time} as ".format( - topic=topic_pattern, - start_time=start_time, - end_time=end_time)) + _log.warning("Skipping recording of aggregate data for {topic} " + "between {start_time} and {end_time} as ".format( + topic=topic_pattern, + start_time=start_time, + end_time=end_time)) return agg_value, count = self.collect_aggregate( @@ -369,24 +368,18 @@ def collect_aggregate_data(self, collection_time, agg_time_period, start_time, end_time) if count == 0: - _log.warn( - "No records found for topic {topic} between " - "{start_time} and {end_time}".format( - topic=topic_pattern if topic_pattern else - data['topic_names'], - start_time=start_time, - end_time=end_time)) + _log.warning("No records found for topic {topic} between {start_time} and {end_time}".format( + topic=topic_pattern if topic_pattern else + data['topic_names'], + start_time=start_time, + end_time=end_time)) elif count < data.get('min_count', 0): - _log.warn( - "Skipping recording of aggregate data for {topic} " - "between {start_time} and {end_time} as number of " - "records is less than minimum allowed(" - "{count})".format( - topic=topic_pattern if topic_pattern - else data['topic_names'], - start_time=start_time, - end_time=end_time, - count=data.get('min_count', 0))) + _log.warning("Skipping recording of aggregate data for {topic} between {start_time} and {end_time}" + " as number of records is less than minimum allowed({count})".format( + topic=topic_pattern if topic_pattern else data['topic_names'], + start_time=start_time, + end_time=end_time, + count=data.get('min_count', 0))) else: _log.debug("data is {} aggg_time_period is {}".format(data, agg_time_period)) _log.debug(" topic id map {}".format(self.agg_topic_id_map)) diff --git a/volttron/platform/agent/base_historian.py b/volttron/platform/agent/base_historian.py index c35dae02d4..1d25101ac1 100644 --- a/volttron/platform/agent/base_historian.py +++ b/volttron/platform/agent/base_historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -316,6 +316,7 @@ def add_timing_data_to_header(headers, agent_id, phase): STATUS_KEY_CACHE_COUNT = "cache_count" STATUS_KEY_PUBLISHING = "publishing" STATUS_KEY_CACHE_FULL = "cache_full" +STATUS_KEY_TIME_ERROR = "records_with_invalid_timestamp" class BaseHistorianAgent(Agent): @@ -368,6 +369,8 @@ def __init__(self, custom_topics={}, device_data_filter={}, all_platforms=False, + time_tolerance=None, + time_tolerance_topics=None, **kwargs): super(BaseHistorianAgent, self).__init__(**kwargs) @@ -416,6 +419,14 @@ def __init__(self, STATUS_KEY_CACHE_FULL: False } self._all_platforms = bool(all_platforms) + self._time_tolerance = float(time_tolerance) if time_tolerance else None + if self._time_tolerance is not None: + if time_tolerance_topics is None: + time_tolerance_topics = ["devices"] + elif not isinstance(time_tolerance_topics, list): + raise ValueError(f"time_tolerance_topic should a list of topics. Got value({time_tolerance_topics}) of " + f"type {type(time_tolerance_topics)}") + self._time_tolerance_topics = time_tolerance_topics self._default_config = { "retry_period":self._retry_period, @@ -429,13 +440,15 @@ def __init__(self, "capture_device_data": capture_device_data, "capture_log_data": capture_log_data, "capture_analysis_data": capture_analysis_data, - "capture_record_data": capture_record_data, + "capture_record_data": capture_record_data, "message_publish_count": self._message_publish_count, "storage_limit_gb": storage_limit_gb, "history_limit_days": history_limit_days, "custom_topics": custom_topics, "device_data_filter": device_data_filter, - "all_platforms": self._all_platforms + "all_platforms": self._all_platforms, + "time_tolerance": self._time_tolerance, + "time_tolerance_topics": self._time_tolerance_topics } self.vip.config.set_default("config", self._default_config) @@ -533,9 +546,21 @@ def _configure(self, config_name, action, contents): all_platforms = bool(config.get("all_platforms", False)) + time_tolerance = config.get("time_tolerance") + time_tolerance_topics = config.get("time_tolerance_topics") + time_tolerance = float(time_tolerance) if time_tolerance else None + if time_tolerance is not None: + if time_tolerance_topics is None: + time_tolerance_topics = ["devices"] + elif not isinstance(time_tolerance_topics, list): + raise ValueError( + f"time_tolerance_topic should a list of topics. Got value({time_tolerance_topics}) of " + f"type {type(time_tolerance_topics)}") + self._time_tolerance_topics = time_tolerance_topics + except ValueError as e: self._backup_storage_report = 0.9 - _log.error("Failed to load base historian settings. Settings not applied!") + _log.exception("Failed to load base historian settings. Settings not applied!") return query = Query(self.core) @@ -560,6 +585,8 @@ def _configure(self, config_name, action, contents): self._all_platforms = all_platforms self._readonly = readonly self._message_publish_count = message_publish_count + self._time_tolerance = time_tolerance + self._time_tolerance_topics = time_tolerance_topics custom_topics_list = [] for handler, topic_list in config.get("custom_topics", {}).items(): @@ -746,6 +773,14 @@ def get_renamed_topic(self, input_topic): _log.debug("Output topic after replacements {}".format(output_topic)) return output_topic + def does_time_exceed_tolerance(self, topic, utc_timestamp): + if self._time_tolerance: + # If time tolerance is set, and it needs to be checked for this topic + # compare the incoming timestamp with the current time. + if topic.startswith(tuple(self._time_tolerance_topics)): + return abs(get_aware_utc_now() - utc_timestamp).seconds > self._time_tolerance + return False + def _capture_record_data(self, peer, sender, bus, topic, headers, message): # _log.debug('Capture record data {}'.format(topic)) @@ -755,6 +790,7 @@ def _capture_record_data(self, peer, sender, bus, topic, headers, timestamp = get_aware_utc_now() if timestamp_string is not None: timestamp, my_tz = process_timestamp(timestamp_string, topic) + headers['time_error'] = self.does_time_exceed_tolerance(topic, timestamp) if sender == 'pubsub.compat': message = compat.unpack_legacy_message(headers, message) @@ -813,6 +849,7 @@ def _capture_log_data(self, peer, sender, bus, topic, headers, message): readings = [(get_aware_utc_now(), readings)] elif isinstance(readings[0], str): my_ts, my_tz = process_timestamp(readings[0], topic) + headers['time_error'] = self.does_time_exceed_tolerance(topic, my_ts) readings = [(my_ts, readings[1])] if tz: meta['tz'] = tz @@ -907,6 +944,8 @@ def _capture_data(self, peer, sender, bus, topic, headers, message, timestamp = get_aware_utc_now() if timestamp_string is not None: timestamp, my_tz = process_timestamp(timestamp_string, topic) + headers['time_error'] = self.does_time_exceed_tolerance(topic, timestamp) + try: # 2.0 agents compatability layer makes sender == pubsub.compat so # we can do the proper thing when it is here @@ -1004,9 +1043,10 @@ def _capture_actuator_data(self, topic, headers, message, match): @staticmethod def _get_status_from_context(context): status = STATUS_GOOD - if (context.get("backlogged") or - context.get("cache_full") or - not context.get("publishing")): + if (context.get(STATUS_KEY_BACKLOGGED) or + context.get(STATUS_KEY_CACHE_FULL) or + not context.get(STATUS_KEY_PUBLISHING) or + context.get(STATUS_KEY_TIME_ERROR)): status = STATUS_BAD return status @@ -1097,18 +1137,26 @@ def _do_process_loop(self): # We wake the thread after a configuration change by passing a None to the queue. # Backup anything new before checking for a stop. - cache_full = backupdb.backup_new_data((x for x in new_to_publish if x is not None)) + cache_full = backupdb.backup_new_data(new_to_publish, bool(self._time_tolerance)) backlog_count = backupdb.get_backlog_count() if cache_full: self._send_alert({STATUS_KEY_CACHE_FULL: cache_full, STATUS_KEY_BACKLOGGED: True, - STATUS_KEY_CACHE_COUNT: backlog_count}, + STATUS_KEY_CACHE_COUNT: backlog_count, + STATUS_KEY_TIME_ERROR: backupdb.time_error_records}, "historian_cache_full") else: old_backlog_state = self._current_status_context[STATUS_KEY_BACKLOGGED] - self._update_status({STATUS_KEY_CACHE_FULL: cache_full, - STATUS_KEY_BACKLOGGED: old_backlog_state and backlog_count > 0, - STATUS_KEY_CACHE_COUNT: backlog_count}) + state = { + STATUS_KEY_CACHE_FULL: cache_full, + STATUS_KEY_BACKLOGGED: old_backlog_state and backlog_count > 0, + STATUS_KEY_CACHE_COUNT: backlog_count, + STATUS_KEY_TIME_ERROR: backupdb.time_error_records} + self._update_status(state) + if backupdb.time_error_records: + self._send_alert( + state, + "Historian received records with invalid timestamp. Please check records in time_error table.") # Check for a stop for reconfiguration. if self._stop_process_loop: @@ -1394,14 +1442,17 @@ def __init__(self, owner, backup_storage_limit_gb, backup_storage_report, self._backup_cache = {} # Count of records in cache. self._record_count = 0 + self.time_error_records = False self._meta_data = defaultdict(dict) self._owner = weakref.ref(owner) self._backup_storage_limit_gb = backup_storage_limit_gb self._backup_storage_report = backup_storage_report self._connection = None self._setupdb(check_same_thread) + self._dupe_ids = [] + self._unique_ids = [] - def backup_new_data(self, new_publish_list): + def backup_new_data(self, new_publish_list, time_tolerance_check): """ :param new_publish_list: An iterable of records to cache to disk. :type new_publish_list: iterable @@ -1410,8 +1461,10 @@ def backup_new_data(self, new_publish_list): """ #_log.debug("Backing up unpublished values.") c = self._connection.cursor() - + self.time_error_records = False # will update at the end of the method for item in new_publish_list: + if item is None: + continue source = item['source'] topic = item['topic'] meta = item.get('meta', {}) @@ -1438,21 +1491,47 @@ def backup_new_data(self, new_publish_list): (source, topic_id, name, value)) meta_dict[name] = value - for timestamp, value in readings: - if timestamp is None: - timestamp = get_aware_utc_now() - try: - c.execute( - '''INSERT INTO outstanding - values(NULL, ?, ?, ?, ?, ?)''', - (timestamp, source, topic_id, dumps(value), dumps(headers))) - self._record_count += 1 - except sqlite3.IntegrityError: - # In the case where we are upgrading an existing installed historian the - # unique constraint may still exist on the outstanding database. - # Ignore this case. - _log.warning(f"sqlite3.Integrity error -- {e}") - pass + # Check outside loop so that we do the check inside loop only if necessary + if time_tolerance_check: + for timestamp, value in readings: + if timestamp is None: + timestamp = get_aware_utc_now() + elif headers["time_error"]: + _log.warning(f"Found data with timestamp {timestamp} that is out of configured tolerance ") + c.execute( + '''INSERT INTO time_error + values(NULL, ?, ?, ?, ?, ?)''', + (timestamp, source, topic_id, dumps(value), dumps(headers))) + self.time_error_records = True + continue # continue to the next record. don't record in outstanding + try: + c.execute( + '''INSERT INTO outstanding + values(NULL, ?, ?, ?, ?, ?)''', + (timestamp, source, topic_id, dumps(value), dumps(headers))) + self._record_count += 1 + except sqlite3.IntegrityError as e: + # In the case where we are upgrading an existing installed historian the + # unique constraint may still exist on the outstanding database. + # Ignore this case. + _log.warning(f"sqlite3.Integrity error -- {e}") + pass + else: + for timestamp, value in readings: + if timestamp is None: + timestamp = get_aware_utc_now() + try: + c.execute( + '''INSERT INTO outstanding + values(NULL, ?, ?, ?, ?, ?)''', + (timestamp, source, topic_id, dumps(value), dumps(headers))) + self._record_count += 1 + except sqlite3.IntegrityError as e: + # In the case where we are upgrading an existing installed historian the + # unique constraint may still exist on the outstanding database. + # Ignore this case. + _log.warning(f"sqlite3.Integrity error -- {e}") + pass cache_full = False if self._backup_storage_limit_gb is not None: @@ -1476,39 +1555,66 @@ def free_count(): # page count doesnt update even after deleting all records # and record count becomes zero. If we have deleted all record # exit. - _log.debug(f"record count before check is {self._record_count} page count is {p}" - f" free count is {f}") + # _log.debug(f"record count before check is {self._record_count} page count is {p}" + # f" free count is {f}") # max_pages gets updated based on inserts but freelist_count doesn't # enter delete loop based on page_count min_free_pages = p - self.max_pages + error_record_count = 0 + get_error_count_from_db = True while p > self.max_pages: cache_full = True - c.execute( - '''DELETE FROM outstanding - WHERE ROWID IN - (SELECT ROWID FROM outstanding - ORDER BY ROWID ASC LIMIT 100)''') - #self._connection.commit() - if self._record_count < c.rowcount: - self._record_count = 0 + if time_tolerance_check and get_error_count_from_db: + # if time_tolerance_check is enabled and this the first time + # we get into this loop, get the count from db + c.execute("SELECT count(ts) from time_error") + error_record_count = c.fetchone()[0] + get_error_count_from_db = False # after this we will reduce count as we delete + if error_record_count > 0: + # if time_error table has records, try deleting those first before outstanding table + _log.info("cache size exceeded limit Deleting data from time_error") + c.execute( + '''DELETE FROM time_error + WHERE ROWID IN + (SELECT ROWID FROM time_error + ORDER BY ROWID ASC LIMIT 100)''') + error_record_count -= c.rowcount else: - self._record_count -= c.rowcount - p = page_count() #page count doesn't reflect delete without commit - f = free_count() # freelist count does. So using that to break from loop + # error record count is 0, sp set time_error_records to False + self.time_error_records = False + _log.info("cache size exceeded limit Deleting data from outstanding") + c.execute( + '''DELETE FROM outstanding + WHERE ROWID IN + (SELECT ROWID FROM outstanding + ORDER BY ROWID ASC LIMIT 100)''') + if self._record_count < c.rowcount: + self._record_count = 0 + else: + self._record_count -= c.rowcount + p = page_count() # page count doesn't reflect delete without commit + f = free_count() # freelist count does. So using that to break from loop if f >= min_free_pages: break _log.debug(f" Cleaning cache since we are over the limit. " f"After delete of 100 records from cache" - f" record count is {self._record_count} page count is {p} freelist count is{f}") + f" record count is {self._record_count} time_error record count is {error_record_count} " + f"page count is {p} freelist count is{f}") - except Exception as e: - _log.warning(f"Exception when check page count and deleting{e}") + except Exception: + _log.exception(f"Exception when checking page count and deleting") try: self._connection.commit() - except Exception as e: - _log.warning(f"Exception in committing after back db storage {e}") - + except Exception: + _log.exception(f"Exception in committing after back db storage") + + if time_tolerance_check and not self.time_error_records: + # No time error records in this batch. Check if there are records from earlier inserts + # that admin hasn't dealt with yet. + c.execute("SELECT ROWID FROM time_error LIMIT 1") + if c.fetchone(): + self.time_error_records = True return cache_full def remove_successfully_published(self, successful_publishes, @@ -1518,41 +1624,44 @@ def remove_successfully_published(self, successful_publishes, If None is found in `successful_publishes` we assume that everything was published. - :param successful_publishes: List of records that was published. + :param successful_publishes: Set of records that was published. :param submit_size: Number of things requested from previous call to :py:meth:`get_outstanding_to_publish` - :type successful_publishes: list + :type successful_publishes: set :type submit_size: int """ - #_log.debug("Cleaning up successfully published values.") c = self._connection.cursor() - - if None in successful_publishes: - c.execute('''DELETE FROM outstanding - WHERE ROWID IN - (SELECT ROWID FROM outstanding - ORDER BY ts LIMIT ?)''', (submit_size,)) - if self._record_count < c.rowcount: - self._record_count = 0 + try: + if None in successful_publishes: + c.executemany('''DELETE FROM outstanding + WHERE id = ?''', + ((_id,) for _id in self._unique_ids)) + if self._record_count < c.rowcount: + self._record_count = 0 + else: + self._record_count -= len(self._unique_ids) else: - self._record_count -= c.rowcount - else: - temp = list(successful_publishes) - temp.sort() - c.executemany('''DELETE FROM outstanding - WHERE id = ?''', - ((_id,) for _id in - successful_publishes)) - self._record_count -= len(temp) + temp = list(successful_publishes) + temp.sort() + c.executemany('''DELETE FROM outstanding + WHERE id = ?''', + ((_id,) for _id in + successful_publishes)) + self._record_count -= len(temp) + finally: + # if we don't clear these attributes on every publish, we could possibly delete a non-existing record on the next publish + self._unique_ids.clear() + self._dupe_ids.clear() self._connection.commit() def get_outstanding_to_publish(self, size_limit): """ - Retrieve up to `size_limit` records from the cache. + Retrieve up to `size_limit` records from the cache. Guarantees a unique list of records, + where unique is defined as (topic, timestamp). :param size_limit: Max number of records to retrieve. :type size_limit: int @@ -1561,9 +1670,9 @@ def get_outstanding_to_publish(self, size_limit): """ # _log.debug("Getting oldest outstanding to publish.") c = self._connection.cursor() - c.execute('select * from outstanding order by ts limit ?', - (size_limit,)) + c.execute('select * from outstanding order by ts limit ?', (size_limit,)) results = [] + unique_records = set() for row in c: _id = row[0] timestamp = row[1] @@ -1572,30 +1681,43 @@ def get_outstanding_to_publish(self, size_limit): value = loads(row[4]) headers = {} if row[5] is None else loads(row[5]) meta = self._meta_data[(source, topic_id)].copy() + topic = self._backup_cache[topic_id] + + # check for duplicates before appending row to results + if (topic_id, timestamp) in unique_records: + _log.debug(f"Found duplicate from cache: {row}") + self._dupe_ids.append(_id) + continue + unique_records.add((topic_id, timestamp)) + self._unique_ids.append(_id) + results.append({'_id': _id, 'timestamp': timestamp.replace(tzinfo=pytz.UTC), 'source': source, - 'topic': self._backup_cache[topic_id], + 'topic': topic, 'value': value, 'headers': headers, 'meta': meta}) c.close() - # If we were backlogged at startup and our initial estimate was # off this will correct it. if len(results) < size_limit: self._record_count = len(results) + # if we have duplicates, we must count them as part of the "real" total of _record_count + if self._dupe_ids: + _log.debug(f"Adding duplicates to the total record count: {self._dupe_ids}") + self._record_count += len(self._dupe_ids) + return results def get_backlog_count(self): """ - Retrieve the current number of records in the cashe. + Retrieve the current number of records in the cache. """ return self._record_count - def close(self): self._connection.close() self._connection = None @@ -1680,6 +1802,21 @@ def _setupdb(self, check_same_thread): c.execute('''CREATE INDEX IF NOT EXISTS outstanding_ts_index ON outstanding (ts)''') + c.execute("SELECT name FROM sqlite_master WHERE type='table' " + "AND name='time_error';") + + if c.fetchone() is None: + _log.debug("Configuring backup DB for the first time.") + self._connection.execute('''PRAGMA auto_vacuum = FULL''') + self._connection.execute('''CREATE TABLE time_error + (id INTEGER PRIMARY KEY, + ts timestamp NOT NULL, + source TEXT NOT NULL, + topic_id INTEGER NOT NULL, + value_string TEXT NOT NULL, + header_string TEXT)''') + c.execute('''CREATE INDEX time_error_ts_index ON time_error (ts)''') + c.execute("SELECT name FROM sqlite_master WHERE type='table' " "AND name='metadata';") diff --git a/volttron/platform/agent/base_market_agent/__init__.py b/volttron/platform/agent/base_market_agent/__init__.py index 67b8ffd185..51c0d9b5f9 100644 --- a/volttron/platform/agent/base_market_agent/__init__.py +++ b/volttron/platform/agent/base_market_agent/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -57,10 +57,10 @@ class MarketAgent(Agent): an auction market. By inheriting from this agent all the remote communication with the MarketService is handled and the sub-class can be unconcerned with those details. """ - def __init__(self, verbose_logging = True, **kwargs): + def __init__(self, verbose_logging=True, timeout=60, **kwargs): super(MarketAgent, self).__init__(**kwargs) _log.debug("vip_identity: " + self.core.identity) - rpc_proxy = RpcProxy(self.vip.rpc.call, verbose_logging) + rpc_proxy = RpcProxy(self.vip.rpc.call, verbose_logging=verbose_logging, timeout=timeout) self.registrations = RegistrationManager(rpc_proxy) self.verbose_logging = verbose_logging diff --git a/volttron/platform/agent/base_market_agent/buy_sell.py b/volttron/platform/agent/base_market_agent/buy_sell.py index 57ffa8f81f..606d5e3a32 100644 --- a/volttron/platform/agent/base_market_agent/buy_sell.py +++ b/volttron/platform/agent/base_market_agent/buy_sell.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/base_market_agent/error_codes.py b/volttron/platform/agent/base_market_agent/error_codes.py index 67a6449203..2ac5bbd067 100644 --- a/volttron/platform/agent/base_market_agent/error_codes.py +++ b/volttron/platform/agent/base_market_agent/error_codes.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/base_market_agent/market_registration.py b/volttron/platform/agent/base_market_agent/market_registration.py index 8797cfcbb8..def2e46872 100644 --- a/volttron/platform/agent/base_market_agent/market_registration.py +++ b/volttron/platform/agent/base_market_agent/market_registration.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/base_market_agent/offer.py b/volttron/platform/agent/base_market_agent/offer.py index 59ee4e7803..f3acc2b571 100644 --- a/volttron/platform/agent/base_market_agent/offer.py +++ b/volttron/platform/agent/base_market_agent/offer.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/base_market_agent/point.py b/volttron/platform/agent/base_market_agent/point.py index e53c660c14..b9fa08d400 100644 --- a/volttron/platform/agent/base_market_agent/point.py +++ b/volttron/platform/agent/base_market_agent/point.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -47,46 +47,47 @@ class Point(tuple): _fields = ('quantity', 'price') def __new__(_cls, quantity, price): - 'Create new instance of Point(quantity, price)' - if (quantity < 0 or quantity is None): - raise ValueError('The quantity provided ({}) is an invalid value.'.format(quantity)) - if (price < 0 or price is None): - raise ValueError('The price provided ({}) is an invalid value.'.format(price)) + """Create new instance of Point(quantity, price)""" +# if (quantity < 0 or quantity is None): +# raise ValueError('The quantity provided ({}) is an invalid value.'.format(quantity)) +# if (price < 0 or price is None): +# raise ValueError('The price provided ({}) is an invalid value.'.format(price)) + # Catch exception to float_quantity = float(quantity) float_price = float(price) return _tuple.__new__(_cls, (float_quantity, float_price)) @classmethod def _make(cls, iterable, new=tuple.__new__, len=len): - 'Make a new Point object from a sequence or iterable' + """Make a new Point object from a sequence or iterable""" result = new(cls, iterable) if len(result) != 2: raise TypeError('Expected 2 arguments, got %d' % len(result)) return result def __repr__(self): - 'Return a nicely formatted representation string' + """Return a nicely formatted representation string""" return 'Point(quantity=%r, price=%r)' % self def _asdict(self): - 'Return a new OrderedDict which maps field names to their values' + """Return a new OrderedDict which maps field names to their values""" return OrderedDict(zip(self._fields, self)) def _replace(_self, **kwds): - 'Return a new Point object replacing specified fields with new values' + """Return a new Point object replacing specified fields with new values""" result = _self._make(map(kwds.pop, ('quantity', 'price'), _self)) if kwds: - raise ValueError('Got unexpected field names: %r' % list(kwds.keys())) + raise ValueError('Got unexpected field names: %r' % kwds.keys()) return result def __getnewargs__(self): - 'Return self as a plain tuple. Used by copy and pickle.' + """Return self as a plain tuple. Used by copy and pickle.""" return tuple(self) __dict__ = _property(_asdict) def __getstate__(self): - 'Exclude the OrderedDict from pickling' + """Exclude the OrderedDict from pickling""" pass def tuppleize(self): diff --git a/volttron/platform/agent/base_market_agent/poly_line.py b/volttron/platform/agent/base_market_agent/poly_line.py index dfb2181663..943e180587 100644 --- a/volttron/platform/agent/base_market_agent/poly_line.py +++ b/volttron/platform/agent/base_market_agent/poly_line.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -62,12 +62,12 @@ def add(self, point): return doSort = False # if len(self.points) > 0 and point.y < self.points[-1].y: - if len(self.points) > 0 and point.x < self.points[-1].x: + if len(self.points) > 0: doSort = True self.points.append(point) if doSort: - self.points.sort() + self.points.sort(key=lambda tup: tup[1], reverse=True) self.xs = None self.ys = None if point.x is not None and point.y is not None: @@ -108,7 +108,7 @@ def sum(x1, x2): return x1 return x1 + x2 - def x(self, y, left=None, right=None): + def x(self, y): if not self.points: return None if y is None: @@ -118,10 +118,10 @@ def x(self, y, left=None, right=None): # ascending = self.ys[0]= p1_qmin and p2_qmax >= p1_qmax: + quantity = np.mean([point[0] for point in pl_1]) + price = np.mean([point[1] for point in pl_1]) + + elif p2_qmin <= p1_qmin and p2_qmin <= p1_qmax: + quantity = p2_qmax + price = p1_pmax + + else: + price = None + quantity = None + return quantity, price + @staticmethod + def line_intersection(line1, line2): + x1x3 = line1[0][0]-line2[0][0] + y3y4 = line2[0][1]-line2[1][1] + y1y3 = line1[0][1]-line2[0][1] + y1y2 = line1[0][1]-line1[1][1] + x3x4 = line2[0][0]-line2[1][0] + x1x2 = line1[0][0]-line1[1][0] + y1y3 = line1[0][1]-line2[0][1] + if x1x2*y3y4 - y1y2*x3x4 == 0: + return None + t = (x1x3*y3y4 - y1y3*x3x4)/(x1x2*y3y4 - y1y2*x3x4) + # u=(x1x2*y1y3-y1y2*x1x3)/(x1x2*y3y4-y1y2*x3x4) + x = line1[0][0] + t*(line1[1][0] - line1[0][0]) + y = line1[0][1] + t*(line1[1][1] - line1[0][1]) + # if x>max(line1[0][0],line1[1][0]) or x>max(line2[0][0],line2[1][0]) or xmax(line1[0][1],line1[1][1]) or y>max(line2[0][1],line2[1][1]) or y max(line1[0][1], line1[1][1]): + return min(line1[0][0], line1[1][0]), y + if y > max(line2[0][1], line2[1][1]): + return min(line2[0][0], line2[1][0]), y + if y < min(line1[0][1], line1[1][1]): + return max(line1[0][0], line1[1][0]), y + if y < min(line2[0][1], line2[1][1]): + return max(line2[0][0], line2[1][0]), y + return x, y + + @staticmethod + def poly_intersection(poly1, poly2): + poly1 = poly1.points + poly2 = poly2.points + for i, p1_first_point in enumerate(poly1[:-1]): + p1_second_point = poly1[i + 1] + + for j, p2_first_point in enumerate(poly2[:-1]): + p2_second_point = poly2[j + 1] + + if PolyLine.line_intersection((p1_first_point, p1_second_point), (p2_first_point, p2_second_point)): + x, y = PolyLine.line_intersection((p1_first_point, p1_second_point), (p2_first_point, p2_second_point)) + return x, y + + return False + @staticmethod def compare(demand_curve, supply_curve): aux = {} @@ -276,10 +348,18 @@ def compare(demand_curve, supply_curve): demand_min_quantity = demand_curve.min_x() supply_max_quantity = supply_curve.max_x() supply_min_quantity = supply_curve.min_x() - aux['Sn,Dn'] = cmp(supply_min_quantity,demand_min_quantity) - aux['Sn,DX'] = cmp(supply_min_quantity,demand_max_quantity) - aux['Sx,Dn'] = cmp(supply_max_quantity,demand_min_quantity) - aux['Sx,DX'] = cmp(supply_max_quantity,demand_max_quantity) - return aux + demand_max_price = demand_curve.max_y() + demand_min_price = demand_curve.min_y() + supply_max_price = supply_curve.max_y() + supply_min_price = supply_curve.min_y() + aux['SQn,DQn'] = (supply_min_quantity > demand_min_quantity) - (supply_min_quantity < demand_min_quantity) + aux['SQn,DQx'] = (supply_min_quantity > demand_max_quantity) - (supply_min_quantity < demand_max_quantity) + aux['SQx,DQn'] = (supply_max_quantity > demand_min_quantity) - (supply_max_quantity < demand_min_quantity) + aux['SQx,DQx'] = (supply_max_quantity > demand_max_quantity) - (supply_max_quantity < demand_max_quantity) + aux['SPn,DPn'] = (supply_min_price > demand_min_price) - (supply_min_price < demand_min_price) + aux['SPn,DPx'] = (supply_min_price > demand_max_price) - (supply_min_price < demand_max_price) + aux['SPx,DPn'] = (supply_max_price > demand_min_price) - (supply_max_price < demand_min_price) + aux['SPx,DPx'] = (supply_max_price > demand_max_price) - (supply_max_price < demand_max_price) + return aux diff --git a/volttron/platform/agent/base_market_agent/poly_line_factory.py b/volttron/platform/agent/base_market_agent/poly_line_factory.py index 590db896c0..56a5606275 100644 --- a/volttron/platform/agent/base_market_agent/poly_line_factory.py +++ b/volttron/platform/agent/base_market_agent/poly_line_factory.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -45,6 +45,15 @@ _log = logging.getLogger(__name__) utils.setup_logging() + +def remove(duplicate): + final_list = [] + for num in duplicate: + if num not in final_list: + final_list.append(num) + return final_list + + class PolyLineFactory(object): @staticmethod def combine(lines, increment): @@ -86,19 +95,47 @@ def combine(lines, increment): for y in ys: xt = None for line in lines: - x = line.x(y, left=np.nan) - + x = line.x(y) + # print x, y if x is not None: xt = x if xt is None else xt + x composite.add(Point(xt, y)) return composite + @staticmethod + def combine_withoutincrement(lines): + + # we return a new PolyLine which is a composite (summed horizontally) of inputs + composite = PolyLine() + if len(lines) < 2: + if isinstance(lines[0], list): + for point in lines[0]: + composite.add(Point(point[0], point[1])) + return composite + return lines[0] + # find the range defined by the curves + ys=[] + for l in lines: + ys=ys+l.vectorize()[1] + + ys = remove(ys) + + ys.sort(reverse=True) + for y in ys: + xt = None + for line in lines: + x = line.x(y) + if x is not None: + xt = x if xt is None else xt + x + composite.add(Point(xt, y)) + return composite + @staticmethod def fromTupples(points): - polyLine = PolyLine() + poly_line = PolyLine() for p in points: if p is not None and len(p) == 2: - polyLine.add(Point(p[0], p[1])) - return polyLine + poly_line.add(Point(p[0], p[1])) + return poly_line diff --git a/volttron/platform/agent/base_market_agent/registration_manager.py b/volttron/platform/agent/base_market_agent/registration_manager.py index c05cf9bbf3..f0d9b68af5 100644 --- a/volttron/platform/agent/base_market_agent/registration_manager.py +++ b/volttron/platform/agent/base_market_agent/registration_manager.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -37,6 +37,7 @@ # }}} import logging +import gevent from volttron.platform.agent import utils from volttron.platform.agent.base_market_agent.error_codes import NOT_FORMED @@ -45,6 +46,9 @@ _log = logging.getLogger(__name__) utils.setup_logging() +GREENLET_ENABLED = False + + class RegistrationManager(object): """ The ReservationManager manages a list of MarketReservations for the MarketAgents. @@ -70,33 +74,49 @@ def make_offer(self, market_name, buyer_seller, curve): result = False error_message = "Market: {} {} was not found in the local list of markets".format(market_name, buyer_seller) for registration in self.registrations: - if (registration.market_name == market_name): + if registration.market_name == market_name: result, error_message = registration.make_offer(buyer_seller, curve, self.rpc_proxy) return result, error_message def request_reservations(self, timestamp): + greenlets = [] + _log.debug("Registration manager request_reservations") for registration in self.registrations: - registration.request_reservations(timestamp, self.rpc_proxy) + if GREENLET_ENABLED: + event = gevent.spawn(registration.request_reservations, timestamp, self.rpc_proxy) + greenlets.append(event) + else: + registration.request_reservations(timestamp, self.rpc_proxy) + gevent.joinall(greenlets) + _log.debug("After request reserverations!") def request_offers(self, timestamp, unformed_markets): + greenlets = [] + _log.debug("Registration manager request_offers") for registration in self.registrations: - if (registration.market_name not in unformed_markets): - registration.request_offers(timestamp) + if registration.market_name not in unformed_markets: + if GREENLET_ENABLED: + event = gevent.spawn(registration.request_offers, timestamp) + greenlets.append(event) + else: + registration.request_offers(timestamp) else: error_message = 'The market {} has not received a buy and a sell reservation.'.format(registration.market_name) registration.report_error(timestamp, NOT_FORMED, error_message, {}) + gevent.joinall(greenlets) + _log.debug("After request offers!") def report_clear_price(self, timestamp, market_name, price, quantity): for registration in self.registrations: - if (registration.market_name == market_name): + if registration.market_name == market_name: registration.report_clear_price(timestamp, price, quantity) def report_aggregate(self, timestamp, market_name, buyer_seller, aggregate_curve): for registration in self.registrations: - if (registration.market_name == market_name): + if registration.market_name == market_name: registration.report_aggregate(timestamp, buyer_seller, aggregate_curve) def report_error(self, timestamp, market_name, error_code, error_message, aux): for registration in self.registrations: - if (registration.market_name == market_name): + if registration.market_name == market_name: registration.report_error(timestamp, error_code, error_message, aux) diff --git a/volttron/platform/agent/base_market_agent/rpc_proxy.py b/volttron/platform/agent/base_market_agent/rpc_proxy.py index 6f527ace11..f544e394e5 100644 --- a/volttron/platform/agent/base_market_agent/rpc_proxy.py +++ b/volttron/platform/agent/base_market_agent/rpc_proxy.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,13 +46,14 @@ _log = logging.getLogger(__name__) utils.setup_logging() + class RpcProxy(object): """ The purpose of the RpcProxy is to allow the MarketRegistration to make RPC calls on the agent that subclasses of the agent can't see and therefore can't make. """ - def __init__(self, rpc_call, verbose_logging = True): + def __init__(self, rpc_call, verbose_logging=True, timeout=60): """ The initalization needs the rpc_call method to grant access to the RPC calls needed to communicate with the marketService. @@ -60,6 +61,7 @@ def __init__(self, rpc_call, verbose_logging = True): """ self.rpc_call = rpc_call self.verbose_logging = verbose_logging + self.timeout = timeout def make_reservation(self, market_name, buyer_seller): """ @@ -72,7 +74,7 @@ def make_reservation(self, market_name, buyer_seller): The agent shall use the pre-defined strings provided. """ try: - self.rpc_call(PLATFORM_MARKET_SERVICE, 'make_reservation', market_name, buyer_seller).get(timeout=5.0) + self.rpc_call(PLATFORM_MARKET_SERVICE, 'make_reservation', market_name, buyer_seller).get(timeout=self.timeout) has_reservation = True except RemoteError as e: has_reservation = False @@ -93,19 +95,19 @@ def make_offer(self, market_name, buyer_seller, curve): """ try: self.rpc_call(PLATFORM_MARKET_SERVICE, 'make_offer', market_name, buyer_seller, - curve.tuppleize()).get(timeout=5.0) + curve.tuppleize()).get(timeout=self.timeout) result = (True, None) if self.verbose_logging: _log.debug("Market: {} {} has made an offer Curve: {}".format(market_name, - buyer_seller, - curve.points)) + buyer_seller, + curve.points)) except RemoteError as e: result = (False, e.message) _log.info( "Market: {} {} has had an offer rejected because {}".format(market_name, buyer_seller, e.message)) except gevent.Timeout as e: - result = (False, str(e)) - _log.info("Market: {} {} has had an offer rejected because {}".format(market_name, buyer_seller, e)) + result = (False, e.message) + _log.info("Market: {} {} has had an offer rejected because {}".format(market_name, buyer_seller, e.message)) return result diff --git a/volttron/platform/agent/base_simulation_integration/__init__.py b/volttron/platform/agent/base_simulation_integration/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/volttron/platform/agent/base_simulation_integration/base_sim_integration.py b/volttron/platform/agent/base_simulation_integration/base_sim_integration.py new file mode 100644 index 0000000000..16aac88e26 --- /dev/null +++ b/volttron/platform/agent/base_simulation_integration/base_sim_integration.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + + +class BaseSimIntegration(object): + def __init__(self, config): + self.config = config + + def start_simulation(self, *args, **kwargs): + pass + + def register_inputs(self, config=None, callback=None, **kwargs): + pass + + def publish_to_simulation(self, topic, message, **kwargs): + pass + + def make_time_request(self, time_request=None, **kwargs): + pass + + def pause_simulation(self, timeout=None, **kwargs): + pass + + def resume_simulation(self, *args, **kwargs): + pass + + @property + def is_sim_installed(self, *args, **kwargs): + return True + + def stop_simulation(self, *args, **kwargs): + pass + + + diff --git a/volttron/platform/agent/base_tagging.py b/volttron/platform/agent/base_tagging.py index c9cac53251..5af9492766 100644 --- a/volttron/platform/agent/base_tagging.py +++ b/volttron/platform/agent/base_tagging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/base_weather.py b/volttron/platform/agent/base_weather.py index 960175710a..ac459e16e5 100644 --- a/volttron/platform/agent/base_weather.py +++ b/volttron/platform/agent/base_weather.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,7 +41,6 @@ import csv import sqlite3 import datetime -import pkg_resources from functools import wraps from abc import abstractmethod from gevent import get_hub @@ -80,6 +79,8 @@ FORECAST_TIME TIMESTAMP NOT NULL, POINTS TEXT NOT NULL);""" +AGENT_DATA_DIR = os.path.basename(os.getcwd()) + ".agent-data" + CACHE_READ_ERROR = "Cache read failed" CACHE_WRITE_ERROR = "Cache write failed" CACHE_FULL = "cache_full" @@ -103,7 +104,7 @@ class BaseWeatherAgent(Agent): """ def __init__(self, - database_file="weather.sqlite", + database_file=os.path.join(AGENT_DATA_DIR, "weather.sqlite"), api_key=None, max_size_gb=None, poll_locations=None, @@ -114,6 +115,8 @@ def __init__(self, # Initial agent configuration try: super(BaseWeatherAgent, self).__init__(**kwargs) + if os.path.dirname(database_file) == AGENT_DATA_DIR and not os.path.isdir(AGENT_DATA_DIR): + os.mkdir(AGENT_DATA_DIR) self._database_file = database_file self._async_call = AsyncCall() self._api_key = api_key @@ -396,8 +399,7 @@ def _configure(self, config_name, actions, contents): if max_size_gb is not None: self._max_size_gb = float(max_size_gb) except ValueError: - _log.warn("Invalid value for max_size_gb: {} " - "defaulting to 1GB".format(max_size_gb)) + _log.warning("Invalid value for max_size_gb: {} defaulting to 1GB".format(max_size_gb)) self._max_size_gb = 1 self._api_key = config.get("api_key") @@ -427,7 +429,7 @@ def _configure(self, config_name, actions, contents): "Configuration of weather agent " "successful") except sqlite3.OperationalError as error: - _log.error("Error initializing cache {}".format(error)) + _log.error("Error initializing cache: {}".format(error)) self.vip.health.set_status(STATUS_BAD, "Cache failed to start " "during configuration") diff --git a/volttron/platform/agent/driven.py b/volttron/platform/agent/driven.py index b33fd68afb..927f68dda1 100644 --- a/volttron/platform/agent/driven.py +++ b/volttron/platform/agent/driven.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/exit_codes.py b/volttron/platform/agent/exit_codes.py index 1210465b7e..a96bc2cf1e 100644 --- a/volttron/platform/agent/exit_codes.py +++ b/volttron/platform/agent/exit_codes.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/green.py b/volttron/platform/agent/green.py index f7e8de8e5a..9cebfbc4b6 100644 --- a/volttron/platform/agent/green.py +++ b/volttron/platform/agent/green.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/known_identities.py b/volttron/platform/agent/known_identities.py index 182d0a795b..8a0bfe8219 100644 --- a/volttron/platform/agent/known_identities.py +++ b/volttron/platform/agent/known_identities.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -36,8 +36,6 @@ # under Contract DE-AC05-76RL01830 # }}} -# import warnings - AUTH = 'platform.auth' VOLTTRON_CENTRAL = 'volttron.central' @@ -47,17 +45,25 @@ PLATFORM_TOPIC_WATCHER = 'platform.topic_watcher' PLATFORM_SYSMON = 'platform.sysmon' PLATFORM_EMAILER = 'platform.emailer' +PLATFORM_HEALTH = 'platform.health' # The PLATFORM_ALERTER known name is now deprecated PLATFORM_ALERTER = PLATFORM_TOPIC_WATCHER PLATFORM_HISTORIAN = 'platform.historian' PLATFORM_MARKET_SERVICE = 'platform.market' +ROUTER = '' CONTROL = 'control' CONTROL_CONNECTION = 'control.connection' -MASTER_WEB = 'master_web' +PLATFORM_WEB = 'platform_web' CONFIGURATION_STORE = 'config.store' +KEY_DISCOVERY = 'keydiscovery' +PROXY_ROUTER = 'zmq.proxy.router' + +ALL_KNOWN_IDENTITIES = sorted((ROUTER, VOLTTRON_CENTRAL, VOLTTRON_CENTRAL_PLATFORM, PLATFORM_HISTORIAN, CONTROL, + CONTROL_CONNECTION, PLATFORM_WEB, AUTH, PLATFORM_TOPIC_WATCHER, CONFIGURATION_STORE, + PLATFORM_MARKET_SERVICE, PLATFORM_EMAILER, PLATFORM_SYSMON, PLATFORM_HEALTH, + KEY_DISCOVERY, PROXY_ROUTER)) -all_known = (VOLTTRON_CENTRAL, VOLTTRON_CENTRAL_PLATFORM, PLATFORM_HISTORIAN, CONTROL, CONTROL_CONNECTION, MASTER_WEB, - AUTH, PLATFORM_TOPIC_WATCHER, CONFIGURATION_STORE, PLATFORM_MARKET_SERVICE, PLATFORM_EMAILER, - PLATFORM_SYSMON) +PROCESS_IDENTITIES = sorted((AUTH, PLATFORM_HEALTH, CONFIGURATION_STORE, CONTROL, PLATFORM_WEB, KEY_DISCOVERY, + PROXY_ROUTER)) diff --git a/volttron/platform/agent/matching.py b/volttron/platform/agent/matching.py index 5848a7bb91..fc68634237 100644 --- a/volttron/platform/agent/matching.py +++ b/volttron/platform/agent/matching.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/math_utils.py b/volttron/platform/agent/math_utils.py index 94e6dc19d9..33a4969dd1 100644 --- a/volttron/platform/agent/math_utils.py +++ b/volttron/platform/agent/math_utils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/multithreading.py b/volttron/platform/agent/multithreading.py index feb3625c30..00d8c30de3 100644 --- a/volttron/platform/agent/multithreading.py +++ b/volttron/platform/agent/multithreading.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/sched.py b/volttron/platform/agent/sched.py index 81950759d8..5c67456562 100644 --- a/volttron/platform/agent/sched.py +++ b/volttron/platform/agent/sched.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/agent/utils.py b/volttron/platform/agent/utils.py index 82c3b221e5..f996deead3 100644 --- a/volttron/platform/agent/utils.py +++ b/volttron/platform/agent/utils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,9 +43,9 @@ import calendar import errno import logging +import warnings import os -import re -import stat + import subprocess import sys try: @@ -234,8 +234,22 @@ def get_messagebus(): return message_bus +def is_web_enabled(): + """Returns True if web enabled, False otherwise""" + is_web = os.environ.get('BIND_WEB_ADDRESS') + _log.debug("value from env {}".format(is_web)) + if not is_web: + config = load_platform_config() + is_web = config.get('bind-web-address') + _log.debug("value from config {}".format(is_web)) + if not is_web: + return False + return True + return True + + def is_secure_mode(): - """Get type of message bus - zeromq or rabbbitmq.""" + """Returns True if running in secure mode, False otherwise""" string_value = os.environ.get('SECURE_AGENT_USERS') _log.debug("value from env {}".format(string_value)) if not string_value: @@ -509,16 +523,25 @@ def format(self, record): return super(AgentFormatter, self).format(record) -def setup_logging(level=logging.DEBUG): +def setup_logging(level=logging.DEBUG, console=False): root = logging.getLogger() if not root.handlers: handler = logging.StreamHandler() + if isapipe(sys.stderr) and '_LAUNCHED_BY_PLATFORM' in os.environ: handler.setFormatter(JsonFormatter()) + elif console: + # Below format is more readable for console + handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) else: fmt = '%(asctime)s %(name)s %(levelname)s: %(message)s' handler.setFormatter(logging.Formatter(fmt)) - + if level != logging.DEBUG: + # import it here so that when urllib3 imports the requests package, ssl would already got + # monkey patched by gevent. + # and this warning is needed only when log level is not debug + from urllib3.exceptions import InsecureRequestWarning + warnings.filterwarnings("ignore", category=InsecureRequestWarning) root.addHandler(handler) root.setLevel(level) diff --git a/volttron/platform/aip.py b/volttron/platform/aip.py index 786690ee43..6a1e396332 100644 --- a/volttron/platform/aip.py +++ b/volttron/platform/aip.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -49,6 +49,7 @@ import sys import uuid +import requests import gevent import gevent.event from gevent import subprocess @@ -73,6 +74,7 @@ from .vip.agent import Agent from .auth import AuthFile, AuthEntry, AuthFileEntryAlreadyExists from volttron.utils.rmq_mgmt import RabbitMQMgmt +from volttron.platform import update_volttron_script_path try: from volttron.restricted import auth @@ -196,12 +198,12 @@ def stop(self): try: return gevent.with_timeout(60, process_wait, self.process) except gevent.Timeout: - _log.warn("First timeout") + _log.warning("First timeout") self.process.terminate() try: return gevent.with_timeout(30, process_wait, self.process) except gevent.Timeout: - _log.warn("2nd timeout") + _log.warning("2nd timeout") self.process.kill() try: return gevent.with_timeout(30, process_wait, self.process) @@ -235,7 +237,7 @@ def execute(self, *args, **kwargs): def stop(self): if self.process.poll() is None: - cmd = ["sudo", "scripts/secure_stop_agent.sh", self.agent_user, str(self.process.pid)] + cmd = ["sudo", update_volttron_script_path("scripts/secure_stop_agent.sh"), self.agent_user, str(self.process.pid)] _log.debug("In aip secureexecutionenv {}".format(cmd)) process = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE) stdout, stderr = process.communicate() @@ -681,7 +683,10 @@ def remove_agent(self, agent_uuid, remove_auth=True): # Delete RabbitMQ user for the agent instance_name = self.instance_name rmq_user = instance_name + '.' + identity - self.rmq_mgmt.delete_user(rmq_user) + try: + self.rmq_mgmt.delete_user(rmq_user) + except requests.exceptions.HTTPError as e: + _log.error(f"RabbitMQ user {rmq_user} is not available to delete. Going ahead and removing agent directory") self.agents.pop(agent_uuid, None) agent_directory = os.path.join(self.install_dir, agent_uuid) volttron_agent_user = None @@ -691,9 +696,9 @@ def remove_agent(self, agent_uuid, remove_auth=True): with open(user_id_path, 'r') as user_id_file: volttron_agent_user = user_id_file.readline() except (KeyError, IOError) as user_id_err: - _log.warn("Volttron agent user not found at {}".format( + _log.warning("Volttron agent user not found at {}".format( user_id_path)) - _log.warn(user_id_err) + _log.warning(user_id_err) if remove_auth: self._unauthorize_agent_keys(agent_uuid) shutil.rmtree(agent_directory) diff --git a/volttron/platform/async_.py b/volttron/platform/async_.py index 739e245109..99f23dff6e 100644 --- a/volttron/platform/async_.py +++ b/volttron/platform/async_.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/auth.py b/volttron/platform/auth.py index 387770b586..4b27519df7 100644 --- a/volttron/platform/auth.py +++ b/volttron/platform/auth.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,6 +43,7 @@ import random import re import shutil +from typing import Optional import uuid from collections import defaultdict @@ -51,11 +52,12 @@ from gevent.fileobject import FileObject from zmq import green as zmq -from volttron.platform import jsonapi -from volttron.platform.agent.known_identities import VOLTTRON_CENTRAL_PLATFORM, CONTROL, MASTER_WEB +from volttron.platform import jsonapi, get_home +from volttron.platform.agent.known_identities import VOLTTRON_CENTRAL_PLATFORM, CONTROL, PLATFORM_WEB, CONTROL_CONNECTION +from volttron.platform.certs import Certs from volttron.platform.vip.agent.errors import VIPError from volttron.platform.vip.pubsubservice import ProtectedPubSubTopics -from .agent.utils import strip_comments, create_file_if_missing, watch_file +from .agent.utils import strip_comments, create_file_if_missing, watch_file, get_messagebus from .vip.agent import Agent, Core, RPC from .vip.socket import encode_key, BASE64_ENCODED_CURVE_KEY_LEN @@ -93,7 +95,9 @@ def __init__(self, auth_file, protected_topics_file, setup_mode, aip, *args, **k # This agent is started before the router so we need # to keep it from blocking. self.core.delay_running_event_set = False - + self._certs = None + if get_messagebus() == "rmq": + self._certs = Certs() self.auth_file_path = os.path.abspath(auth_file) self.auth_file = AuthFile(self.auth_file_path) self.aip = aip @@ -105,7 +109,7 @@ def __init__(self, auth_file, protected_topics_file, setup_mode, aip, *args, **k self._protected_topics_file_path = os.path.abspath(protected_topics_file) self._protected_topics_for_rmq = ProtectedPubSubTopics() self._setup_mode = setup_mode - self._auth_failures = [] + self._auth_pending = [] self._auth_denied = [] self._auth_approved = [] @@ -127,9 +131,31 @@ def setup_zap(self, sender, **kwargs): if self.core.messagebus == 'rmq': self.vip.peerlist.onadd.connect(self._check_topic_rules) + def _update_auth_lists(self, entries, is_allow=True): + auth_list = [] + for entry in entries: + auth_list.append({'domain': entry.domain, + 'address': entry.address, + 'mechanism': entry.mechanism, + 'credentials': entry.credentials, + 'user_id': entry.user_id, + 'retries': 0 + } + ) + if is_allow: + self._auth_approved = [entry for entry in auth_list if entry["address"] is not None] + else: + self._auth_denied = [entry for entry in auth_list if entry["address"] is not None] + + def read_auth_file(self): _log.info('loading auth file %s', self.auth_file_path) entries = self.auth_file.read_allow_entries() + denied_entries = self.auth_file.read_deny_entries() + # Populate auth lists with current entries + self._update_auth_lists(entries) + self._update_auth_lists(denied_entries, is_allow=False) + entries = [entry for entry in entries if entry.enabled] # sort the entries so the regex credentails follow the concrete creds entries.sort() @@ -184,12 +210,13 @@ def _send_update(self): exception = e if not peers: - raise exception + raise BaseException("No peers connected to the platform") _log.debug("after getting peerlist to send auth updates") for peer in peers: - if peer not in [self.core.identity]: + if peer not in [self.core.identity, CONTROL_CONNECTION]: + _log.debug(f"Sending auth update to peers {peer}") self.vip.rpc.call(peer, 'auth.update', user_to_caps) if self.core.messagebus == 'rmq': self._check_rmq_topic_permissions() @@ -299,7 +326,7 @@ def zap_loop(self, sender, **kwargs): else: if type(userid) == bytes: userid = userid.decode("utf-8") - self._update_auth_failures(domain, address, kind, credentials[0], userid) + self._update_auth_pending(domain, address, kind, credentials[0], userid) try: expire, delay = blocked[address] @@ -389,12 +416,31 @@ def get_authorizations(self, user_id): def approve_authorization_failure(self, user_id): """RPC method - Approves a previously failed authorization + Approves a pending CSR or credential, based on provided identity. + The approved CSR or credential can be deleted or denied later. + An approved credential is stored in the allow list in auth.json. - :param user_id: user id field from VOLTTRON Interconnect Protocol + :param user_id: user id field from VOLTTRON Interconnect Protocol or common name for CSR :type user_id: str """ - for pending in self._auth_failures: + + val_err = None + if self._certs: + # Will fail with ValueError when a zmq credential user_id is passed. + try: + self._certs.approve_csr(user_id) + permissions = self.core.rmq_mgmt.get_default_permissions(user_id) + + if "federation" in user_id: # federation needs more than the current default permissions # TODO: Fix authorization in rabbitmq + permissions = dict(configure=".*", read=".*", write=".*") + self.core.rmq_mgmt.create_user_with_permissions(user_id, permissions, True) + _log.debug("Created cert and permissions for user: {}".format(user_id)) + # Stores error message in case it is caused by an unexpected failure + except ValueError as e: + val_err = e + index = 0 + matched_index = -1 + for pending in self._auth_pending: if user_id == pending['user_id']: self._update_auth_entry( pending['domain'], @@ -403,8 +449,12 @@ def approve_authorization_failure(self, user_id): pending['credentials'], pending['user_id'] ) - self._auth_approved.append(pending) - del self._auth_failures[self._auth_failures.index(pending)] + matched_index = index + val_err = None + break + index = index + 1 + if matched_index >= 0: + del self._auth_pending[matched_index] for pending in self._auth_denied: if user_id == pending['user_id']: @@ -415,29 +465,71 @@ def approve_authorization_failure(self, user_id): pending['credentials'], pending['user_id'] ) - self._auth_approved.append(pending) - del self._auth_denied[self._auth_denied.index(pending)] + self._remove_auth_entry(pending['credentials'], is_allow=False) + val_err = None + # If the user_id supplied was not for a ZMQ credential, and the pending_csr check failed, + # output the ValueError message to the error log. + if val_err: + _log.error(f"{val_err}") @RPC.export @RPC.allow(capabilities="allow_auth_modifications") def deny_authorization_failure(self, user_id): """RPC method - Denies a previously failed authorization + Denies a pending CSR or credential, based on provided identity. + The denied CSR or credential can be deleted or accepted later. + A denied credential is stored in the deny list in auth.json. - :param user_id: user id field from VOLTTRON Interconnect Protocol + :param user_id: user id field from VOLTTRON Interconnect Protocol or common name for CSR :type user_id: str """ - for pending in self._auth_failures: + + val_err = None + if self._certs: + # Will fail with ValueError when a zmq credential user_id is passed. + try: + self._certs.deny_csr(user_id) + _log.debug("Denied cert for user: {}".format(user_id)) + # Stores error message in case it is caused by an unexpected failure + except ValueError as e: + val_err = e + + index = 0 + matched_index = -1 + for pending in self._auth_pending: if user_id == pending['user_id']: - self._auth_denied.append(pending) - del self._auth_failures[self._auth_failures.index(pending)] + self._update_auth_entry( + pending['domain'], + pending['address'], + pending['mechanism'], + pending['credentials'], + pending['user_id'], + is_allow=False + ) + matched_index = index + val_err = None + break + index = index + 1 + if matched_index >= 0: + del self._auth_pending[matched_index] for pending in self._auth_approved: if user_id == pending['user_id']: + self._update_auth_entry( + pending['domain'], + pending['address'], + pending['mechanism'], + pending['credentials'], + pending['user_id'], + is_allow=False + ) self._remove_auth_entry(pending['credentials']) - self._auth_denied.append(pending) - del self._auth_approved[self._auth_approved.index(pending)] + val_err = None + # If the user_id supplied was not for a ZMQ credential, and the pending_csr check failed, + # output the ValueError message to the error log. + if val_err: + _log.error(f"{val_err}") @RPC.export @@ -445,36 +537,174 @@ def deny_authorization_failure(self, user_id): def delete_authorization_failure(self, user_id): """RPC method - Denies a previously failed authorization + Deletes a pending CSR or credential, based on provided identity. + To approve or deny a deleted pending CSR or credential, + the request must be resent by the remote platform or agent. - :param user_id: user id field from VOLTTRON Interconnect Protocol + :param user_id: user id field from VOLTTRON Interconnect Protocol or common name for CSR :type user_id: str """ - for pending in self._auth_failures: + + val_err = None + if self._certs: + # Will fail with ValueError when a zmq credential user_id is passed. + try: + self._certs.delete_csr(user_id) + _log.debug("Denied cert for user: {}".format(user_id)) + # Stores error message in case it is caused by an unexpected failure + except ValueError as e: + val_err = e + + index = 0 + matched_index = -1 + for pending in self._auth_pending: if user_id == pending['user_id']: - del self._auth_failures[self._auth_failures.index(pending)] + self._update_auth_entry( + pending['domain'], + pending['address'], + pending['mechanism'], + pending['credentials'], + pending['user_id'] + ) + matched_index = index + val_err = None + break + index = index + 1 + if matched_index >= 0: + del self._auth_pending[matched_index] + + index = 0 + matched_index = -1 + for pending in self._auth_pending: + if user_id == pending['user_id']: + matched_index = index + val_err = None + break + index = index + 1 + if matched_index >= 0: + del self._auth_pending[matched_index] for pending in self._auth_approved: if user_id == pending['user_id']: self._remove_auth_entry(pending['credentials']) - del self._auth_approved[self._auth_approved.index(pending)] + val_err = None for pending in self._auth_denied: if user_id == pending['user_id']: - del self._auth_denied[self._auth_denied.index(pending)] + self._remove_auth_entry(pending['credentials'], is_allow=False) + val_err = None + + # If the user_id supplied was not for a ZMQ credential, and the pending_csr check failed, + # output the ValueError message to the error log. + if val_err: + _log.error(f"{val_err}") @RPC.export - def get_authorization_failures(self): - return list(self._auth_failures) + def get_authorization_pending(self): + """RPC method + + Returns a list of failed (pending) ZMQ credentials. + + :rtype: list + """ + return list(self._auth_pending) @RPC.export def get_authorization_approved(self): + """RPC method + + Returns a list of approved ZMQ credentials. + This list is updated whenever the auth file is read. + It includes all allow entries from the auth file that contain a populated address field. + + :rtype: list + """ return list(self._auth_approved) @RPC.export def get_authorization_denied(self): + """RPC method + + Returns a list of denied ZMQ credentials. + This list is updated whenever the auth file is read. + It includes all deny entries from the auth file that contain a populated address field. + + :rtype: list + """ return list(self._auth_denied) + @RPC.export + @RPC.allow(capabilities="allow_auth_modifications") + def get_pending_csrs(self): + """RPC method + + Returns a list of pending CSRs. + This method provides RPC access to the Certs class's get_pending_csr_requests method. + This method is only applicable for web-enabled, RMQ instances. + + :rtype: list + """ + if self._certs: + csrs = [c for c in self._certs.get_pending_csr_requests()] + return csrs + else: + return [] + + @RPC.export + @RPC.allow(capabilities="allow_auth_modifications") + def get_pending_csr_status(self, common_name): + """RPC method + + Returns the status of a pending CSRs. + This method provides RPC access to the Certs class's get_csr_status method. + This method is only applicable for web-enabled, RMQ instances. + Currently, this method is only used by admin_endpoints. + + :param common_name: Common name for CSR + :type common_name: str + :rtype: str + """ + if self._certs: + return self._certs.get_csr_status(common_name) + else: + return "" + + @RPC.export + @RPC.allow(capabilities="allow_auth_modifications") + def get_pending_csr_cert(self, common_name): + """RPC method + + Returns the cert of a pending CSRs. + This method provides RPC access to the Certs class's get_cert_from_csr method. + This method is only applicable for web-enabled, RMQ instances. + Currently, this method is only used by admin_endpoints. + + :param common_name: Common name for CSR + :type common_name: str + :rtype: str + """ + if self._certs: + return self._certs.get_cert_from_csr(common_name).decode('utf-8') + else: + return "" + + @RPC.export + @RPC.allow(capabilities="allow_auth_modifications") + def get_all_pending_csr_subjects(self): + """RPC method + + Returns a list of all certs subjects. + This method provides RPC access to the Certs class's get_all_cert_subjects method. + This method is only applicable for web-enabled, RMQ instances. + Currently, this method is only used by admin_endpoints. + + :rtype: list + """ + if self._certs: + return self._certs.get_all_cert_subjects() + else: + return [] + def _get_authorizations(self, user_id, index): """Convenience method for getting authorization component by index""" auths = self.get_authorizations(user_id) @@ -521,7 +751,7 @@ def get_roles(self, user_id): """ return self._get_authorizations(user_id, 2) - def _update_auth_entry(self, domain, address, mechanism, credential, user_id): + def _update_auth_entry(self, domain, address, mechanism, credential, user_id, is_allow=True): # Make a new entry fields = { "domain": domain, @@ -537,17 +767,17 @@ def _update_auth_entry(self, domain, address, mechanism, credential, user_id): new_entry = AuthEntry(**fields) try: - self.auth_file.add(new_entry, overwrite=False) + self.auth_file.add(new_entry, overwrite=False, is_allow=is_allow) except AuthException as err: _log.error('ERROR: %s\n' % str(err)) - def _remove_auth_entry(self, credential): + def _remove_auth_entry(self, credential, is_allow=True): try: - self.auth_file.remove_by_credentials(credential) + self.auth_file.remove_by_credentials(credential, is_allow=is_allow) except AuthException as err: _log.error('ERROR: %s\n' % str(err)) - def _update_auth_failures(self, domain, address, mechanism, credential, user_id): + def _update_auth_pending(self, domain, address, mechanism, credential, user_id): for entry in self._auth_denied: # Check if failure entry has been denied. If so, increment the failure's denied count if ((entry['domain'] == domain) and @@ -557,7 +787,7 @@ def _update_auth_failures(self, domain, address, mechanism, credential, user_id) entry['retries'] += 1 return - for entry in self._auth_failures: + for entry in self._auth_pending: # Check if failure entry exists. If so, increment the failure count if ((entry['domain'] == domain) and (entry['address'] == address) and @@ -574,7 +804,7 @@ def _update_auth_failures(self, domain, address, mechanism, credential, user_id) "user_id": user_id, "retries": 1 } - self._auth_failures.append(dict(fields)) + self._auth_pending.append(dict(fields)) return def _load_protected_topics_for_rmq(self): @@ -748,7 +978,7 @@ class AuthEntry(object): def __init__(self, domain=None, address=None, mechanism='CURVE', credentials=None, user_id=None, groups=None, roles=None, - capabilities=None, comments=None, enabled=True, **kwargs): + capabilities: Optional[dict] = None, comments=None, enabled=True, **kwargs): self.domain = AuthEntry._build_field(domain) self.address = AuthEntry._build_field(address) @@ -786,7 +1016,7 @@ def _build_field(value): return List(String(elem) for elem in value) @staticmethod - def build_capabilities_field(value): + def build_capabilities_field(value: Optional[dict]): #_log.debug("_build_capabilities {}".format(value)) if not value: @@ -870,8 +1100,7 @@ def _check_validity(self): class AuthFile(object): def __init__(self, auth_file=None): if auth_file is None: - auth_file_dir = os.path.expanduser( - os.environ.get('VOLTTRON_HOME', '~/.volttron')) + auth_file_dir = get_home() auth_file = os.path.join(auth_file_dir, 'auth.json') self.auth_file = auth_file self._check_for_upgrade() @@ -881,10 +1110,10 @@ def version(self): return {'major': 1, 'minor': 2} def _check_for_upgrade(self): - allow_list, groups, roles, version = self._read() + allow_list, deny_list, groups, roles, version = self._read() if version != self.version: if version['major'] <= self.version['major']: - self._upgrade(allow_list, groups, roles, version) + self._upgrade(allow_list, deny_list, groups, roles, version) else: _log.error('This version of VOLTTRON cannot parse {}. ' 'Please upgrade VOLTTRON or move or delete ' @@ -906,10 +1135,11 @@ def _read(self): _log.exception('error loading %s', self.auth_file) allow_list = auth_data.get('allow', []) + deny_list = auth_data.get('deny', []) groups = auth_data.get('groups', {}) roles = auth_data.get('roles', {}) version = auth_data.get('version', {'major': 0, 'minor': 0}) - return allow_list, groups, roles, version + return allow_list, deny_list, groups, roles, version def read(self): """Gets the allowed entries, groups, and roles from the auth @@ -918,19 +1148,18 @@ def read(self): :returns: tuple of allow-entries-list, groups-dict, roles-dict :rtype: tuple """ - allow_list, groups, roles, _ = self._read() - entries = self._get_entries(allow_list) - self._use_groups_and_roles(entries, groups, roles) - return entries, groups, roles + allow_list, deny_list, groups, roles, _ = self._read() + allow_entries, deny_entries = self._get_entries(allow_list, deny_list) + self._use_groups_and_roles(allow_entries, groups, roles) + return allow_entries, deny_entries, groups, roles - def _upgrade(self, allow_list, groups, roles, version): + def _upgrade(self, allow_list, deny_list, groups, roles, version): backup = self.auth_file + '.' + str(uuid.uuid4()) + '.bak' shutil.copy(self.auth_file, backup) _log.info('Created backup of {} at {}'.format(self.auth_file, backup)) def warn_invalid(entry, msg=''): - _log.warn('Invalid entry {} in auth file {}. {}' - .format(entry, self.auth_file, msg)) + _log.warning('Invalid entry {} in auth file {}. {}'.format(entry, self.auth_file, msg)) def upgrade_0_to_1(allow_list): new_allow_list = [] @@ -984,7 +1213,7 @@ def upgrade_1_0_to_1_1(allow_list): msg = ('user_id {} is already present in ' 'authentication entry. Changed to user_id to ' '{}').format(user_id, new_user_id) - _log.warn(msg) + _log.warning(msg) user_id_ = new_user_id else: user_id = str(uuid.uuid4()) @@ -1015,8 +1244,8 @@ def upgrade_1_1_to_1_2(allow_list): if version['major'] == 1 and version['minor'] == 1: allow_list = upgrade_1_1_to_1_2(allow_list) - entries = self._get_entries(allow_list) - self._write(entries, groups, roles) + allow_entries, deny_entries = self._get_entries(allow_list, deny_list) + self._write(allow_entries, deny_entries, groups, roles) def read_allow_entries(self): """Gets the allowed entries from the auth file. @@ -1026,19 +1255,43 @@ def read_allow_entries(self): """ return self.read()[0] - def find_by_credentials(self, credentials): + def read_deny_entries(self): + """Gets the denied entries from the auth file. + + :returns: list of deny-entries + :rtype: list + """ + return self.read()[1] + + def find_by_credentials(self, credentials, is_allow=True): """Find all entries that have the given credentials :param str credentials: The credentials to search for :return: list of entries :rtype: list """ - return [entry for entry in self.read_allow_entries() - if str(entry.credentials) == credentials] - def _get_entries(self, allow_list): - entries = [] + if is_allow: + return [entry for entry in self.read_allow_entries() + if str(entry.credentials) == credentials] + else: + return [entry for entry in self.read_deny_entries() + if str(entry.credentials) == credentials] + + def _get_entries(self, allow_list, deny_list): + allow_entries = [] for file_entry in allow_list: + try: + entry = AuthEntry(**file_entry) + except TypeError: + _log.warning('invalid entry %r in auth file %s', file_entry, self.auth_file) + except AuthEntryInvalid as e: + _log.warning('invalid entry %r in auth file %s (%s)', file_entry, self.auth_file, str(e)) + else: + allow_entries.append(entry) + + deny_entries = [] + for file_entry in deny_list: try: entry = AuthEntry(**file_entry) except TypeError: @@ -1048,8 +1301,8 @@ def _get_entries(self, allow_list): _log.warn('invalid entry %r in auth file %s (%s)', file_entry, self.auth_file, str(e)) else: - entries.append(entry) - return entries + deny_entries.append(entry) + return allow_entries, deny_entries def _use_groups_and_roles(self, entries, groups, roles): """Add capabilities to each entry based on groups and roles""" @@ -1064,64 +1317,92 @@ def _use_groups_and_roles(self, entries, groups, roles): capabilities += roles.get(role, []) entry.add_capabilities(list(set(capabilities))) - def _check_if_exists(self, entry): + def _check_if_exists(self, entry, is_allow=True): """Raises AuthFileEntryAlreadyExists if entry is already in file""" - for index, prev_entry in enumerate(self.read_allow_entries()): - if entry.user_id == prev_entry.user_id: - raise AuthFileUserIdAlreadyExists(entry.user_id, [index]) + if is_allow: + for index, prev_entry in enumerate(self.read_allow_entries()): + if entry.user_id == prev_entry.user_id: + raise AuthFileUserIdAlreadyExists(entry.user_id, [index]) + + # Compare AuthEntry objects component-wise, rather than + # using match, because match will evaluate regex. + if (prev_entry.domain == entry.domain and + prev_entry.address == entry.address and + prev_entry.mechanism == entry.mechanism and + prev_entry.credentials == entry.credentials): + raise AuthFileEntryAlreadyExists([index]) + else: + for index, prev_entry in enumerate(self.read_deny_entries()): + if entry.user_id == prev_entry.user_id: + raise AuthFileUserIdAlreadyExists(entry.user_id, [index]) - # Compare AuthEntry objects component-wise, rather than - # using match, because match will evaluate regex. - if (prev_entry.domain == entry.domain and + # Compare AuthEntry objects component-wise, rather than + # using match, because match will evaluate regex. + if (prev_entry.domain == entry.domain and prev_entry.address == entry.address and prev_entry.mechanism == entry.mechanism and prev_entry.credentials == entry.credentials): - raise AuthFileEntryAlreadyExists([index]) + raise AuthFileEntryAlreadyExists([index]) - def _update_by_indices(self, auth_entry, indices): + def _update_by_indices(self, auth_entry, indices, is_allow=True): """Updates all entries at given indices with auth_entry""" for index in indices: - self.update_by_index(auth_entry, index) + self.update_by_index(auth_entry, index, is_allow) - def add(self, auth_entry, overwrite=False): + def add(self, auth_entry, overwrite=False, no_error=False, is_allow=True): """Adds an AuthEntry to the auth file :param auth_entry: authentication entry :param overwrite: set to true to overwrite matching entries + :param no_error: + set to True to not throw an AuthFileEntryAlreadyExists when attempting to add an exiting entry. + :type auth_entry: AuthEntry :type overwrite: bool + :type no_error: bool .. warning:: If overwrite is set to False and if auth_entry matches an existing entry then this method will raise - AuthFileEntryAlreadyExists + AuthFileEntryAlreadyExists unless no_error is set to true """ try: - self._check_if_exists(auth_entry) + self._check_if_exists(auth_entry, is_allow) except AuthFileEntryAlreadyExists as err: if overwrite: _log.debug("Updating existing auth entry with {} ".format(auth_entry)) - self._update_by_indices(auth_entry, err.indices) + self._update_by_indices(auth_entry, err.indices, is_allow) else: - raise err + if not no_error: + raise err else: - entries, groups, roles = self.read() - entries.append(auth_entry) - self._write(entries, groups, roles) + allow_entries, deny_entries, groups, roles = self.read() + if is_allow: + allow_entries.append(auth_entry) + else: + deny_entries.append(auth_entry) + self._write(allow_entries, deny_entries, groups, roles) _log.debug("Added auth entry {} ".format(auth_entry)) gevent.sleep(1) - def remove_by_credentials(self, credentials): + def remove_by_credentials(self, credentials, is_allow=True): """Removes entry from auth file by credential :para credential: entries will this credential will be removed :type credential: str """ - entries, groups, roles = self.read() + allow_entries, deny_entries, groups, roles = self.read() + if is_allow: + entries = allow_entries + else: + entries = deny_entries entries = [e for e in entries if e.credentials != credentials] - self._write(entries, groups, roles) + if is_allow: + self._write(entries, deny_entries, groups, roles) + else: + self._write(allow_entries, entries, groups, roles) - def remove_by_index(self, index): + def remove_by_index(self, index, is_allow=True): """Removes entry from auth file by index :param index: index of entry to remove @@ -1130,9 +1411,9 @@ def remove_by_index(self, index): .. warning:: Calling with out-of-range index will raise AuthFileIndexError """ - self.remove_by_indices([index]) + self.remove_by_indices([index], is_allow) - def remove_by_indices(self, indices): + def remove_by_indices(self, indices, is_allow=True): """Removes entry from auth file by indices :param indices: list of indicies of entries to remove @@ -1143,13 +1424,20 @@ def remove_by_indices(self, indices): """ indices = list(set(indices)) indices.sort(reverse=True) - entries, groups, roles = self.read() + allow_entries, deny_entries, groups, roles = self.read() + if is_allow: + entries = allow_entries + else: + entries = deny_entries for index in indices: try: del entries[index] except IndexError: raise AuthFileIndexError(index) - self._write(entries, groups, roles) + if is_allow: + self._write(entries, deny_entries, groups, roles) + else: + self._write(allow_entries, entries, groups, roles) def _set_groups_or_roles(self, groups_or_roles, is_group=True): param_name = 'groups' if is_group else 'roles' @@ -1159,12 +1447,12 @@ def _set_groups_or_roles(self, groups_or_roles, is_group=True): if not isinstance(value, list): raise ValueError('each value of the {} dict must be ' 'a list'.format(param_name)) - entries, groups, roles = self.read() + allow_entries, deny_entries, groups, roles = self.read() if is_group: groups = groups_or_roles else: roles = groups_or_roles - self._write(entries, groups, roles) + self._write(allow_entries, deny_entries, groups, roles) def set_groups(self, groups): """Define the mapping of group names to role lists @@ -1188,7 +1476,7 @@ def set_roles(self, roles): """ self._set_groups_or_roles(roles, is_group=False) - def update_by_index(self, auth_entry, index): + def update_by_index(self, auth_entry, index, is_allow=True): """Updates entry will given auth entry at given index :param auth_entry: new authorization entry @@ -1199,16 +1487,24 @@ def update_by_index(self, auth_entry, index): .. warning:: Calling with out-of-range index will raise AuthFileIndexError """ - entries, groups, roles = self.read() + allow_entries, deny_entries, groups, roles = self.read() + if is_allow: + entries = allow_entries + else: + entries = deny_entries try: entries[index] = auth_entry except IndexError: raise AuthFileIndexError(index) - self._write(entries, groups, roles) + if is_allow: + self._write(entries, deny_entries, groups, roles) + else: + self._write(allow_entries, entries, groups, roles) - def _write(self, entries, groups, roles): - auth = {'allow': [vars(x) for x in entries], 'groups': groups, - 'roles': roles, 'version': self.version} + def _write(self, allow_entries, deny_entries, groups, roles): + auth = {'allow': [vars(x) for x in allow_entries], + 'deny': [vars(x) for x in deny_entries], + 'groups': groups, 'roles': roles, 'version': self.version} with open(self.auth_file, 'w') as fp: jsonapi.dump(auth, fp, indent=2) diff --git a/volttron/platform/certs.py b/volttron/platform/certs.py index 54f6c2a71f..b6d2725cbb 100644 --- a/volttron/platform/certs.py +++ b/volttron/platform/certs.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -67,7 +67,7 @@ ENC_STANDARD = 65537 SHA_HASH = 'sha256' # # days before the certificate will timeout. -DEFAULT_DAYS = 365 +DEFAULT_DAYS = 365 * 10 # 10 years DEFAULT_TIMOUT = 60 * 60 * 24 * 360 * 10 @@ -574,53 +574,12 @@ def deny_csr(self, common_name): fp.write(jsonapi.dumps(meta)) def sign_csr(self, csr_file): - ca_crt = self.ca_cert() - ca_pkey = _load_key(self.private_key_file(self.root_ca_name)) with open(csr_file, 'rb') as f: csr = x509.load_pem_x509_csr(data=f.read(), backend=default_backend()) subject_common_name = csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value - - if self.cert_exists(subject_common_name): - crt = self.cert(subject_common_name) - return crt.public_bytes(encoding=serialization.Encoding.PEM) - - crt = x509.CertificateBuilder().subject_name( - csr.subject - ).issuer_name( - ca_crt.subject - ).public_key( - csr.public_key() - ).serial_number( - int(time.time()) # pylint: disable=no-member - ).not_valid_before( - datetime.datetime.utcnow() - ).not_valid_after( - datetime.datetime.utcnow() + datetime.timedelta(days=365 * 10) - ).add_extension( - extension=x509.KeyUsage( - digital_signature=True, key_encipherment=True, content_commitment=True, - data_encipherment=False, key_agreement=False, encipher_only=False, decipher_only=False, - key_cert_sign=False, crl_sign=False - ), - critical=True - ).add_extension( - extension=x509.BasicConstraints(ca=False, path_length=None), - critical=True - ).add_extension( - extension=x509.AuthorityKeyIdentifier.from_issuer_public_key(ca_pkey.public_key()), - critical=False - ).sign( - private_key=ca_pkey, - algorithm=hashes.SHA256(), - backend=default_backend() - ) - - new_cert_file = self.cert_file( - csr.subject.get_attributes_for_oid(NameOID.COMMON_NAME)[0].value) - with open(new_cert_file, 'wb') as f: - f.write(crt.public_bytes(encoding=serialization.Encoding.PEM)) - return crt.public_bytes(encoding=serialization.Encoding.PEM) + cert, _ = self.create_signed_cert_files(name=subject_common_name, overwrite=False, csr=csr) + return cert.public_bytes(encoding=serialization.Encoding.PEM) def cert_exists(self, cert_name, remote=False): """ @@ -798,7 +757,7 @@ def save_key(self, file_path): os.chmod(key_file, 0o600) def create_signed_cert_files(self, name, cert_type='client', ca_name=None, - overwrite=True, valid_days=DEFAULT_DAYS, + overwrite=True, valid_days=DEFAULT_DAYS, csr=None, **kwargs): """ Create a new certificate and sign it with the volttron instance's @@ -811,6 +770,9 @@ def create_signed_cert_files(self, name, cert_type='client', ca_name=None, overwritten :param name: name used to save the newly created certificate and private key. Files are saved as .crt and .pem + :param csr: Certificate Signing Request(CSR) based on which cert should be created. + In this case no new private key is generated. CSR's public bytes and subject are used in building the + certificate :param kwargs: dictionary object containing various details about who we are. Possible arguments: @@ -822,22 +784,32 @@ def create_signed_cert_files(self, name, cert_type='client', ca_name=None, CN - Common Name :return: True if certificate creation was successful """ - if not overwrite: - if self.cert_exists(name): - return False + if csr: + remote = True + else: + remote = False + + if not overwrite and self.cert_exists(name, remote=remote): + if remote: + return _load_cert(self.cert_file(name, remote)), None + else: + return _load_cert(self.cert_file(name)), self.private_key_file(name) if not ca_name: ca_name = self.root_ca_name cert, key, serial = _create_signed_certificate(ca_cert=self.cert(ca_name), ca_key=_load_key(self.private_key_file(ca_name)), - name=name, valid_days=valid_days, type=cert_type, **kwargs) - - self._save_cert(name, cert, key) + name=name, valid_days=valid_days, type=cert_type, + csr=csr, **kwargs) + if csr: + self._save_cert(name, cert, key, remote=remote) + else: + self._save_cert(name, cert, key) self.update_ca_db(cert, ca_name, serial) return cert, key - def _save_cert(self, name, cert, pk): + def _save_cert(self, name, cert, pk, remote=False): """ Save the given certificate and private key using name.crt and name.pem respectively. @@ -846,26 +818,28 @@ def _save_cert(self, name, cert, pk): :param pk: :class: ` :return: """ - with open(self.cert_file(name), "wb") as f: + with open(self.cert_file(name, remote=remote), "wb") as f: f.write(cert.public_bytes(serialization.Encoding.PEM)) - os.chmod(self.cert_file(name), 0o644) - encryption = serialization.NoEncryption() - if PROMPT_PASSPHRASE: - encryption = serialization.BestAvailableEncryption( - get_passphrase(prompt1='Enter passphrase for private ' - 'key ' + - name + ":") - ) - - # Write our key to disk for safe keeping - key_file = self.private_key_file(name) - with open(key_file, "wb") as f: - f.write(pk.private_bytes( - encoding=serialization.Encoding.PEM, - format=serialization.PrivateFormat.TraditionalOpenSSL, - encryption_algorithm=encryption - )) - os.chmod(key_file, 0o600) + os.chmod(self.cert_file(name, remote=remote), 0o644) + + if pk: + encryption = serialization.NoEncryption() + if PROMPT_PASSPHRASE: + encryption = serialization.BestAvailableEncryption( + get_passphrase(prompt1='Enter passphrase for private ' + 'key ' + + name + ":") + ) + + # Write our key to disk for safe keeping + key_file = self.private_key_file(name) + with open(key_file, "wb") as f: + f.write(pk.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=encryption + )) + os.chmod(key_file, 0o600) def update_ca_db(self, cert, ca_name, serial): """ @@ -947,7 +921,7 @@ def _create_private_key(): ) -def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='client', **kwargs): +def _create_signed_certificate(ca_cert, ca_key, name, valid_days=DEFAULT_DAYS, type='client', csr=None, **kwargs): """ Creates signed cert of type provided and signs it with ca_key provided. To create subject for the new certificate common name is set new value, rest of the attributes are copied from subject of provided ca certificate @@ -965,15 +939,18 @@ def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='clie # crptography 2.2.2 ski = ca_cert.extensions.get_extension_for_class( x509.SubjectKeyIdentifier) - - key = _create_private_key() - # key = rsa.generate_private_key( - # public_exponent=65537, - # key_size=2048, - # backend=default_backend() - # ) fqdn = kwargs.pop('fqdn', None) - if kwargs: + + if csr: + key = None + public_key = csr.public_key() + else: + key = _create_private_key() + public_key = key.public_key() + + if csr: + subject = csr.subject + elif kwargs: subject = _create_subject(**kwargs) else: temp_list = ca_cert.subject.rdns @@ -1004,11 +981,11 @@ def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='clie ).issuer_name( issuer ).public_key( - key.public_key() + public_key ).not_valid_before( datetime.datetime.utcnow() ).not_valid_after( - # Our certificate will be valid for 365 days + # Our certificate will be valid for 3650 days datetime.datetime.utcnow() + datetime.timedelta(days=valid_days) ).add_extension( x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski), @@ -1021,7 +998,7 @@ def _create_signed_certificate(ca_cert, ca_key, name, valid_days=365, type='clie critical=True ).add_extension( x509.SubjectKeyIdentifier( - _create_fingerprint(key.public_key())), + _create_fingerprint(public_key)), critical=False ) # cryptography 2.7 diff --git a/volttron/platform/config.py b/volttron/platform/config.py index 947779bfb8..adf3fa220c 100644 --- a/volttron/platform/config.py +++ b/volttron/platform/config.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -54,7 +54,6 @@ import shlex as _shlex import sys as _sys from volttron.platform.instance_setup import main -from volttron.platform.agent import utils def expandall(string): @@ -586,10 +585,6 @@ def add_argument(*args, **kwargs): def _main(): try: - # Protect against configuration of base logger when not the "main entry point" - utils.setup_logging() - import logging - logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING) main() except KeyboardInterrupt: print('\n') diff --git a/volttron/platform/control.py b/volttron/platform/control.py index df8abbc6d9..b5750df0f4 100644 --- a/volttron/platform/control.py +++ b/volttron/platform/control.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,7 +51,7 @@ import tempfile import traceback import uuid -from datetime import timedelta +from datetime import timedelta, datetime import gevent import gevent.event @@ -64,9 +64,10 @@ from volttron.platform import config from volttron.platform import get_home, get_address from volttron.platform import jsonapi +from volttron.platform.jsonrpc import MethodNotFound from volttron.platform.agent import utils from volttron.platform.agent.known_identities import CONTROL_CONNECTION, \ - CONFIGURATION_STORE + CONFIGURATION_STORE, PLATFORM_HEALTH, AUTH from volttron.platform.auth import AuthEntry, AuthFile, AuthException from volttron.platform.certs import Certs from volttron.platform.jsonrpc import RemoteError @@ -74,12 +75,13 @@ from volttron.platform.messaging.health import Status, STATUS_BAD from volttron.platform.scheduling import periodic from volttron.platform.vip.agent import Agent as BaseAgent, Core, RPC -from volttron.platform.vip.agent.errors import VIPError +from volttron.platform.vip.agent.errors import VIPError, Unreachable from volttron.platform.vip.agent.subsystems.query import Query from volttron.utils.rmq_config_params import RMQConfig from volttron.utils.rmq_mgmt import RabbitMQMgmt from volttron.utils.rmq_setup import check_rabbit_status from volttron.platform.agent.utils import is_secure_mode, wait_for_volttron_shutdown +from . install_agents import add_install_agent_parser, install_agent try: import volttron.restricted @@ -294,6 +296,12 @@ def list_agents(self): 'identity': self.agent_vip_identity(uuid)} for uuid, name in self._aip.list_agents().items()] + @RPC.export + def list_agents_rpc(self): + pass + # agents = self.list_agents() + # return [jsonapi.dumps(self.vip.rpc.call(agent.vip_identity, 'inspect').get(timeout=4)) for agent in agents] + @RPC.export def tag_agent(self, uuid, tag): if not isinstance(uuid, str): @@ -523,11 +531,18 @@ def filter_agents(agents, patterns, opts): for pattern in patterns: regex, _ = escape(pattern) result = set() + + # if no option is selected, try matching based on uuid if not (by_uuid or by_name or by_tag): reobj = re.compile(regex) matches = [agent for agent in agents if reobj.match(agent.uuid)] if len(matches) == 1: result.update(matches) + # if no match is found based on uuid, try matching on agent name + elif len(matches) == 0: + matches = [agent for agent in agents if reobj.match(agent.name)] + if len(matches) >= 1: + result.update(matches) else: reobj = re.compile(regex + '$') if by_uuid: @@ -605,85 +620,6 @@ def restore_agents_data(agent_uuid): callback=restore_agents_data) -def install_agent(opts, publickey=None, secretkey=None, callback=None): - aip = opts.aip - filename = opts.wheel - tag = opts.tag - vip_identity = opts.vip_identity - if opts.vip_address.startswith('ipc://'): - _log.info("Installing wheel locally without channel subsystem") - filename = config.expandall(filename) - agent_uuid = opts.connection.call('install_agent_local', - filename, - vip_identity=vip_identity, - publickey=publickey, - secretkey=secretkey) - - if tag: - opts.connection.call('tag_agent', agent_uuid, tag) - - else: - try: - _log.debug('Creating channel for sending the agent.') - channel_name = str(uuid.uuid4()) - channel = opts.connection.server.vip.channel('control', - channel_name) - _log.debug('calling control install agent.') - agent_uuid = opts.connection.call_no_get('install_agent', - filename, - channel_name, - vip_identity=vip_identity, - publickey=publickey, - secretkey=secretkey) - - _log.debug('Sending wheel to control') - sha512 = hashlib.sha512() - with open(filename, 'rb') as wheel_file_data: - while True: - # get a request - with gevent.Timeout(60): - request, file_offset, chunk_size = channel.recv_multipart() - if request == 'checksum': - channel.send(sha512.digest()) - break - - assert request == 'fetch' - - # send a chunk of the file - file_offset = int(file_offset) - chunk_size = int(chunk_size) - wheel_file_data.seek(file_offset) - data = wheel_file_data.read(chunk_size) - sha512.update(data) - channel.send(data) - - agent_uuid = agent_uuid.get(timeout=10) - - except Exception as exc: - if opts.debug: - traceback.print_exc() - _stderr.write( - '{}: error: {}: {}\n'.format(opts.command, exc, filename)) - return 10 - else: - if tag: - opts.connection.call('tag_agent', - agent_uuid, - tag) - finally: - _log.debug('closing channel') - channel.close(linger=0) - del channel - - name = opts.connection.call('agent_name', agent_uuid) - _stdout.write('Installed {} as {} {}\n'.format(filename, agent_uuid, name)) - - # Need to use a callback here rather than a return value. I am not 100% - # sure why this is the reason for allowing our tests to pass. - if callback: - callback(agent_uuid) - - def tag_agent(opts): agents = filter_agent(_list_agents(opts.aip), opts.agent, opts) if len(agents) != 1: @@ -754,6 +690,304 @@ def list_peers(opts): sys.stdout.write("{}\n".format(peer)) +def print_rpc_list(peers, code=False): + for peer in peers: + print(f'{peer}') + for method in peers[peer]: + if code: + print(f"\tself.vip.rpc.call({peer}, {method}).get()") + else: + print(f'\t{method}') + + +def print_rpc_methods(opts, peer_method_metadata, code=False): + for peer in peer_method_metadata: + if code is True: + pass + else: + print(f'{peer}') + for method in peer_method_metadata[peer]: + params = peer_method_metadata[peer][method].get('params', "No parameters for this method.") + if code is True: + if len(params) == 0: + print(f"self.vip.rpc.call({peer}, {method}).get()") + else: + print(f"self.vip.rpc.call({peer}, {method}, {[param for param in params]}).get()") + continue + else: + print(f'\t{method}') + if opts.verbose == True: + print("\tDocumentation:") + doc = peer_method_metadata[peer][method]\ + .get('doc', "No documentation for this method.")\ + .replace("\n", "\n\t\t") + print(f'\t\t{doc}\n') + print("\tParameters:") + if type(params) is str: + print(f'\t\t{params}') + else: + for param in params: + print(f'\t\t{param}:\n\t\t\t{params[param]}') + + +def list_agents_rpc(opts): + conn = opts.connection + try: + peers = sorted(conn.call('peerlist')) + except Exception as e: + print(e) + if opts.by_vip == True or len(opts.pattern) == 1: + peers = [peer for peer in peers if peer in opts.pattern] + elif len(opts.pattern) > 1: + peer = opts.pattern[0] + methods = opts.pattern[1:] + peer_method_metadata = {peer: {}} + for method in methods: + try: + peer_method_metadata[peer][method] = conn.server.vip.rpc.call( + peer, f'{method}.inspect').get(timeout=4) + except gevent.Timeout: + print(f'{peer} has timed out.') + except Unreachable: + print(f'{peer} is unreachable') + except MethodNotFound as e: + print(e) + + # _stdout.write(f"{peer_method_metadata}\n") + print_rpc_methods(opts, peer_method_metadata) + return + peer_methods = {} + for peer in peers: + try: + peer_methods[peer] = conn.server.vip.rpc.call( + peer, 'inspect').get(timeout=4)["methods"] + except gevent.Timeout: + print(f'{peer} has timed out') + except Unreachable: + print(f'{peer} is unreachable') + except MethodNotFound as e: + print(e) + + if opts.verbose is True: + print_rpc_list(peer_methods) + # for peer in peer_methods: + # _stdout.write(f"{peer}:{peer_methods[peer]}\n") + else: + for peer in peer_methods: + peer_methods[peer] = [method for method in peer_methods[peer] if "." not in method] + # _stdout.write(f"{peer}:{peer_methods[peer]}\n") + print_rpc_list(peer_methods) + + +def list_agent_rpc_code(opts): + conn = opts.connection + try: + peers = sorted(conn.call('peerlist')) + except Exception as e: + print(e) + if len(opts.pattern) == 1: + peers = [peer for peer in peers if peer in opts.pattern] + elif len(opts.pattern) > 1: + peer = opts.pattern[0] + methods = opts.pattern[1:] + peer_method_metadata = {peer: {}} + for method in methods: + try: + peer_method_metadata[peer][method] = conn.server.vip.rpc.call( + peer, f'{method}.inspect').get(timeout=4) + except gevent.Timeout: + print(f'{peer} has timed out.') + except Unreachable: + print(f'{peer} is unreachable') + except MethodNotFound as e: + print(e) + + # _stdout.write(f"{peer_method_metadata}\n") + print_rpc_methods(opts, peer_method_metadata, code=True) + return + + peer_methods = {} + for peer in peers: + try: + peer_methods[peer] = conn.server.vip.rpc.call( + peer, 'inspect').get(timeout=4)["methods"] + except gevent.Timeout: + print(f'{peer} has timed out.') + except Unreachable: + print(f'{peer} is unreachable') + except MethodNotFound as e: + print(e) + + if opts.verbose is True: + pass + else: + for peer in peer_methods: + peer_methods[peer] = [method for method in peer_methods[peer] if "." not in method] + + peer_method_metadata = {} + for peer in peer_methods: + peer_method_metadata[peer] = {} + for method in peer_methods[peer]: + try: + peer_method_metadata[peer][method] = conn.server.vip.rpc.call( + peer, f'{method}.inspect').get(timeout=4) + except gevent.Timeout: + print(f'{peer} has timed out') + except Unreachable: + print(f'{peer} is unreachable') + except MethodNotFound as e: + print(e) + print_rpc_methods(opts, peer_method_metadata, code=True) + + +def list_remotes(opts): + """ Lists remote certs and credentials. + Can be filters using the '--status' option, specifying + pending, approved, or denied. + The output printed includes: + user id of a ZMQ credential, or the common name of a CSR + remote address of the credential or csr + status of the credential or cert (either APPROVED, DENIED, or PENDING) + + """ + conn = opts.connection + if not conn: + _stderr.write("VOLTTRON is not running. This command " + "requires VOLTTRON platform to be running\n") + return + + output_view = [] + try: + pending_csrs = conn.server.vip.rpc.call(AUTH, "get_pending_csrs").get(timeout=4) + for csr in pending_csrs: + output_view.append({"entry": {"user_id": csr["identity"], + "address": csr["remote_ip_address"]}, + "status": csr["status"] + }) + except TimeoutError: + print("Certs timed out") + try: + approved_certs = conn.server.vip.rpc.call(AUTH, "get_authorization_approved").get(timeout=4) + for value in approved_certs: + output_view.append({"entry": value, "status": "APPROVED"}) + except TimeoutError: + print("Approved credentials timed out") + try: + denied_certs = conn.server.vip.rpc.call(AUTH, "get_authorization_denied").get(timeout=4) + for value in denied_certs: + output_view.append({"entry": value, "status": "DENIED"}) + except TimeoutError: + print("Denied credentials timed out") + try: + pending_certs = conn.server.vip.rpc.call(AUTH, "get_authorization_pending").get(timeout=4) + for value in pending_certs: + output_view.append({"entry": value, "status": "PENDING"}) + except TimeoutError: + print("Pending credentials timed out") + + if not output_view: + print("No remote certificates or credentials") + return + + if opts.status == "approved": + output_view = [output for output in output_view if output["status"] == "APPROVED"] + + elif opts.status == "denied": + output_view = [output for output in output_view if output["status"] == "DENIED"] + + elif opts.status == "pending": + output_view = [output for output in output_view if output["status"] == "PENDING"] + + elif opts.status is not None: + _stdout.write("Invalid parameter. Please use 'approved', 'denied', 'pending', or leave blank to list all.\n") + return + + if len(output_view) == 0: + print(f"No {opts.status} remote certificates or credentials") + return + + for output in output_view: + for value in output["entry"]: + if not output["entry"][value]: + output["entry"][value] = "-" + + userid_width = max(5, max(len(str(output["entry"]["user_id"])) for output in output_view)) + address_width = max(5, max(len(str(output["entry"]["address"])) for output in output_view)) + status_width = max(5, max(len(str(output["status"])) for output in output_view)) + fmt = '{:{}} {:{}} {:{}}\n' + _stderr.write( + fmt.format('USER_ID', userid_width, + 'ADDRESS', address_width, + 'STATUS', status_width)) + fmt = '{:{}} {:{}} {:{}}\n' + for output in output_view: + _stdout.write(fmt.format(output["entry"]["user_id"], userid_width, + output["entry"]["address"], address_width, + output["status"], status_width)) + +def approve_remote(opts): + """Approves either a pending CSR or ZMQ credential. + The platform must be running for this command to succeed. + :param opts.user_id: The ZMQ credential user_id or pending CSR common name + :type opts.user_id: str + """ + conn = opts.connection + if not conn: + _stderr.write("VOLTTRON is not running. This command " + "requires VOLTTRON platform to be running\n") + return + conn.server.vip.rpc.call(AUTH, "approve_authorization_failure", opts.user_id).get(timeout=4) + +def deny_remote(opts): + """Denies either a pending CSR or ZMQ credential. + The platform must be running for this command to succeed. + :param opts.user_id: The ZMQ credential user_id or pending CSR common name + :type opts.user_id: str + """ + conn = opts.connection + if not conn: + _stderr.write("VOLTTRON is not running. This command " + "requires VOLTTRON platform to be running\n") + return + conn.server.vip.rpc.call(AUTH, "deny_authorization_failure", opts.user_id).get(timeout=4) + + +def delete_remote(opts): + """Deletes either a pending CSR or ZMQ credential. + The platform must be running for this command to succeed. + :param opts.user_id: The ZMQ credential user_id or pending CSR common name + :type opts.user_id: str + """ + conn = opts.connection + if not conn: + _stderr.write("VOLTTRON is not running. This command " + "requires VOLTTRON platform to be running\n") + return + conn.server.vip.rpc.call(AUTH, "delete_authorization_failure", opts.user_id).get(timeout=4) + +# the following global variables are used to update the cache so +# that we don't ask the platform too many times for the data +# associated with health. +health_cache_timeout_date = None +health_cache_timeout = 5 +health_cache = {} + + +def update_health_cache(opts): + global health_cache_timeout_date + + t_now = datetime.now() + do_update = True + # Make sure we update if we don't have any health dicts, or if the cache has timed out. + if health_cache_timeout_date is not None and t_now < health_cache_timeout_date and health_cache: + do_update = False + + if do_update: + health_cache.clear() + health_cache.update(opts.connection.server.vip.rpc.call(PLATFORM_HEALTH, 'get_platform_health').get(timeout=4)) + health_cache_timeout_date = datetime.now() + timedelta(seconds=health_cache_timeout) + + def status_agents(opts): agents = {agent.uuid: agent for agent in _list_agents(opts.aip)} status = {} @@ -784,11 +1018,16 @@ def get_status(agent): return '' def get_health(agent): + update_health_cache(opts) + try: - # TODO Modify this later so that we aren't calling peerlist before we call the status of the agent. - if agent.vip_identity in opts.connection.server.vip.peerlist().get(timeout=4): - return opts.connection.server.vip.rpc.call(agent.vip_identity, - 'health.get_status_json').get(timeout=4)['status'] + health_dict = health_cache.get(agent.vip_identity) + + if health_dict: + if opts.json: + return health_dict + else: + return health_dict.get('message', '') else: return '' except (VIPError, gevent.Timeout): @@ -801,16 +1040,23 @@ def agent_health(opts): agents = {agent.uuid: agent for agent in _list_agents(opts.aip)}.values() agents = get_filtered_agents(opts, agents) if not agents: - _stderr.write('No installed Agents found\n') + if not opts.json: + _stderr.write('No installed Agents found\n') + else: + _stdout.write(f'{jsonapi.dumps({}, indent=2)}\n') return agent = agents.pop() - try: - _stderr.write(jsonapi.dumps( - opts.connection.server.vip.rpc.call(agent.vip_identity, 'health.get_status_json').get(timeout=4), - indent=4) + '\n' - ) - except VIPError: - print("Agent {} is not running on the Volttron platform.".format(agent.uuid)) + update_health_cache(opts) + + data = health_cache.get(agent.vip_identity) + + if not data: + if not opts.json: + _stdout.write(f'No health associated with {agent.vip_identity}\n') + else: + _stdout.write(f'{jsonapi.dumps({}, indent=2)}\n') + else: + _stdout.write(f'{jsonapi.dumps(data, indent=4)}\n') def clear_status(opts): @@ -1267,7 +1513,7 @@ def update_auth(opts): def add_role(opts): auth_file = _get_auth_file(opts.volttron_home) - roles = auth_file.read()[2] + roles = auth_file.read()[3] if opts.role in roles: _stderr.write('role "{}" already exists\n'.format(opts.role)) return @@ -1278,13 +1524,13 @@ def add_role(opts): def list_roles(opts): auth_file = _get_auth_file(opts.volttron_home) - roles = auth_file.read()[2] + roles = auth_file.read()[3] _print_two_columns(roles, 'ROLE', 'CAPABILITIES') def update_role(opts): auth_file = _get_auth_file(opts.volttron_home) - roles = auth_file.read()[2] + roles = auth_file.read()[3] if opts.role not in roles: _stderr.write('role "{}" does not exist\n'.format(opts.role)) return @@ -1299,7 +1545,7 @@ def update_role(opts): def remove_role(opts): auth_file = _get_auth_file(opts.volttron_home) - roles = auth_file.read()[2] + roles = auth_file.read()[3] if opts.role not in roles: _stderr.write('role "{}" does not exist\n'.format(opts.role)) return @@ -1310,7 +1556,7 @@ def remove_role(opts): def add_group(opts): auth_file = _get_auth_file(opts.volttron_home) - groups = auth_file.read()[1] + groups = auth_file.read()[2] if opts.group in groups: _stderr.write('group "{}" already exists\n'.format(opts.group)) return @@ -1321,13 +1567,13 @@ def add_group(opts): def list_groups(opts): auth_file = _get_auth_file(opts.volttron_home) - groups = auth_file.read()[1] + groups = auth_file.read()[2] _print_two_columns(groups, 'GROUPS', 'ROLES') def update_group(opts): auth_file = _get_auth_file(opts.volttron_home) - groups = auth_file.read()[1] + groups = auth_file.read()[2] if opts.group not in groups: _stderr.write('group "{}" does not exist\n'.format(opts.group)) return @@ -1342,7 +1588,7 @@ def update_group(opts): def remove_group(opts): auth_file = _get_auth_file(opts.volttron_home) - groups = auth_file.read()[1] + groups = auth_file.read()[2] if opts.group not in groups: _stderr.write('group "{}" does not exist\n'.format(opts.group)) return @@ -1389,7 +1635,10 @@ def _show_filtered_agents(opts, field_name, field_callback, agents=None): agents = get_filtered_agents(opts, agents) if not agents: - _stderr.write('No installed Agents found\n') + if not opts.json: + _stderr.write('No installed Agents found\n') + else: + _stdout.write(f'{jsonapi.dumps({}, indent=2)}\n') return agents = sorted(agents, key=lambda x: x.name) if not opts.min_uuid_len: @@ -1400,14 +1649,27 @@ def _show_filtered_agents(opts, field_name, field_callback, agents=None): tag_width = max(3, max(len(agent.tag or '') for agent in agents)) identity_width = max(3, max(len(agent.vip_identity or '') for agent in agents)) fmt = '{} {:{}} {:{}} {:{}} {:>6}\n' - _stderr.write( - fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width, - 'TAG', tag_width, field_name)) - for agent in agents: - _stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width, - agent.vip_identity, identity_width, - agent.tag or '', tag_width, - field_callback(agent))) + + if not opts.json: + _stderr.write( + fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width, + 'TAG', tag_width, field_name)) + for agent in agents: + _stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width, + agent.vip_identity, identity_width, + agent.tag or '', tag_width, + field_callback(agent))) + else: + json_obj = {} + for agent in agents: + json_obj[agent.vip_identity] = { + 'agent_uuid': agent.uuid, + 'name': agent.name, + 'identity': agent.vip_identity, + 'agent_tag': agent.tag or '', + field_name: field_callback(agent), + } + _stdout.write(f'{jsonapi.dumps(json_obj, indent=2)}\n') def _show_filtered_agents_status(opts, status_callback, health_callback, agents=None): @@ -1434,45 +1696,69 @@ def _show_filtered_agents_status(opts, status_callback, health_callback, agents= if not agents: agents = _list_agents(opts.aip) + # Find max before so the uuid of the agent is available + # when a usre has filtered the list. + if not opts.min_uuid_len: + n = 36 + else: + n = max(_calc_min_uuid_length(agents), opts.min_uuid_len) + agents = get_filtered_agents(opts, agents) if not agents: - _stderr.write('No installed Agents found\n') + if not opts.json: + _stderr.write('No installed Agents found\n') + else: + _stdout.write(f'{jsonapi.dumps({}, indent=2)}\n') return agents = sorted(agents, key=lambda x: x.name) - if not opts.min_uuid_len: - n = 36 - else: - n = max(_calc_min_uuid_length(agents), opts.min_uuid_len) - name_width = max(5, max(len(agent.name) for agent in agents)) - tag_width = max(3, max(len(agent.tag or '') for agent in agents)) - identity_width = max(3, max(len(agent.vip_identity or '') for agent in agents)) - if is_secure_mode(): - user_width = max(3, max(len(agent.agent_user or '') for agent in agents)) - fmt = '{} {:{}} {:{}} {:{}} {:{}} {:>6} {:>15}\n' - _stderr.write( - fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width, - 'TAG', tag_width, 'AGENT_USER', user_width, 'STATUS', 'HEALTH')) - fmt = '{} {:{}} {:{}} {:{}} {:{}} {:<15} {:<}\n' - for agent in agents: - status_str = status_callback(agent) - _stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width, - agent.vip_identity, identity_width, - agent.tag or '', tag_width, - agent.agent_user if status_str.startswith("running") else "", user_width, - status_str, health_callback(agent))) + if not opts.json: + name_width = max(5, max(len(agent.name) for agent in agents)) + tag_width = max(3, max(len(agent.tag or '') for agent in agents)) + identity_width = max(3, max(len(agent.vip_identity or '') for agent in agents)) + if is_secure_mode(): + user_width = max(3, max(len(agent.agent_user or '') for agent in agents)) + fmt = '{} {:{}} {:{}} {:{}} {:{}} {:>6} {:>15}\n' + _stderr.write( + fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width, + 'TAG', tag_width, 'AGENT_USER', user_width, 'STATUS', 'HEALTH')) + fmt = '{} {:{}} {:{}} {:{}} {:{}} {:<15} {:<}\n' + for agent in agents: + status_str = status_callback(agent) + agent_health_dict = health_callback(agent) + _stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width, + agent.vip_identity, identity_width, + agent.tag or '', tag_width, + agent.agent_user if status_str.startswith("running") else "", user_width, + status_str, health_callback(agent))) + else: + fmt = '{} {:{}} {:{}} {:{}} {:>6} {:>15}\n' + _stderr.write( + fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width, + 'TAG', tag_width, 'STATUS', 'HEALTH')) + fmt = '{} {:{}} {:{}} {:{}} {:<15} {:<}\n' + for agent in agents: + _stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width, + agent.vip_identity, identity_width, + agent.tag or '', tag_width, + status_callback(agent), health_callback(agent))) else: - fmt = '{} {:{}} {:{}} {:{}} {:>6} {:>15}\n' - _stderr.write( - fmt.format(' ' * n, 'AGENT', name_width, 'IDENTITY', identity_width, - 'TAG', tag_width, 'STATUS', 'HEALTH')) - fmt = '{} {:{}} {:{}} {:{}} {:<15} {:<}\n' + json_obj = {} for agent in agents: - _stdout.write(fmt.format(agent.uuid[:n], agent.name, name_width, - agent.vip_identity, identity_width, - agent.tag or '', tag_width, - status_callback(agent), health_callback(agent))) + json_obj[agent.vip_identity] = { + 'agent_uuid': agent.uuid, + 'name': agent.name, + 'identity': agent.vip_identity, + 'agent_tag': agent.tag or '', + 'status': status_callback(agent), + 'health': health_callback(agent), + } + if is_secure_mode(): + json_obj[agent.vip_identity]['agent_user'] = agent.agent_user if \ + json_obj[agent.vip_identity]['status'].startswith('running') else '' + _stdout.write(f'{jsonapi.dumps(json_obj, indent=2)}\n') + def get_agent_publickey(opts): @@ -2139,6 +2425,7 @@ def main(argv=sys.argv): parser.add_argument( '--show-config', action='store_true', help=argparse.SUPPRESS) + parser.add_argument("--json", action="store_true", default=False, help="format output to json") parser.add_help_argument() parser.set_defaults( @@ -2149,29 +2436,14 @@ def main(argv=sys.argv): top_level_subparsers = parser.add_subparsers(title='commands', metavar='', dest='command') - def add_parser(*args, **kwargs): + def add_parser(*args, **kwargs) -> argparse.ArgumentParser: parents = kwargs.get('parents', []) parents.append(global_args) kwargs['parents'] = parents subparser = kwargs.pop("subparser", top_level_subparsers) return subparser.add_parser(*args, **kwargs) - install = add_parser('install', help='install agent from wheel', - epilog='Optionally you may specify the --tag argument to tag the ' - 'agent during install without requiring a separate call to ' - 'the tag command. ') - install.add_argument('wheel', help='path to agent wheel') - install.add_argument('--tag', help='tag for the installed agent') - install.add_argument('--vip-identity', help='VIP IDENTITY for the installed agent. ' - 'Overrides any previously configured VIP IDENTITY.') - if HAVE_RESTRICTED: - install.add_argument('--verify', action='store_true', - dest='verify_agents', - help='verify agent integrity during install') - install.add_argument('--no-verify', action='store_false', - dest='verify_agents', - help=argparse.SUPPRESS) - install.set_defaults(func=install_agent, verify_agents=True) + add_install_agent_parser(add_parser, HAVE_RESTRICTED) tag = add_parser('tag', parents=[filterable], help='set, show, or remove agent tag') @@ -2281,6 +2553,38 @@ def add_parser(*args, **kwargs): help=argparse.SUPPRESS) upgrade.set_defaults(func=upgrade_agent, verify_agents=True) + # ==================================================== + # rpc commands + # ==================================================== + rpc_ctl = add_parser('rpc', + help='rpc controls') + + rpc_subparsers = rpc_ctl.add_subparsers(title='subcommands', metavar='', dest='store_commands') + + rpc_code = add_parser("code", subparser=rpc_subparsers, help="shows how to use rpc call in other agents") + + rpc_code.add_argument('pattern', nargs='*', + help='Identity of agent, followed by method(s)' + '') + rpc_code.add_argument('-v', '--verbose', action='store_true', + help="list all subsystem rpc methods in addition to the agent's rpc methods") + + rpc_code.set_defaults(func=list_agent_rpc_code, min_uuid_len=1) + + rpc_list = add_parser("list", subparser=rpc_subparsers, help="lists all agents and their rpc methods") + + rpc_list.add_argument('-i', '--vip', dest='by_vip', action='store_true', + help='filter by vip identity') + + rpc_list.add_argument('pattern', nargs='*', + help='UUID or name of agent') + + rpc_list.add_argument('-v', '--verbose', action='store_true', + help="list all subsystem rpc methods in addition to the agent's rpc methods. If a method " + "is specified, display the doc-string associated with the method.") + + rpc_list.set_defaults(func=list_agents_rpc, min_uuid_len=1) + # ==================================================== # certs commands # ==================================================== @@ -2445,6 +2749,34 @@ def add_parser(*args, **kwargs): help='remove (rather than append) given capabilities') auth_update_role.set_defaults(func=update_role) + auth_remote = add_parser('remote', subparser=auth_subparsers, + help="manage pending RMQ certs and ZMQ credentials") + auth_remote_subparsers = auth_remote.add_subparsers(title='remote subcommands', metavar='', dest='store_commands') + + auth_remote_list_cmd = add_parser("list", subparser=auth_remote_subparsers, + help="lists approved, denied, and pending certs and credentials" + ) + auth_remote_list_cmd.add_argument("--status", help="Specify approved, denied, or pending") + auth_remote_list_cmd.set_defaults(func=list_remotes) + + auth_remote_approve_cmd = add_parser("approve", subparser=auth_remote_subparsers, + help="approves pending or denied remote connection") + auth_remote_approve_cmd.add_argument("user_id", help="user_id or identity of pending credential or cert to approve") + auth_remote_approve_cmd.set_defaults(func=approve_remote) + + auth_remote_deny_cmd = add_parser("deny", subparser=auth_remote_subparsers, + help="denies pending or denied remote connection") + auth_remote_deny_cmd.add_argument("user_id", + help="user_id or identity of pending credential or cert to deny") + auth_remote_deny_cmd.set_defaults(func=deny_remote) + + auth_remote_delete_cmd = add_parser("delete", subparser=auth_remote_subparsers, + help="approves pending or denied remote connection") + auth_remote_delete_cmd.add_argument("user_id", + help="user_id or identity of pending credential or cert to delete") + auth_remote_delete_cmd.set_defaults(func=delete_remote) + + # ==================================================== # config commands # ==================================================== @@ -2728,7 +3060,10 @@ def add_parser(*args, **kwargs): opts.aip = aipmod.AIPplatform(opts) opts.aip.setup() - opts.connection = ControlConnection(opts.vip_address) + + opts.connection = None + if utils.is_volttron_running(volttron_home): + opts.connection = ControlConnection(opts.vip_address) try: with gevent.Timeout(opts.timeout): @@ -2743,11 +3078,27 @@ def add_parser(*args, **kwargs): _stderr.write("Invalid command: '{}' or command requires additional arguments\n".format(opts.command)) parser.print_help() return 1 - # except Exception as exc: - # print_tb = traceback.print_exc - # error = str(exc) - else: - return 0 + except SystemExit as exc: + # Handles if sys.exit is called from within a function if not 0 + # then we know there was an error and processing will continue + # else we return 0 from here. This has the added effect of + # allowing us to cascade short circuit calls. + if exc.args[0] != 0: + print_tb = exc.print_tb + error = exc.message + else: + return 0 + finally: + # make sure the connection to the server is closed when this scriopt is about to exit. + if opts.connection: + try: + opts.connection.server.core.stop() + except Unreachable: + # its ok for this to fail at this point it might not even be valid. + pass + finally: + opts.connection = None + if opts.debug: print_tb() _stderr.write('{}: error: {}\n'.format(opts.command, error)) diff --git a/volttron/platform/dbutils/basedb.py b/volttron/platform/dbutils/basedb.py index a92c164738..0e3dd06039 100644 --- a/volttron/platform/dbutils/basedb.py +++ b/volttron/platform/dbutils/basedb.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,20 +41,22 @@ import importlib import logging import threading -from gevent.local import local - +import sqlite3 import sys from abc import abstractmethod +from gevent.local import local + from volttron.platform.agent import utils from volttron.platform import jsonapi -import sqlite3 utils.setup_logging() _log = logging.getLogger(__name__) class ConnectionError(Exception): - """Custom class for connection errors""" + """ + Custom class for connection errors + """ pass @@ -68,32 +70,26 @@ def closing(obj): except BaseException as exc: # if exc.__class__.__module__ == 'exceptions': if exc.__class__.__module__ == 'builtins': - # Don't ignore built-in exceptions because they likely indicate - # a bug that should stop execution. psycopg2.Error subclasses - # Exception, so the module must also be checked. :-( + # Don't ignore built-in exceptions because they likely indicate a bug that should stop execution. + # psycopg2.Error subclasses Exception, so the module must also be checked. raise - _log.exception('An exception was raised while closing ' - 'the cursor and is being ignored.') + _log.exception('An exception was raised while closing the cursor and is being ignored.') class DbDriver(object): """ Parent class used by :py:class:`sqlhistorian.historian.SQLHistorian` to do the database operations. This class is inherited by - - :py:class:`volttron.platform.dbutils.mysqlfuncts.MySqlFuncts` - :py:class:`volttron.platform.dbutils.sqlitefuncts.SqlLiteFuncts` - """ def __init__(self, dbapimodule, **kwargs): thread_name = threading.currentThread().getName() if callable(dbapimodule): - _log.debug("Constructing Driver for %s in thread: %s", - dbapimodule.__name__, thread_name) + _log.debug("Constructing Driver for %s in thread: %s", dbapimodule.__name__, thread_name) connect = dbapimodule else: - _log.debug("Constructing Driver for %s in thread: %s", - dbapimodule, thread_name) + _log.debug("Constructing Driver for %s in thread: %s", dbapimodule, thread_name) _log.debug("kwargs for connect is %r", kwargs) dbapimodule = importlib.import_module(dbapimodule) connect = lambda: dbapimodule.connect(**kwargs) @@ -107,7 +103,6 @@ def bulk_insert(self): Function to meet bulk insert requirements. This function can be overridden by historian drivers to yield the required method for data insertion during bulk inserts in the respective historians. In this generic case it will yield the single insert method - :yields: insert method """ yield self.insert_data @@ -120,8 +115,7 @@ def cursor(self): self.stash.cursor = self.__connection.cursor() return self.stash.cursor except Exception: - _log.warn("An exception occurred while creating " - "a cursor. Will try establishing connection again") + _log.warning("An exception occurred while creating a cursor. Will try establishing connection again") self.__connection = None try: self.__connection = self.__connect() @@ -129,8 +123,7 @@ def cursor(self): _log.error("Could not connect to database. Raise ConnectionError") raise ConnectionError(e).with_traceback(sys.exc_info()[2]) if self.__connection is None: - raise ConnectionError( - "Unknown error. Could not connect to database") + raise ConnectionError("Unknown error. Could not connect to database") # if any exception happens here have it go to the caller. self.stash.cursor = self.__connection.cursor() @@ -141,9 +134,7 @@ def read_tablenames_from_db(self, meta_table_name): """ Reads names of the tables used by this historian to store data, topics, metadata, aggregate topics and aggregate metadata - - :param meta_table_name: The volttron metadata table in which table - definitions are stored + :param meta_table_name: The volttron metadata table in which table definitions are stored :return: table names .. code-block:: python @@ -155,8 +146,7 @@ def read_tablenames_from_db(self, meta_table_name): 'agg_meta_table':name of table that store aggregate metadata } """ - rows = self.select("SELECT table_id, table_name, table_prefix from " + - meta_table_name, None) + rows = self.select("SELECT table_id, table_name, table_prefix from " + meta_table_name, None) table_names = dict() table_prefix = "" table_map = {} @@ -166,10 +156,8 @@ def read_tablenames_from_db(self, meta_table_name): table_prefix = row[2] + "_" if row[2] else "" table_names[row[0]] = table_prefix + row[1] - table_names['agg_topics_table'] = table_prefix + \ - 'aggregate_' + table_map['topics_table'] - table_names['agg_meta_table'] = table_prefix + 'aggregate_' + \ - table_map['meta_table'] + table_names['agg_topics_table'] = table_prefix + 'aggregate_' + table_map['topics_table'] + table_names['agg_meta_table'] = table_prefix + 'aggregate_' + table_map['meta_table'] return table_names @abstractmethod @@ -183,7 +171,6 @@ def setup_historian_tables(self): def get_topic_map(self): """ Returns details of topics in database - :return: two dictionaries. - First one maps topic_name.lower() to topic id and - Second one maps topic_name.lower() to topic name @@ -194,10 +181,8 @@ def get_topic_map(self): def get_agg_topics(self): """ Get the list of aggregate topics available - :return: list of tuples containing - (agg_topic_name, agg_type, agg_time_period, configured topics/topic - name pattern) + (agg_topic_name, agg_type, agg_time_period, configured topics/topic name pattern) """ pass @@ -205,7 +190,6 @@ def get_agg_topics(self): def get_agg_topic_map(self): """ Get a map of aggregate_topics to aggregate_topic_id - :return: dict of format {(agg_topic_name, agg_type, agg_time_period):agg_topic_id} """ @@ -214,8 +198,7 @@ def get_agg_topic_map(self): @abstractmethod def query_topics_by_pattern(self, topic_pattern): """ - Return a map of {topi_name.lower():topic_id} that matches the given - pattern + Return a map of {topic_name.lower():topic_id} that matches the given pattern :param topic_pattern: pattern to match against topic_name :return: """ @@ -253,7 +236,6 @@ def insert_meta_query(self): def get_aggregation_list(self): """ Return list of aggregation supported by the specific data store - :return: list of aggregations """ pass @@ -283,7 +265,6 @@ def replace_agg_meta_stmt(self): def manage_db_size(self, history_limit_timestamp, storage_limit_gb): """ Optional function to manage database size. - :param history_limit_timestamp: remove all data older than this timestamp :param storage_limit_gb: remove oldest data until database is smaller than this value. """ @@ -292,37 +273,29 @@ def manage_db_size(self, history_limit_timestamp, storage_limit_gb): def insert_meta(self, topic_id, metadata): """ Inserts metadata for topic - :param topic_id: topic id for which metadata is inserted :param metadata: metadata - :return: True if execution completes. Raises exception if unable to - connect to database + :return: True if execution completes. Raises exception if unable to connect to database """ - self.execute_stmt(self.insert_meta_query(), - (topic_id, jsonapi.dumps(metadata)), commit=False) + self.execute_stmt(self.insert_meta_query(), (topic_id, jsonapi.dumps(metadata)), commit=False) return True def insert_data(self, ts, topic_id, data): """ Inserts data for topic - :param ts: timestamp :param topic_id: topic id for which data is inserted :param data: data value - :return: True if execution completes. raises Exception if unable to - connect to database + :return: True if execution completes. raises Exception if unable to connect to database """ - self.execute_stmt(self.insert_data_query(), - (ts, topic_id, jsonapi.dumps(data)), commit=False) + self.execute_stmt(self.insert_data_query(), (ts, topic_id, jsonapi.dumps(data)), commit=False) return True def insert_topic(self, topic): """ Insert a new topic - :param topic: topic to insert - :return: id of the topic inserted if insert was successful. - Raises exception if unable to connect to database + :return: id of the topic inserted if insert was successful. Raises exception if unable to connect to database """ with closing(self.cursor()) as cursor: cursor.execute(self.insert_topic_query(), (topic,)) @@ -331,55 +304,44 @@ def insert_topic(self, topic): def update_topic(self, topic, topic_id): """ Update a topic name - :param topic: new topic name :param topic_id: topic id for which update is done - :return: True if execution is complete. Raises exception if unable to - connect to database + :return: True if execution is complete. Raises exception if unable to connect to database """ - self.execute_stmt(self.update_topic_query(), (topic, topic_id), - commit=False) + self.execute_stmt(self.update_topic_query(), (topic, topic_id), commit=False) return True def insert_agg_meta(self, topic_id, metadata): """ Inserts metadata for aggregate topic - :param topic_id: aggregate topic id for which metadata is inserted :param metadata: metadata - :return: True if execution completes. Raises exception if connection to - database fails + :return: True if execution completes. Raises exception if connection to database fails """ - self.execute_stmt(self.replace_agg_meta_stmt(), - (topic_id, jsonapi.dumps(metadata)), commit=False) + self.execute_stmt(self.replace_agg_meta_stmt(), (topic_id, jsonapi.dumps(metadata)), commit=False) return True def insert_agg_topic(self, topic, agg_type, agg_time_period): """ Insert a new aggregate topic - :param topic: topic name to insert :param agg_type: type of aggregation :param agg_time_period: time period of aggregation - :return: id of the topic inserted if insert was successful. - Raises exception if unable to connect to database + :return: id of the topic inserted if insert was successful. Raises exception if unable to connect to database """ with closing(self.cursor()) as cursor: - cursor.execute(self.insert_agg_topic_stmt(), - (topic, agg_type, agg_time_period)) + cursor.execute(self.insert_agg_topic_stmt(), (topic, agg_type, agg_time_period)) return cursor.lastrowid def update_agg_topic(self, agg_id, agg_topic_name): """ Update a aggregate topic name - :param agg_id: topic id for which update is done :param agg_topic_name: new aggregate topic name :return: True if execution is complete. Raises exception if unable to connect to database """ - self.execute_stmt(self.update_agg_topic_stmt(), - (agg_topic_name, agg_id),commit=False) + self.execute_stmt(self.update_agg_topic_stmt(), (agg_topic_name, agg_id),commit=False) return True def commit(self): @@ -394,17 +356,11 @@ def commit(self): return True except sqlite3.OperationalError as e: if "database is locked" in str(e): - _log.error("EXCEPTION: SQLITE3 Database is locked. This " - "error could occur when there are multiple " - "simultaneous read and write requests, making " - "individual request to wait more than the " - "default timeout period. If you are using " - "sqlite for frequent reads and write, please " - "configure a higher timeout in agent " - "configuration under \n" - "config[\"connection\"][\"params\"][" - "\"timeout\"] " - "Default value is 10. Timeout units is seconds") + _log.error("EXCEPTION: SQLITE3 Database is locked. This error could occur when there are multiple " + "simultaneous read and write requests, making individual request to wait more than the " + "default timeout period. If you are using sqlite for frequent reads and write, please " + "configure a higher timeout in agent configuration under \nconfig[\"connection\"]" + "[\"params\"][\"timeout\"] Default value is 10. Timeout units is seconds") raise _log.warning('connection was null during commit phase.') return False @@ -412,7 +368,6 @@ def commit(self): def rollback(self): """ Rollback a transaction - :return: True if successful, False otherwise """ if self.__connection is not None: @@ -432,7 +387,6 @@ def close(self): def select(self, query, args=None, fetch_all=True): """ Execute a select statement - :param query: select statement :param args: arguments for the where clause :param fetch_all: Set to True if function should return retrieve all @@ -456,11 +410,9 @@ def select(self, query, args=None, fetch_all=True): def execute_stmt(self, stmt, args=None, commit=False): """ Execute a sql statement - :param stmt: the statement to execute :param args: optional arguments - :param commit: True if transaction should be committed. Defaults to - False + :param commit: True if transaction should be committed. Defaults to False :return: count of the number of affected rows """ if args is None: @@ -474,11 +426,9 @@ def execute_stmt(self, stmt, args=None, commit=False): def execute_many(self, stmt, args, commit=False): """ Execute a sql statement with multiple args - :param stmt: the statement to execute :param args: optional arguments - :param commit: True if transaction should be committed. Defaults to - False + :param commit: True if transaction should be committed. Defaults to False :return: count of the number of affected rows """ with closing(self.cursor()) as cursor: @@ -488,29 +438,20 @@ def execute_many(self, stmt, args, commit=False): return cursor.rowcount @abstractmethod - def query(self, topic_ids, id_name_map, start=None, end=None, - agg_type=None, - agg_period=None, skip=0, count=None, order="FIRST_TO_LAST"): + def query(self, topic_ids, id_name_map, start=None, end=None, agg_type=None, agg_period=None, skip=0, count=None, + order="FIRST_TO_LAST"): """ - Queries the raw historian data or aggregate data and returns the - results of the query - + Queries the raw historian data or aggregate data and returns the results of the query :param topic_ids: list of topic ids to query for. :param id_name_map: dictionary that maps topic id to topic name :param start: Start of query timestamp as a datetime. :param end: End of query timestamp as a datetime. - :param agg_type: If this is a query for aggregate data, the type of - aggregation ( for example, sum, avg) - :param agg_period: If this is a query for aggregate data, the time - period of aggregation + :param agg_type: If this is a query for aggregate data, the type of aggregation ( for example, sum, avg) + :param agg_period: If this is a query for aggregate data, the time period of aggregation :param skip: Skip this number of results. - :param count: Limit results to this value. When the query is for - multiple topics, count applies to individual topics. For - example, a query on 2 topics with count=5 will return 5 - records for each topic - :param order: How to order the results, either "FIRST_TO_LAST" or - "LAST_TO_FIRST" - :type topic: str or list + :param count: Limit results to this value. When the query is for multiple topics, count applies to individual + topics. For example, a query on 2 topics with count=5 will return 5 records for each topic + :param order: How to order the results, either "FIRST_TO_LAST" or "LAST_TO_FIRST" :type start: datetime :type end: datetime :type skip: int @@ -533,66 +474,51 @@ def query(self, topic_ids, id_name_map, start=None, end=None, @abstractmethod def create_aggregate_store(self, agg_type, period): """ - Create the data structure (table or collection) that is going to store - the aggregate data for the give aggregation type and aggregation - time period. Table name should be constructed as _ - + Create the data structure (table or collection) that is going to store the aggregate data for the give + aggregation type and aggregation time period. Table name should be constructed as _ :param agg_type: The type of aggregation. (avg, sum etc.) - :param agg_time_period: The time period of aggregation - :return - True if successful, False otherwise + :param period: The time period of aggregation + :return: True if successful, False otherwise """ pass @abstractmethod def insert_aggregate_stmt(self, table_name): """ - The sql statement to insert collected aggregate for a given time - period into database - - :param table_name: name of the table into which the aggregate data - needs to be inserted - :return: sql insert/replace statement to insert aggregate data for a - specific time slice + The sql statement to insert collected aggregate for a given time period into database + :param table_name: name of the table into which the aggregate data needs to be inserted + :return: sql insert/replace statement to insert aggregate data for a specific time slice :rtype: str """ pass - def insert_aggregate(self, agg_topic_id, agg_type, period, ts, - data, topic_ids): + def insert_aggregate(self, agg_topic_id, agg_type, period, ts, data, topic_ids): """ Insert aggregate data collected for a specific time period into database. Data is inserted into _ table - :param agg_topic_id: topic id :param agg_type: type of aggregation :param period: time period of aggregation :param ts: end time of aggregation period (not inclusive) :param data: computed aggregate - :param topic_ids: topic ids or topic ids for which aggregate was - computed - :return: True if execution was successful, raises exception - in case of connection failures + :param topic_ids: topic ids or topic ids for which aggregate was computed + :return: True if execution was successful, raises exception in case of connection failures """ table_name = agg_type + '_' + period _log.debug("Inserting aggregate: {} {} {} {} into table {}".format( ts, agg_topic_id, jsonapi.dumps(data), str(topic_ids), table_name)) - self.execute_stmt( - self.insert_aggregate_stmt(table_name), - (ts, agg_topic_id, jsonapi.dumps(data), str(topic_ids)), - commit=True) + self.execute_stmt(self.insert_aggregate_stmt(table_name), + (ts, agg_topic_id, jsonapi.dumps(data), str(topic_ids)), commit=True) return True @abstractmethod def collect_aggregate(self, topic_ids, agg_type, start=None, end=None): """ Collect the aggregate data by querying the historian's data store - - :param topic_ids: list of topic ids for which aggregation should be - performed. + :param topic_ids: list of topic ids for which aggregation should be performed. :param agg_type: type of aggregation - :param start_time: start time for query (inclusive) - :param end_time: end time for query (exclusive) - :return: a tuple of (aggregated value, count of records over which - this aggregation was computed) + :param start: start time for query (inclusive) + :param end: end time for query (exclusive) + :return: a tuple of (aggregated value, count of records over which this aggregation was computed) """ pass diff --git a/volttron/platform/dbutils/crateutils.py b/volttron/platform/dbutils/crateutils.py index 23d210722c..efb4296f4d 100644 --- a/volttron/platform/dbutils/crateutils.py +++ b/volttron/platform/dbutils/crateutils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/dbutils/influxdbutils.py b/volttron/platform/dbutils/influxdbutils.py index 4621227188..7bc29b01cc 100644 --- a/volttron/platform/dbutils/influxdbutils.py +++ b/volttron/platform/dbutils/influxdbutils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/dbutils/mongoutils.py b/volttron/platform/dbutils/mongoutils.py index bac8d5921b..dabcab0a41 100644 --- a/volttron/platform/dbutils/mongoutils.py +++ b/volttron/platform/dbutils/mongoutils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -250,4 +250,4 @@ def _negate_condition(condition): new_value.append(v) return {key: new_value} else: - return {key: {'$not': value}} \ No newline at end of file + return {key: {'$not': value}} diff --git a/volttron/platform/dbutils/mysqlfuncts.py b/volttron/platform/dbutils/mysqlfuncts.py index 483da2088b..9d36ea6c40 100644 --- a/volttron/platform/dbutils/mysqlfuncts.py +++ b/volttron/platform/dbutils/mysqlfuncts.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -107,14 +107,14 @@ def setup_historian_tables(self): try: if self.MICROSECOND_SUPPORT: self.execute_stmt( - 'CREATE TABLE IF NOT EXISTS ' + self.data_table + + 'CREATE TABLE ' + self.data_table + ' (ts timestamp(6) NOT NULL,\ topic_id INTEGER NOT NULL, \ value_string TEXT NOT NULL, \ UNIQUE(topic_id, ts))') else: self.execute_stmt( - 'CREATE TABLE IF NOT EXISTS ' + self.data_table + + 'CREATE TABLE ' + self.data_table + ' (ts timestamp NOT NULL,\ topic_id INTEGER NOT NULL, \ value_string TEXT NOT NULL, \ @@ -122,13 +122,13 @@ def setup_historian_tables(self): self.execute_stmt('''CREATE INDEX data_idx ON ''' + self.data_table + ''' (ts ASC)''') - self.execute_stmt('''CREATE TABLE IF NOT EXISTS ''' + + self.execute_stmt('''CREATE TABLE ''' + self.topics_table + ''' (topic_id INTEGER NOT NULL AUTO_INCREMENT, topic_name varchar(512) NOT NULL, PRIMARY KEY (topic_id), UNIQUE(topic_name))''') - self.execute_stmt('''CREATE TABLE IF NOT EXISTS ''' + self.execute_stmt('''CREATE TABLE ''' + self.meta_table + '''(topic_id INTEGER NOT NULL, metadata TEXT NOT NULL, @@ -151,11 +151,16 @@ def setup_historian_tables(self): def record_table_definitions(self, tables_def, meta_table_name): _log.debug( "In record_table_def {} {}".format(tables_def, meta_table_name)) - self.execute_stmt( - 'CREATE TABLE IF NOT EXISTS ' + meta_table_name + - ' (table_id varchar(512) PRIMARY KEY, \ - table_name varchar(512) NOT NULL, \ - table_prefix varchar(512));') + + rows = self.select("show tables like %s", [meta_table_name]) + if rows: + _log.debug("Found meta data table {}. ".format(meta_table_name)) + else: + self.execute_stmt( + 'CREATE TABLE ' + meta_table_name + + ' (table_id varchar(512) PRIMARY KEY, \ + table_name varchar(512) NOT NULL, \ + table_prefix varchar(512));') table_prefix = tables_def.get('table_prefix', "") @@ -184,21 +189,26 @@ def setup_aggregate_historian_tables(self, meta_table_name): self.agg_topics_table = table_names.get('agg_topics_table', None) self.agg_meta_table = table_names.get('agg_meta_table', None) - self.execute_stmt( - 'CREATE TABLE IF NOT EXISTS ' + self.agg_topics_table + - ' (agg_topic_id INTEGER NOT NULL AUTO_INCREMENT, \ - agg_topic_name varchar(512) NOT NULL, \ - agg_type varchar(512) NOT NULL, \ - agg_time_period varchar(512) NOT NULL, \ - PRIMARY KEY (agg_topic_id), \ - UNIQUE(agg_topic_name, agg_type, agg_time_period));') - - self.execute_stmt( - 'CREATE TABLE IF NOT EXISTS ' + self.agg_meta_table + - '(agg_topic_id INTEGER NOT NULL, \ - metadata TEXT NOT NULL, \ - PRIMARY KEY(agg_topic_id));') - self.commit() + rows = self.select("show tables like %s", [self.agg_topics_table]) + if rows: + _log.debug("Found table {}. Historian table exists".format( + self.agg_topics_table)) + else: + self.execute_stmt( + 'CREATE TABLE ' + self.agg_topics_table + + ' (agg_topic_id INTEGER NOT NULL AUTO_INCREMENT, \ + agg_topic_name varchar(512) NOT NULL, \ + agg_type varchar(512) NOT NULL, \ + agg_time_period varchar(512) NOT NULL, \ + PRIMARY KEY (agg_topic_id), \ + UNIQUE(agg_topic_name, agg_type, agg_time_period));') + + self.execute_stmt( + 'CREATE TABLE ' + self.agg_meta_table + + '(agg_topic_id INTEGER NOT NULL, \ + metadata TEXT NOT NULL, \ + PRIMARY KEY(agg_topic_id));') + self.commit() _log.debug("Created aggregate topics and meta tables") def query(self, topic_ids, id_name_map, start=None, end=None, skip=0, @@ -396,18 +406,22 @@ def create_aggregate_store(self, agg_type, agg_time_period): if self.MICROSECOND_SUPPORT is None: self.init_microsecond_support() - stmt = "CREATE TABLE IF NOT EXISTS " + table_name + \ - " (ts timestamp(6) NOT NULL, topic_id INTEGER NOT NULL, " \ - "value_string TEXT NOT NULL, topics_list TEXT," \ - " UNIQUE(topic_id, ts)," \ - "INDEX (ts ASC))" - if not self.MICROSECOND_SUPPORT: - stmt = "CREATE TABLE IF NOT EXISTS " + table_name + \ - " (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, " \ + rows = self.select("show tables like %s", [table_name]) + if rows: + _log.debug("Found table {}. Historian table exists".format(table_name)) + else: + stmt = "CREATE TABLE " + table_name + \ + " (ts timestamp(6) NOT NULL, topic_id INTEGER NOT NULL, " \ "value_string TEXT NOT NULL, topics_list TEXT," \ " UNIQUE(topic_id, ts)," \ "INDEX (ts ASC))" - return self.execute_stmt(stmt, commit=True) + if not self.MICROSECOND_SUPPORT: + stmt = "CREATE TABLE " + table_name + \ + " (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, " \ + "value_string TEXT NOT NULL, topics_list TEXT," \ + " UNIQUE(topic_id, ts)," \ + "INDEX (ts ASC))" + return self.execute_stmt(stmt, commit=True) def insert_aggregate_stmt(self, table_name): return '''REPLACE INTO ''' + table_name + \ diff --git a/volttron/platform/dbutils/postgresqlfuncts.py b/volttron/platform/dbutils/postgresqlfuncts.py index 39683aaa05..39a69c5da6 100644 --- a/volttron/platform/dbutils/postgresqlfuncts.py +++ b/volttron/platform/dbutils/postgresqlfuncts.py @@ -118,7 +118,7 @@ def setup_historian_tables(self): if self.timescale_dialect: _log.debug("trying to create hypertable") self.execute_stmt(SQL( - "SELECT create_hypertable({}, 'ts')").format( + "SELECT create_hypertable({}, 'ts', if_not_exists => true)").format( Literal(self.data_table))) self.execute_stmt(SQL( 'CREATE INDEX ON {} (topic_id, ts)').format( @@ -164,17 +164,17 @@ def read_tablenames_from_db(self, meta_table_name): SQL('SELECT table_id, table_name FROM {}').format( Identifier(meta_table_name)))) prefix = tables.pop('', '') - tables['agg_topics_table'] = 'aggregate_' + tables['topics_table'] - tables['agg_meta_table'] = 'aggregate_' + tables['meta_table'] + tables['agg_topics_table'] = 'aggregate_' + tables.get('topics_table', 'topics') + tables['agg_meta_table'] = 'aggregate_' + tables.get('meta_table', 'meta') if prefix: tables = {key: prefix + '_' + name for key, name in tables.items()} return tables def setup_aggregate_historian_tables(self, meta_table_name): table_names = self.read_tablenames_from_db(meta_table_name) - self.data_table = table_names['data_table'] - self.topics_table = table_names['topics_table'] - self.meta_table = table_names['meta_table'] + self.data_table = table_names.get('data_table', 'data') + self.topics_table = table_names.get('topics_table', 'topics') + self.meta_table = table_names.get('meta_table', 'meta') self.agg_topics_table = table_names['agg_topics_table'] self.agg_meta_table = table_names['agg_meta_table'] self.execute_stmt(SQL( diff --git a/volttron/platform/dbutils/sqlitefuncts.py b/volttron/platform/dbutils/sqlitefuncts.py index fc444bfc58..01134b7c44 100644 --- a/volttron/platform/dbutils/sqlitefuncts.py +++ b/volttron/platform/dbutils/sqlitefuncts.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,37 +35,39 @@ # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} + import ast import errno import logging import sqlite3 import pytz import threading +import os +import re +from .basedb import DbDriver from collections import defaultdict from datetime import datetime from math import ceil -import os -import re -from .basedb import DbDriver from volttron.platform.agent import utils from volttron.platform import jsonapi +from volttron.platform.agent.utils import fix_sqlite3_datetime utils.setup_logging() _log = logging.getLogger(__name__) -from volttron.platform.agent.utils import fix_sqlite3_datetime -#Make sure sqlite3 datetime adapters are updated. +# Make sure sqlite3 datetime adapters are updated. fix_sqlite3_datetime() -""" -Implementation of SQLite3 database operation for -:py:class:`sqlhistorian.historian.SQLHistorian` and -:py:class:`sqlaggregator.aggregator.SQLAggregateHistorian` -For method details please refer to base class -:py:class:`volttron.platform.dbutils.basedb.DbDriver` -""" + class SqlLiteFuncts(DbDriver): + """ + Implementation of SQLite3 database operation for + :py:class:`sqlhistorian.historian.SQLHistorian` and + :py:class:`sqlaggregator.aggregator.SQLAggregateHistorian` + For method details please refer to base class + :py:class:`volttron.platform.dbutils.basedb.DbDriver` + """ def __init__(self, connect_params, table_names): database = connect_params['database'] thread_name = threading.currentThread().getName() @@ -74,7 +76,6 @@ def __init__(self, connect_params, table_names): if database == ':memory:': self.__database = database else: - self.__database = os.path.expandvars(os.path.expanduser(database)) db_dir = os.path.dirname(self.__database) @@ -97,8 +98,7 @@ def __init__(self, connect_params, table_names): connect_params['database'] = self.__database if 'detect_types' not in connect_params: - connect_params['detect_types'] = \ - sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES + connect_params['detect_types'] = sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES if 'timeout' not in connect_params.keys(): connect_params['timeout'] = 10 @@ -124,7 +124,7 @@ def setup_historian_tables(self): if auto_vacuum != 1: _log.info("auto_vacuum set to 0 (None), updating to 1 (full).") - _log.info("VACCUMing DB to cause new auto_vacuum setting to take effect. " + _log.info("VACCUUMing DB to cause new auto_vacuum setting to take effect. " "This could be slow on a large database.") self.select('''PRAGMA auto_vacuum=1''') self.select('''VACUUM;''') @@ -134,7 +134,7 @@ def setup_historian_tables(self): ''' (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, value_string TEXT NOT NULL, - UNIQUE(topic_id, ts))''',commit=False) + UNIQUE(topic_id, ts))''', commit=False) self.execute_stmt( '''CREATE INDEX IF NOT EXISTS data_idx ON ''' + self.data_table + ''' (ts ASC)''', commit=False) @@ -149,10 +149,8 @@ def setup_historian_tables(self): metadata TEXT NOT NULL)''', commit=True) _log.debug("Created data topics and meta tables") - def record_table_definitions(self, table_defs, meta_table_name): - _log.debug( - "In record_table_def {} {}".format(table_defs, meta_table_name)) + _log.debug("In record_table_def {} {}".format(table_defs, meta_table_name)) self.execute_stmt( 'CREATE TABLE IF NOT EXISTS ' + meta_table_name + ' (table_id TEXT PRIMARY KEY, \ @@ -172,7 +170,6 @@ def record_table_definitions(self, table_defs, meta_table_name): ['meta_table', table_defs['meta_table'], table_prefix]) self.commit() - def setup_aggregate_historian_tables(self, meta_table_name): table_names = self.read_tablenames_from_db(meta_table_name) @@ -196,9 +193,7 @@ def setup_aggregate_historian_tables(self, meta_table_name): _log.debug("Created aggregate topics and meta tables") self.commit() - - def query(self, topic_ids, id_name_map, start=None, end=None, - agg_type=None, agg_period=None, skip=0, count=None, + def query(self, topic_ids, id_name_map, start=None, end=None, agg_type=None, agg_period=None, skip=0, count=None, order="FIRST_TO_LAST"): """ This function should return the results of a query in the form: @@ -233,9 +228,8 @@ def query(self, topic_ids, id_name_map, start=None, end=None, where_clauses = ["WHERE topic_id = ?"] args = [topic_ids[0]] - # base historian converts naive timestamps to UTC, but if the - # start and end had explicit timezone info then they need to get - # converted to UTC since sqlite3 only store naive timestamp + # base historian converts naive timestamps to UTC, but if the start and end had explicit timezone info then they + # need to get converted to UTC since sqlite3 only store naive timestamp if start: start = start.astimezone(pytz.UTC) if end: @@ -252,7 +246,6 @@ def query(self, topic_ids, id_name_map, start=None, end=None, where_clauses.append("ts < ?") args.append(end) - where_statement = ' AND '.join(where_clauses) order_by = 'ORDER BY topic_id ASC, ts ASC' @@ -260,8 +253,7 @@ def query(self, topic_ids, id_name_map, start=None, end=None, order_by = ' ORDER BY topic_id DESC, ts DESC' # can't have an offset without a limit - # -1 = no limit and allows the user to - # provide just an offset + # -1 = no limit and allows the user to provide just an offset if count is None: count = -1 @@ -280,7 +272,6 @@ def query(self, topic_ids, id_name_map, start=None, end=None, _log.debug("Real Query: " + real_query) _log.debug("args: " + str(args)) - values = defaultdict(list) start_t = datetime.utcnow() for topic_id in topic_ids: @@ -289,29 +280,27 @@ def query(self, topic_ids, id_name_map, start=None, end=None, cursor = self.select(real_query, args, fetch_all=False) if cursor: for _id, ts, value in cursor: - values[id_name_map[topic_id]].append( - (utils.format_timestamp(ts), jsonapi.loads(value))) + values[id_name_map[topic_id]].append((utils.format_timestamp(ts), jsonapi.loads(value))) cursor.close() - _log.debug("Time taken to load results from db:{}".format( - datetime.utcnow()-start_t)) + _log.debug("Time taken to load results from db:{}".format(datetime.utcnow()-start_t)) return values def manage_db_size(self, history_limit_timestamp, storage_limit_gb): """ Manage database size. - :param history_limit_timestamp: remove all data older than this timestamp :param storage_limit_gb: remove oldest data until database is smaller than this value. """ - _log.debug("Managing store - timestamp limit: {} GB size limit: {}".format(history_limit_timestamp, storage_limit_gb)) + _log.debug("Managing store - timestamp limit: {} GB size limit: {}".format( + history_limit_timestamp, storage_limit_gb)) commit = False if history_limit_timestamp is not None: count = self.execute_stmt( - '''DELETE FROM ''' + self.data_table + \ + '''DELETE FROM ''' + self.data_table + ''' WHERE ts < ?''', (history_limit_timestamp,)) if count is not None and count > 0: @@ -330,10 +319,10 @@ def page_count(): while page_count() >= max_pages: count = self.execute_stmt( - '''DELETE FROM ''' + self.data_table + \ + '''DELETE FROM ''' + self.data_table + ''' WHERE ts IN - (SELECT ts FROM ''' + self.data_table + \ + (SELECT ts FROM ''' + self.data_table + ''' ORDER BY ts ASC LIMIT 100)''') @@ -391,10 +380,8 @@ def get_topic_map(self): def get_agg_topics(self): try: _log.debug("in get_agg_topics") - query = "SELECT agg_topic_name, agg_type, agg_time_period, " \ - "metadata FROM " + self.agg_topics_table + " as t, " + \ - self.agg_meta_table + " as m WHERE t.agg_topic_id = " \ - "m.agg_topic_id " + query = "SELECT agg_topic_name, agg_type, agg_time_period, metadata FROM " + self.agg_topics_table + \ + " as t, " + self.agg_meta_table + " as m WHERE t.agg_topic_id = m.agg_topic_id " rows = self.select(query, None) topics = [] for row in rows: @@ -404,7 +391,7 @@ def get_agg_topics(self): return topics except sqlite3.Error as e: if e.args[0][0:13] == 'no such table': - _log.warn("No such table : {}".format(self.agg_topics_table)) + _log.warning("No such table : {}".format(self.agg_topics_table)) return [] else: raise @@ -412,9 +399,7 @@ def get_agg_topics(self): def get_agg_topic_map(self): try: _log.debug("in get_agg_topic_map") - q = "SELECT agg_topic_id, agg_topic_name, agg_type, " \ - "agg_time_period " \ - "FROM " + self.agg_topics_table + q = "SELECT agg_topic_id, agg_topic_name, agg_type, agg_time_period FROM " + self.agg_topics_table rows = self.select(q, None) _log.debug("loading agg_topic map from db") id_map = dict() @@ -424,7 +409,7 @@ def get_agg_topic_map(self): return id_map except sqlite3.Error as e: if e.args[0][0:13] == 'no such table': - _log.warn("No such table : {}".format(self.agg_topics_table)) + _log.warning("No such table : {}".format(self.agg_topics_table)) return {} else: raise @@ -441,13 +426,10 @@ def regex_select(self, query, args, fetch_all=True, cache_size=None): conn = None cursor = None try: - conn = sqlite3.connect( - self.__database, - detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES) + conn = sqlite3.connect(self.__database, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES) if conn is None: - _log.error("Unable to connect to sqlite database {} ".format( - self.__database)) + _log.error("Unable to connect to sqlite database {} ".format(self.__database)) return [] conn.create_function("REGEXP", 2, SqlLiteFuncts.regexp) if cache_size: @@ -466,8 +448,7 @@ def regex_select(self, query, args, fetch_all=True, cache_size=None): else: return cursor, conn except Exception as e: - _log.error("Exception querying database based on regular " - "expression:{}".format(e.args)) + _log.error("Exception querying database based on regular expression:{}".format(e.args)) finally: if fetch_all: if cursor: @@ -478,38 +459,33 @@ def regex_select(self, query, args, fetch_all=True, cache_size=None): def query_topics_by_pattern(self, topic_pattern): id_map, name_map = self.get_topic_map() _log.debug("Contents of topics table {}".format(list(id_map.keys()))) - q = "SELECT topic_id, topic_name FROM " + self.topics_table + \ - " WHERE topic_name REGEXP '" + topic_pattern + "';" + q = "SELECT topic_id, topic_name FROM " + self.topics_table + " WHERE topic_name REGEXP '" + topic_pattern + \ + "';" rows = self.regex_select(q, None) _log.debug("loading topic map from db") id_map = dict() for t, n in rows: id_map[n] = t - _log.debug("topics that matched the pattern {} : {}".format( - topic_pattern, id_map)) + _log.debug("topics that matched the pattern {} : {}".format(topic_pattern, id_map)) return id_map def create_aggregate_store(self, agg_type, period): - table_name = agg_type + '''_''' + period - # period = sqlutils.parse_time_period(period) stmt = "CREATE TABLE IF NOT EXISTS " + table_name + \ " (ts timestamp NOT NULL, topic_id INTEGER NOT NULL, " \ "value_string TEXT NOT NULL, topics TEXT, " \ "UNIQUE(topic_id, ts)); " self.execute_stmt(stmt) - stmt = "CREATE INDEX IF NOT EXISTS idx_" + table_name + " ON " + \ - table_name + "(ts ASC);" + stmt = "CREATE INDEX IF NOT EXISTS idx_" + table_name + " ON " + table_name + "(ts ASC);" self.execute_stmt(stmt, commit=True) return True def insert_aggregate_stmt(self, table_name): - return '''INSERT OR REPLACE INTO ''' + table_name + \ - ''' values(?, ?, ?, ?)''' + return '''INSERT OR REPLACE INTO ''' + table_name + ''' values(?, ?, ?, ?)''' def collect_aggregate(self, topic_ids, agg_type, start=None, end=None): """ @@ -523,11 +499,9 @@ def collect_aggregate(self, topic_ids, agg_type, start=None, end=None): """ if isinstance(agg_type, str): if agg_type.upper() not in ['AVG', 'MIN', 'MAX', 'COUNT', 'SUM']: - raise ValueError( - "Invalid aggregation type {}".format(agg_type)) - query = '''SELECT ''' \ - + agg_type + '''(value_string), count(value_string) FROM ''' \ - + self.data_table + ''' {where}''' + raise ValueError("Invalid aggregation type {}".format(agg_type)) + query = '''SELECT ''' + agg_type + '''(value_string), count(value_string) FROM ''' + \ + self.data_table + ''' {where}''' where_clauses = ["WHERE topic_id = ?"] args = [topic_ids[0]] @@ -540,9 +514,8 @@ def collect_aggregate(self, topic_ids, agg_type, start=None, end=None): where_clauses = [where_str] args = topic_ids[:] - # base historian converts naive timestamps to UTC, but if the - # start and end had explicit timezone info then they need to get - # converted to UTC since sqlite3 only store naive timestamp + # base historian converts naive timestamps to UTC, but if the start and end had explicit timezone info then they + # need to get converted to UTC since sqlite3 only store naive timestamp if start: start = start.astimezone(pytz.UTC) if end: @@ -567,13 +540,11 @@ def collect_aggregate(self, topic_ids, agg_type, start=None, end=None): results = self.select(real_query, args) if results: - _log.debug("results got {}, {}".format(results[0][0], - results[0][1])) + _log.debug("results got {}, {}".format(results[0][0], results[0][1])) return results[0][0], results[0][1] else: return 0, 0 - @staticmethod def get_tagging_query_from_ast(topic_tags_table, tup, tag_refs): """ @@ -614,8 +585,7 @@ def get_tagging_query_from_ast(topic_tags_table, tup, tag_refs): :return: sqlite query :rtype str """ - query = SqlLiteFuncts._get_compound_query(topic_tags_table, tup, - tag_refs) + query = SqlLiteFuncts._get_compound_query(topic_tags_table, tup, tag_refs) # Verify for parent tag finally. if present convert to subquery # Process parent tag # Convert @@ -630,8 +600,7 @@ def get_tagging_query_from_ast(topic_tags_table, tup, tag_refs): # ) parent = "" - search_pattern = r"WHERE\s+tag='(.+)\.(" \ - r".+)'\s+AND\s+value\s+(.+)($|\n)" + search_pattern = r"WHERE\s+tag='(.+)\.(.+)'\s+AND\s+value\s+(.+)($|\n)" results = re.findall(search_pattern, query, flags=re.IGNORECASE) # Example result :: [('campusRef', 'tag1', '= 2', '\n'), # ('siteRef', 'tag2', '= 3 ', '\n')] @@ -648,8 +617,7 @@ def get_tagging_query_from_ast(topic_tags_table, tup, tag_refs): r"AND " \ r"value \3 \4)".format(table=topic_tags_table, parent=parent) - query = re.sub(search_pattern, replace_pattern, query, count=1, - flags=re.I) + query = re.sub(search_pattern, replace_pattern, query, count=1, flags=re.I) _log.debug("Returning sqlite query condition {}".format(query)) return query @@ -690,32 +658,24 @@ def _get_compound_query(topic_tags_table, tup, tag_refs, root=True): :rtype str """ - # Instead of using sqlite LIKE operator we use python regular - # expression and sqlite REGEXP operator - reserved_words = {'and':'INTERSECT', "or":'UNION', 'not':'NOT', - 'like':'REGEXP'} + # Instead of using sqlite LIKE operator we use python regular expression and sqlite REGEXP operator + reserved_words = {'and': 'INTERSECT', "or": 'UNION', 'not': 'NOT', 'like': 'REGEXP'} prefix = 'SELECT topic_prefix FROM {} WHERE '.format(topic_tags_table) - # _log.debug("In get sqlite query condition. tup: {}".format(tup)) if tup is None: return tup if not isinstance(tup[1], tuple): - left = repr(tup[1]) # quote the tag + left = repr(tup[1]) # quote the tag else: - left = SqlLiteFuncts._get_compound_query(topic_tags_table, - tup[1], tag_refs, - False) + left = SqlLiteFuncts._get_compound_query(topic_tags_table, tup[1], tag_refs, False) if not isinstance(tup[2], tuple): if isinstance(tup[2],str): right = repr(tup[2]) - elif isinstance(tup[2],bool): + elif isinstance(tup[2], bool): right = 1 if tup[2] else 0 else: right = tup[2] else: - right = SqlLiteFuncts._get_compound_query(topic_tags_table, - tup[2], - tag_refs, - False) + right = SqlLiteFuncts._get_compound_query(topic_tags_table, tup[2], tag_refs, False) assert isinstance(tup[0], str) @@ -724,17 +684,13 @@ def _get_compound_query(topic_tags_table, tup, tag_refs, root=True): if lower_tup0 in reserved_words: operator = reserved_words[lower_tup0] - query = "" if operator == 'NOT': query = SqlLiteFuncts._negate_condition(right, topic_tags_table) elif operator == 'INTERSECT' or operator == 'UNION': if root: - query = "{left}\n{operator}\n{right}".format(left=left, - operator=operator, - right=right) + query = "{left}\n{operator}\n{right}".format(left=left, operator=operator, right=right) else: - query = 'SELECT topic_prefix FROM ({left} \n{operator}\n{' \ - 'right})'.format( + query = 'SELECT topic_prefix FROM ({left} \n{operator}\n{right})'.format( left=left, operator=operator, right=right) else: query = "{prefix} tag={tag} AND value {operator} {value}".format( @@ -778,21 +734,18 @@ def _negate_condition(condition, table_name): :return: negated select query :rtype str """ - _log.debug("Query condition to negate: {}".format(condition)) # Change and to or and or to and condition = condition.replace('INTERSECT\n', 'UNION_1\n') condition = condition.replace('UNION\n', 'INTERSECT\n') condition = condition.replace('UNION_1\n', 'UNION\n') # Now negate all SELECT... value with - # SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN ( - # SELECT....value) + # SELECT topic_prefix FROM topic_tags WHERE topic_prefix NOT IN (SELECT....value) search_pattern = r'(SELECT\s+topic_prefix\s+FROM\s+' + table_name + \ r'\s+WHERE\s+tag=\'.*\'\s+AND\s+value.*($|\n))' - replace_pattern = r'SELECT topic_prefix FROM ' + table_name + \ - r' WHERE topic_prefix NOT IN (\1)\2' + replace_pattern = r'SELECT topic_prefix FROM ' + table_name + r' WHERE topic_prefix NOT IN (\1)\2' c = re.search(search_pattern, condition) condition = re.sub(search_pattern, replace_pattern, @@ -802,6 +755,7 @@ def _negate_condition(condition, table_name): _log.debug("Condition after negation: {}".format(condition)) return condition + if __name__ == '__main__': con = { "database": '/tmp/tmpgLzWr3/historian.sqlite' @@ -813,12 +767,6 @@ def _negate_condition(condition, table_name): "meta_table": "meta_table" } functs = SqlLiteFuncts(con, tables_def) - functs.collect_aggregate('device1/in_temp', - 'sum', - datetime.strptime( - '2016-06-05 22:47:02.417604+00:00', - "%Y-%m-%d %H:%M:%S.%f+00:00"), - datetime.strptime( - '2016-06-05 22:49:02.417604+00:00', - "%Y-%m-%d %H:%M:%S.%f+00:00") - ) + functs.collect_aggregate('device1/in_temp', 'sum', + datetime.strptime('2016-06-05 22:47:02.417604+00:00', "%Y-%m-%d %H:%M:%S.%f+00:00"), + datetime.strptime('2016-06-05 22:49:02.417604+00:00', "%Y-%m-%d %H:%M:%S.%f+00:00")) diff --git a/volttron/platform/dbutils/sqlutils.py b/volttron/platform/dbutils/sqlutils.py index 3b6c577b25..0c655d1553 100644 --- a/volttron/platform/dbutils/sqlutils.py +++ b/volttron/platform/dbutils/sqlutils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/install_agents.py b/volttron/platform/install_agents.py new file mode 100644 index 0000000000..eba74ba27d --- /dev/null +++ b/volttron/platform/install_agents.py @@ -0,0 +1,363 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import argparse +import hashlib +import logging +import os +import sys +import tempfile +import traceback +import uuid + +import gevent +import yaml + +from volttron.platform import config, jsonapi, get_volttron_root, get_home +from volttron.platform.agent.utils import execute_command +from volttron.platform.packaging import add_files_to_package, create_package + +_log = logging.getLogger(__name__) + +_stdout = sys.stdout +_stderr = sys.stderr + + +def identity_exists(volttron_control, identity): + env = os.environ.copy() + cmds = [volttron_control, "status"] + + data = execute_command(cmds, env=env, logger=_log, + err_prefix="Error checking identity") + for x in data.split("\n"): + if x: + line_split = x.split() + if identity == line_split[2]: + return line_split[0] + return False + + +def install_requirements(agent_source): + req_file = os.path.join(agent_source, "requirements.txt") + + if os.path.exists(req_file): + _log.info(f"Installing requirements for agent from {req_file}.") + cmds = ["pip", "install", "-r", req_file] + try: + execute_command(cmds, logger=_log, + err_prefix="Error installing requirements") + except RuntimeError: + sys.exit(1) + + +def install_agent_directory(opts): + """ + The main installation method for installing the agent on the correct local + platform instance. + :param opts: + :param package: + :param agent_config: + :return: + """ + if not os.path.isfile(os.path.join(opts.install_path, "setup.py")): + _log.error("Agent source must contain a setup.py file.") + sys.exit(-10) + + install_requirements(opts.install_path) + + wheelhouse = os.path.join(get_home(), "packaged") + opts.package = create_package(opts.install_path, wheelhouse, opts.vip_identity) + + if not os.path.isfile(opts.package): + _log.error("The wheel file for the agent was unable to be created.") + sys.exit(-10) + + agent_exists = False + volttron_control = os.path.join(get_volttron_root(), "env/bin/vctl") + if opts.vip_identity is not None: + # if the identity exists the variable will have the agent uuid in it. + agent_exists = identity_exists(volttron_control, opts.vip_identity) + if agent_exists: + if not opts.force: + _log.error( + "identity already exists, but force wasn't specified.") + sys.exit(-10) + # Note we don't remove the agent here because if we do that will + # not allow us to update without losing the keys. The + # install_agent method either installs or upgrades the agent. + agent_config = opts.agent_config + + if agent_config is None: + agent_config = {} + + # if not a dict then config should be a filename + if not isinstance(agent_config, dict): + config_file = agent_config + else: + cfg = tempfile.NamedTemporaryFile() + with open(cfg.name, 'w') as fout: + fout.write(yaml.safe_dump(agent_config)) + config_file = cfg.name + + try: + with open(config_file) as fp: + data = yaml.safe_load(fp) + except: + _log.error("Invalid yaml/json config file.") + sys.exit(-10) + + # Configure the whl file before installing. + add_files_to_package(opts.package, {'config_file': config_file}) + env = os.environ.copy() + + + if agent_exists: + cmds = [volttron_control, "--json", "upgrade", opts.vip_identity, opts.package] + else: + cmds = [volttron_control, "--json", "install", opts.package] + + if opts.tag: + cmds.extend(["--tag", opts.tag]) + + out = execute_command(cmds, env=env, logger=_log, + err_prefix="Error installing agent") + + parsed = out.split("\n") + + # If there is not an agent with that identity: + # 'Could not find agent with VIP IDENTITY "BOO". Installing as new agent + # Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 6ccbf8dc-4929-4794-9c8e-3d8c6a121776 listeneragent-3.2' + + # The following is standard output of an agent that was previously installed + # If the agent was not previously installed then only the second line + # would have been output to standard out. + # + # Removing previous version of agent "foo" + # Installed /home/volttron/.volttron/packaged/listeneragent-3.2-py2-none-any.whl as 81b811ff-02b5-482e-af01-63d2fd95195a listeneragent-3.2 + + agent_uuid = None + for l in parsed: + if l.startswith('Installed'): + agent_uuid = l.split(' ')[-2:-1][0] + # if 'Could not' in parsed[0]: + # agent_uuid = parsed[1].split()[-2] + # elif 'Removing' in parsed[0]: + # agent_uuid = parsed[1].split()[-2] + # else: + # agent_uuid = parsed[0].split()[-2] + + output_dict = dict(agent_uuid=agent_uuid) + + if opts.start: + cmds = [volttron_control, "start", agent_uuid] + outputdata = execute_command(cmds, env=env, logger=_log, + err_prefix="Error starting agent") + + # Expected output on standard out + # Starting 83856b74-76dc-4bd9-8480-f62bd508aa9c listeneragent-3.2 + if 'Starting' in outputdata: + output_dict['starting'] = True + + if opts.enable: + cmds = [volttron_control, "enable", agent_uuid] + + if opts.priority != -1: + cmds.extend(["--priority", str(opts.priority)]) + + outputdata = execute_command(cmds, env=env, logger=_log, + err_prefix="Error enabling agent") + # Expected output from standard out + # Enabling 6bcee29b-7af3-4361-a67f-7d3c9e986419 listeneragent-3.2 with priority 50 + if "Enabling" in outputdata: + output_dict['enabling'] = True + output_dict['priority'] = outputdata.split("\n")[0].split()[-1] + + if opts.start: + # Pause for agent_start_time seconds before verifying that the agent + gevent.sleep(opts.agent_start_time) + + cmds = [volttron_control, "status", agent_uuid] + outputdata = execute_command(cmds, env=env, logger=_log, + err_prefix="Error finding agent status") + + # 5 listeneragent-3.2 foo running [10737] + output_dict["started"] = "running" in outputdata + if output_dict["started"]: + pidpos = outputdata.index('[') + 1 + pidend = outputdata.index(']') + output_dict['agent_pid'] = int(outputdata[pidpos: pidend]) + + if opts.json: + sys.stdout.write("%s\n" % jsonapi.dumps(output_dict, indent=4)) + if opts.csv: + keylen = len(output_dict) + keyline = '' + valueline = '' + keys = list(output_dict.keys()) + for k in range(keylen): + if k < keylen - 1: + keyline += "%s," % keys[k] + valueline += "%s," % output_dict[keys[k]] + else: + keyline += "%s" % keys[k] + valueline += "%s" % output_dict[keys[k]] + sys.stdout.write("%s\n%s\n" % (keyline, valueline)) + + +def install_agent(opts, publickey=None, secretkey=None, callback=None): + try: + install_path = opts.install_path + except AttributeError: + install_path = opts.wheel + + if os.path.isdir(install_path): + install_agent_directory(opts) + if opts.connection is not None: + opts.connection.server.core.stop() + sys.exit(0) + filename = install_path + tag = opts.tag + vip_identity = opts.vip_identity + if opts.vip_address.startswith('ipc://'): + _log.info("Installing wheel locally without channel subsystem") + filename = config.expandall(filename) + agent_uuid = opts.connection.call('install_agent_local', + filename, + vip_identity=vip_identity, + publickey=publickey, + secretkey=secretkey) + + if tag: + opts.connection.call('tag_agent', agent_uuid, tag) + + else: + channel = None + try: + _log.debug('Creating channel for sending the agent.') + channel_name = str(uuid.uuid4()) + channel = opts.connection.server.vip.channel('control', + channel_name) + _log.debug('calling control install agent.') + agent_uuid = opts.connection.call_no_get('install_agent', + filename, + channel_name, + vip_identity=vip_identity, + publickey=publickey, + secretkey=secretkey) + + _log.debug('Sending wheel to control') + sha512 = hashlib.sha512() + with open(filename, 'rb') as wheel_file_data: + while True: + # get a request + with gevent.Timeout(60): + request, file_offset, chunk_size = channel.recv_multipart() + if request == b'checksum': + channel.send(sha512.digest()) + break + + assert request == b'fetch' + + # send a chunk of the file + file_offset = int(file_offset) + chunk_size = int(chunk_size) + wheel_file_data.seek(file_offset) + data = wheel_file_data.read(chunk_size) + sha512.update(data) + channel.send(data) + + agent_uuid = agent_uuid.get(timeout=10) + + except Exception as exc: + if opts.debug: + traceback.print_exc() + _stderr.write( + '{}: error: {}: {}\n'.format(opts.command, exc, filename)) + return 10 + else: + if tag: + opts.connection.call('tag_agent', + agent_uuid, + tag) + finally: + _log.debug('closing channel') + if channel: + channel.close(linger=0) + del channel + + name = opts.connection.call('agent_name', agent_uuid) + _stdout.write('Installed {} as {} {}\n'.format(filename, agent_uuid, name)) + + opts.connection.server.core.stop() + + # This is where we need to exit so the script doesn't continue after installation. + sys.exit(0) + + +def add_install_agent_parser(add_parser_fn, has_restricted): + install = add_parser_fn('install', help='install agent from wheel', + epilog='Optionally you may specify the --tag argument to tag the ' + 'agent during install without requiring a separate call to ' + 'the tag command. ') + install.add_argument('install_path', help='path to agent wheel or directory for agent installation') + install.add_argument('--tag', help='tag for the installed agent') + install.add_argument('--vip-identity', help='VIP IDENTITY for the installed agent. ' + 'Overrides any previously configured VIP IDENTITY.') + install.add_argument('--agent-config', help="Agent configuration!") + install.add_argument("-f", "--force", action='store_true', + help="agents are uninstalled by tag so force allows multiple agents to be removed at one go.") + install.add_argument("--priority", default=-1, type=int, + help="priority of startup during instance startup") + install.add_argument("--start", action='store_true', + help="start the agent during the script execution") + install.add_argument("--enable", action='store_true', + help="enable the agent with default 50 priority unless --priority set") + install.add_argument("--csv", action='store_true', + help="format the standard out output to csv") + install.add_argument("--json", action="store_true", + help="format the standard out output to json") + install.add_argument("-st", "--agent-start-time", default=5, type=int, + help="the amount of time to wait and verify that the agent has started up.") + if has_restricted: + install.add_argument('--verify', action='store_true', + dest='verify_agents', + help='verify agent integrity during install') + install.add_argument('--no-verify', action='store_false', + dest='verify_agents', + help=argparse.SUPPRESS) + install.set_defaults(func=install_agent, verify_agents=True) diff --git a/volttron/platform/instance_setup.py b/volttron/platform/instance_setup.py index a4ac03b12e..591bb0c2ed 100644 --- a/volttron/platform/instance_setup.py +++ b/volttron/platform/instance_setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -45,6 +45,7 @@ from configparser import ConfigParser from shutil import copy from urllib.parse import urlparse +import logging from gevent import subprocess from gevent.subprocess import Popen @@ -54,9 +55,9 @@ from requirements import extras_require from volttron.platform import certs, is_rabbitmq_available from volttron.platform import jsonapi -from volttron.platform.agent.known_identities import MASTER_WEB, PLATFORM_DRIVER, VOLTTRON_CENTRAL +from volttron.platform.agent.known_identities import PLATFORM_WEB, PLATFORM_DRIVER, VOLTTRON_CENTRAL from volttron.platform.agent.utils import get_platform_instance_name, wait_for_volttron_startup, \ - is_volttron_running, wait_for_volttron_shutdown + is_volttron_running, wait_for_volttron_shutdown, setup_logging from volttron.utils import get_hostname from volttron.utils.prompt import prompt_response, y, n, y_or_n from volttron.utils.rmq_config_params import RMQConfig @@ -221,7 +222,7 @@ def _install_agent(agent_dir, config, tag): def _is_agent_installed(tag): - installed_list_process = Popen(['vctl','list'], env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + installed_list_process = Popen(['vctl', 'list'], env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE) installed_list = installed_list_process.communicate() installed = b"".join(installed_list) if tag.encode('utf-8') in installed: @@ -385,7 +386,7 @@ def _create_web_certs(): return 1 print("Creating new web server certificate.") - crts.create_signed_cert_files(name=MASTER_WEB + "-server", cert_type='server', ca_name=crts.root_ca_name, fqdn=get_hostname()) + crts.create_signed_cert_files(name=PLATFORM_WEB + "-server", cert_type='server', ca_name=crts.root_ca_name, fqdn=get_hostname()) return 0 @@ -423,7 +424,12 @@ def do_message_bus(): # print("Rabbitmq dependencies not installed. Installing now...") # set_dependencies("rabbitmq") # print("Done!") - check_rmq_setup() + try: + check_rmq_setup() + except AssertionError: + print("RabbitMQ setup is incomplete. RabbitMQ server directory is missing.") + print("Please run bootstrap --rabbitmq before running vcfg") + sys.exit() config_opts['message-bus'] = bus_type @@ -479,6 +485,28 @@ def do_vip(): print('\nERROR: That address has already been bound to.') config_opts['vip-address'] = '{}:{}'.format(vip_address, vip_port) + +def do_instance_name(): + """ + Prompts the user for volttron instance-name. + "volttron1" will be used as the default otherwise. + """ + # TODO: Set constraints on what can be used for volttron instance-name. + global config_opts + + instance_name = config_opts.get('instance-name', + 'volttron1') + instance_name = instance_name.strip('"') + + valid_name = False + while not valid_name: + prompt = 'What is the name of this instance?' + new_instance_name = prompt_response(prompt, default=instance_name) + if new_instance_name: + valid_name = True + instance_name = new_instance_name + config_opts['instance-name'] = '"{}"'.format(instance_name) + def do_web_enabled_rmq(vhome): global config_opts @@ -642,23 +670,23 @@ def get_cert_and_key(vhome): # Check for existing files first. If present and are valid ask if we are to use that - master_web_cert = os.path.join(vhome, 'certificates/certs/', MASTER_WEB+"-server.crt") - master_web_key = os.path.join(vhome, 'certificates/private/', MASTER_WEB + "-server.pem") + platform_web_cert = os.path.join(vhome, 'certificates/certs/', PLATFORM_WEB+"-server.crt") + platform_web_key = os.path.join(vhome, 'certificates/private/', PLATFORM_WEB + "-server.pem") cert_error = True - if is_file_readable(master_web_cert, False) and is_file_readable(master_web_key, False): + if is_file_readable(platform_web_cert, False) and is_file_readable(platform_web_key, False): try: - if certs.Certs.validate_key_pair(master_web_cert, master_web_key): + if certs.Certs.validate_key_pair(platform_web_cert, platform_web_key): print('\nThe following certificate and keyfile exists for web access over https: \n{}\n{}'.format( - master_web_cert,master_web_key)) + platform_web_cert,platform_web_key)) prompt = '\nDo you want to use these certificates for the web server?' if prompt_response(prompt, valid_answers=y_or_n, default='Y') in y: - config_opts['web-ssl-cert'] = master_web_cert - config_opts['web-ssl-key'] = master_web_key + config_opts['web-ssl-cert'] = platform_web_cert + config_opts['web-ssl-key'] = platform_web_key cert_error = False else: print('\nPlease provide the path to cert and key files. ' - 'This will overwrite existing files: \n{} and {}'.format(master_web_cert, master_web_key)) + 'This will overwrite existing files: \n{} and {}'.format(platform_web_cert, platform_web_key)) else: print("Existing key pair is not valid.") except RuntimeError as e: @@ -705,12 +733,12 @@ def get_cert_and_key(vhome): else: cert_error = _create_web_certs() if not cert_error: - master_web_cert = os.path.join(vhome, 'certificates/certs/', - MASTER_WEB+"-server.crt") - master_web_key = os.path.join(vhome, 'certificates/private/', - MASTER_WEB + "-server.pem") - config_opts['web-ssl-cert'] = master_web_cert - config_opts['web-ssl-key'] = master_web_key + platform_web_cert = os.path.join(vhome, 'certificates/certs/', + PLATFORM_WEB+"-server.crt") + platform_web_key = os.path.join(vhome, 'certificates/private/', + PLATFORM_WEB + "-server.pem") + config_opts['web-ssl-cert'] = platform_web_cert + config_opts['web-ssl-key'] = platform_web_key def is_file_readable(file_path, log=True): @@ -731,20 +759,6 @@ def do_vcp(): vctl_list = vctl_list_process.communicate() vctl_list_output = ''.join([v.decode('utf-8') for v in vctl_list]) - # Default instance name to the vip address. - instance_name = config_opts.get('instance-name', - 'volttron1') - instance_name = instance_name.strip('"') - - valid_name = False - while not valid_name: - prompt = 'What is the name of this instance?' - new_instance_name = prompt_response(prompt, default=instance_name) - if new_instance_name: - valid_name = True - instance_name = new_instance_name - config_opts['instance-name'] = '"{}"'.format(instance_name) - try: vc_address = config_opts['volttron-central-address'] no_vc_address = False @@ -814,7 +828,7 @@ def do_platform_historian(): def add_fake_device_to_configstore(): - prompt = 'Would you like to install a fake device on the master driver?' + prompt = 'Would you like to install a fake device on the platform driver?' response = prompt_response(prompt, valid_answers=y_or_n, default='N') if response in y: _cmd(['volttron-ctl', 'config', 'store', PLATFORM_DRIVER, @@ -824,9 +838,9 @@ def add_fake_device_to_configstore(): 'examples/configurations/drivers/fake.config']) -@installs(get_services_core("MasterDriverAgent"), 'master_driver', +@installs(get_services_core("PlatformDriverAgent"), 'platform_driver', post_install_func=add_fake_device_to_configstore) -def do_master_driver(): +def do_platform_driver(): return {} @@ -863,6 +877,7 @@ def wizard(): _update_config_file() do_message_bus() do_vip() + do_instance_name() _update_config_file() prompt = 'Is this instance web enabled?' @@ -889,8 +904,6 @@ def wizard(): "After starting VOLTTRON, please go to {} to complete the setup.".format( os.path.join(config_opts['bind-web-address'], "admin", "login.html") )) - # TODO: Commented out so we don't prompt for installing vc or vcp until they - # have been figured out totally for python3 prompt = 'Will this instance be controlled by volttron central?' response = prompt_response(prompt, valid_answers=y_or_n, default='Y') @@ -908,14 +921,14 @@ def wizard(): response = prompt_response(prompt, valid_answers=y_or_n, default='N') if response in y: do_platform_historian() - prompt = 'Would you like to install a master driver?' + prompt = 'Would you like to install a platform driver?' response = prompt_response(prompt, valid_answers=y_or_n, default='N') if response in y: if not _check_dependencies_met("drivers"): print("Driver dependencies not installed. Installing now...") set_dependencies("drivers") print("Done!") - do_master_driver() + do_platform_driver() prompt = 'Would you like to install a listener agent?' response = prompt_response(prompt, valid_answers=y_or_n, default='N') @@ -981,6 +994,12 @@ def main(): args = parser.parse_args() verbose = args.verbose + # Protect against configuration of base logger when not the "main entry point" + if verbose: + setup_logging(logging.DEBUG, True) + else: + setup_logging(logging.INFO, True) + prompt_vhome = True if args.vhome: set_home(args.vhome) diff --git a/volttron/platform/jsonapi.py b/volttron/platform/jsonapi.py index ff6099c235..59e35eaa9f 100644 --- a/volttron/platform/jsonapi.py +++ b/volttron/platform/jsonapi.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/jsonrpc.py b/volttron/platform/jsonrpc.py index 2ca8a4ea57..70d7c995dd 100644 --- a/volttron/platform/jsonrpc.py +++ b/volttron/platform/jsonrpc.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/keystore.py b/volttron/platform/keystore.py index 1dfe7d73bb..4290203933 100644 --- a/volttron/platform/keystore.py +++ b/volttron/platform/keystore.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/lib/inotify/__init__.py b/volttron/platform/lib/inotify/__init__.py index 47d13c4d02..cf69017150 100644 --- a/volttron/platform/lib/inotify/__init__.py +++ b/volttron/platform/lib/inotify/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/lib/inotify/__main__.py b/volttron/platform/lib/inotify/__main__.py index 9ff00c3da1..3a21617a63 100644 --- a/volttron/platform/lib/inotify/__main__.py +++ b/volttron/platform/lib/inotify/__main__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/lib/inotify/green.py b/volttron/platform/lib/inotify/green.py index 6c510069d9..4cd2366298 100644 --- a/volttron/platform/lib/inotify/green.py +++ b/volttron/platform/lib/inotify/green.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/lib/prctl.py b/volttron/platform/lib/prctl.py index 04d4219df7..1a1bceabbe 100644 --- a/volttron/platform/lib/prctl.py +++ b/volttron/platform/lib/prctl.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/main.py b/volttron/platform/main.py index d9f0ea887f..777d815286 100644 --- a/volttron/platform/main.py +++ b/volttron/platform/main.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,6 +42,7 @@ import logging from logging import handlers import logging.config +from typing import Optional from urllib.parse import urlparse import os @@ -55,6 +56,8 @@ import gevent import gevent.monkey +from volttron.platform.vip.healthservice import HealthService +from volttron.platform.vip.servicepeer import ServicePeerNotifier from volttron.utils import get_random_key from volttron.utils.frame_serialization import deserialize_frames, serialize_frames @@ -83,20 +86,20 @@ from .auth import AuthService, AuthFile, AuthEntry from .control import ControlService try: - from .web import MasterWebService + from .web import PlatformWebService HAS_WEB = True except ImportError: HAS_WEB = False from .store import ConfigStoreService from .agent import utils -from .agent.known_identities import MASTER_WEB, CONFIGURATION_STORE, AUTH, CONTROL, CONTROL_CONNECTION +from .agent.known_identities import PLATFORM_WEB, CONFIGURATION_STORE, AUTH, CONTROL, CONTROL_CONNECTION, PLATFORM_HEALTH, \ + KEY_DISCOVERY, PROXY_ROUTER from .vip.agent.subsystems.pubsub import ProtectedPubSubTopics from .keystore import KeyStore, KnownHostsStore from .vip.pubsubservice import PubSubService from .vip.routingservice import RoutingService from .vip.externalrpcservice import ExternalRPCService from .vip.keydiscovery import KeyDiscoveryAgent -from .vip.pubsubwrapper import PubSubWrapper from ..utils.persistance import load_create_store from .vip.rmq_router import RMQRouter from volttron.platform.agent.utils import store_message_bus_config @@ -291,10 +294,11 @@ def __init__(self, local_address, addresses=(), volttron_central_address=None, instance_name=None, bind_web_address=None, volttron_central_serverkey=None, protected_topics={}, external_address_file='', - msgdebug=None, agent_monitor_frequency=600): + msgdebug=None, agent_monitor_frequency=600, + service_notifier=Optional[ServicePeerNotifier]): super(Router, self).__init__( - context=context, default_user_id=default_user_id) + context=context, default_user_id=default_user_id, service_notifier=service_notifier) self.local_address = Address(local_address) self._addr = addresses self.addresses = addresses = [Address(addr) for addr in set(addresses)] @@ -432,6 +436,9 @@ def handle_subsystem(self, frames, user_id): drop = frames[6] self._drop_peer(drop) self._drop_pubsub_peers(drop) + if self._service_notifier: + self._service_notifier.peer_dropped(drop) + _log.debug("ROUTER received agent stop message. dropping peer: {}".format(drop)) except IndexError: _log.error(f"agentstop called but unable to determine agent from frames sent {frames}") @@ -577,7 +584,8 @@ def __init__(self, local_address, addresses=(), volttron_central_address=None, instance_name=None, bind_web_address=None, volttron_central_serverkey=None, protected_topics={}, external_address_file='', - msgdebug=None, volttron_central_rmq_address=None): + msgdebug=None, volttron_central_rmq_address=None, + service_notifier=Optional[ServicePeerNotifier]): self._context_class = _green.Context self._socket_class = _green.Socket self._poller_class = _green.Poller @@ -588,7 +596,7 @@ def __init__(self, local_address, addresses=(), volttron_central_address=volttron_central_address, instance_name=instance_name, bind_web_address=bind_web_address, volttron_central_serverkey=volttron_central_address, protected_topics=protected_topics, external_address_file=external_address_file, - msgdebug=msgdebug) + msgdebug=msgdebug, service_notifier=service_notifier) def start(self): '''Create the socket and call setup(). @@ -678,7 +686,7 @@ def start_volttron_process(opts): # and opts.web_ssl_cert os.environ['MESSAGEBUS'] = opts.message_bus - os.environ['SECURE_AGENT_USER'] = opts.secure_agent_users + os.environ['SECURE_AGENT_USERS'] = opts.secure_agent_users if opts.instance_name is None: if len(opts.vip_address) > 0: opts.instance_name = opts.vip_address[0] @@ -692,6 +700,7 @@ def start_volttron_process(opts): get_platform_instance_name(vhome=opts.volttron_home, prompt=False) if opts.bind_web_address: + os.environ['BIND_WEB_ADDRESS'] = opts.bind_web_address parsed = urlparse(opts.bind_web_address) if parsed.scheme not in ('http', 'https'): raise Exception( @@ -785,7 +794,8 @@ def start_volttron_process(opts): ks_control_conn = KeyStore(KeyStore.get_agent_keystore_path(CONTROL_CONNECTION)) entry = AuthEntry(credentials=encode_key(decode_key(ks_control_conn.public)), user_id=CONTROL_CONNECTION, - capabilities=[{'edit_config_store': {'identity': '/.*/'}}], + capabilities=[{'edit_config_store': {'identity': '/.*/'}}, + "allow_auth_modifications"], comments='Automatically added by platform on start') AuthFile().add(entry, overwrite=True) @@ -809,6 +819,9 @@ def start_volttron_process(opts): "often the platform checks for any crashed agent " "and attempts to restart. {}".format(e)) + # Allows registration agents to callbacks for peers + notifier = ServicePeerNotifier() + # Main loops def zmq_router(stop): try: @@ -823,7 +836,8 @@ def zmq_router(stop): bind_web_address=opts.bind_web_address, protected_topics=protected_topics, external_address_file=external_address_file, - msgdebug=opts.msgdebug).run() + msgdebug=opts.msgdebug, + service_notifier=notifier).run() except Exception: _log.exception('Unhandled exception in router loop') raise @@ -839,7 +853,8 @@ def rmq_router(stop): RMQRouter(opts.vip_address, opts.vip_local_address, opts.instance_name, opts.vip_address, volttron_central_address=opts.volttron_central_address, volttron_central_serverkey=opts.volttron_central_serverkey, - bind_web_address=opts.bind_web_address + bind_web_address=opts.bind_web_address, + service_notifier=notifier ).run() except Exception: _log.exception('Unhandled exception in rmq router loop') @@ -904,15 +919,17 @@ def rmq_router(stop): _log.error("DEBUG: Exiting due to error in rabbitmq config file. Please check.") sys.exit() - try: - start_rabbit(rmq_config.rmq_home) - except AttributeError as exc: - _log.error("Exception while starting RabbitMQ. Check the path in the config file.") - sys.exit() - except subprocess.CalledProcessError as exc: - _log.error("Unable to start rabbitmq server. " - "Check rabbitmq log for errors") - sys.exit() + # If RabbitMQ is started as service, don't start it through the code + if not rmq_config.rabbitmq_as_service: + try: + start_rabbit(rmq_config.rmq_home) + except AttributeError as exc: + _log.error("Exception while starting RabbitMQ. Check the path in the config file.") + sys.exit() + except subprocess.CalledProcessError as exc: + _log.error("Unable to start rabbitmq server. " + "Check rabbitmq log for errors") + sys.exit() # Start the config store before auth so we may one day have auth use it. config_store = ConfigStoreService(address=address, @@ -959,10 +976,11 @@ def rmq_router(stop): bind_web_address=opts.bind_web_address, protected_topics=protected_topics, external_address_file=external_address_file, - msgdebug=opts.msgdebug) + msgdebug=opts.msgdebug, + service_notifier=notifier) proxy_router = ZMQProxyRouter(address=address, - identity='proxy_router', + identity=PROXY_ROUTER, zmq_router=green_router, message_bus=opts.message_bus) event = gevent.event.Event() @@ -1004,22 +1022,18 @@ def rmq_router(stop): agent_monitor_frequency=opts.agent_monitor_frequency), KeyDiscoveryAgent(address=address, serverkey=publickey, - identity='keydiscovery', + identity=KEY_DISCOVERY, external_address_config=external_address_file, setup_mode=opts.setup_mode, bind_web_address=opts.bind_web_address, enable_store=False, - message_bus='zmq'), - # For Backward compatibility with VOLTTRON versions <= 4.1 - PubSubWrapper(address=address, - identity='pubsub', heartbeat_autostart=True, - enable_store=False, - message_bus='zmq') + message_bus='zmq') ] entry = AuthEntry(credentials=services[0].core.publickey, user_id=CONTROL, - capabilities=[{'edit_config_store': {'identity': '/.*/'}}], + capabilities=[{'edit_config_store': {'identity': '/.*/'}}, + "allow_auth_modifications"], comments='Automatically added by platform on start') AuthFile().add(entry, overwrite=True) @@ -1037,18 +1051,18 @@ def rmq_router(stop): if opts.web_ssl_key is None or opts.web_ssl_cert is None or \ (not os.path.isfile(opts.web_ssl_key) and not os.path.isfile(opts.web_ssl_cert)): # This is different than the master.web cert which is used for the agent to connect - # to rmq server. The master.web-server certificate will be used for the master web + # to rmq server. The master.web-server certificate will be used for the platform web # services. - base_webserver_name = MASTER_WEB + "-server" + base_webserver_name = PLATFORM_WEB + "-server" from volttron.platform.certs import Certs certs = Certs() certs.create_signed_cert_files(base_webserver_name, cert_type='server') opts.web_ssl_key = certs.private_key_file(base_webserver_name) opts.web_ssl_cert = certs.cert_file(base_webserver_name) - _log.info("Starting master web service") - services.append(MasterWebService( - serverkey=publickey, identity=MASTER_WEB, + _log.info("Starting platform web service") + services.append(PlatformWebService( + serverkey=publickey, identity=PLATFORM_WEB, address=address, bind_web_address=opts.bind_web_address, volttron_central_address=opts.volttron_central_address, @@ -1060,24 +1074,29 @@ def rmq_router(stop): web_secret_key=opts.web_secret_key )) - ks_masterweb = KeyStore(KeyStore.get_agent_keystore_path(MASTER_WEB)) - entry = AuthEntry(credentials=encode_key(decode_key(ks_masterweb.public)), - user_id=MASTER_WEB, + ks_platformweb = KeyStore(KeyStore.get_agent_keystore_path(PLATFORM_WEB)) + entry = AuthEntry(credentials=encode_key(decode_key(ks_platformweb.public)), + user_id=PLATFORM_WEB, capabilities=['allow_auth_modifications'], comments='Automatically added by platform on start') AuthFile().add(entry, overwrite=True) - # # MASTER_WEB did not work on RMQ. Referred to agent as master + # # PLATFORM_WEB did not work on RMQ. Referred to agent as master # # Added this auth to allow RPC calls for credential authentication # # when using the RMQ messagebus. - # ks_masterweb = KeyStore(KeyStore.get_agent_keystore_path('master')) - # entry = AuthEntry(credentials=encode_key(decode_key(ks_masterweb.public)), + # ks_platformweb = KeyStore(KeyStore.get_agent_keystore_path('master')) + # entry = AuthEntry(credentials=encode_key(decode_key(ks_platformweb.public)), # user_id='master', # capabilities=['allow_auth_modifications'], # comments='Automatically added by platform on start') # AuthFile().add(entry, overwrite=True) - + health_service = HealthService(address=address, + identity=PLATFORM_HEALTH, heartbeat_autostart=True, + enable_store=False, + message_bus=opts.message_bus) + notifier.register_peer_callback(health_service.peer_added, health_service.peer_dropped) + services.append(health_service) events = [gevent.event.Event() for service in services] tasks = [gevent.spawn(service.core.run, event) for service, event in zip(services, events)] @@ -1126,7 +1145,7 @@ def rmq_router(stop): if os.path.exists(pid_file): os.remove(pid_file) except Exception: - _log.warn("Unable to load {}".format(VOLTTRON_INSTANCES)) + _log.warning("Unable to load {}".format(VOLTTRON_INSTANCES)) _log.debug("********************************************************************") _log.debug("VOLTTRON PLATFORM HAS SHUTDOWN") _log.debug("********************************************************************") diff --git a/volttron/platform/messaging/__init__.py b/volttron/platform/messaging/__init__.py index 587f35a9ea..582fd7adbd 100644 --- a/volttron/platform/messaging/__init__.py +++ b/volttron/platform/messaging/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/messaging/headers.py b/volttron/platform/messaging/headers.py index a5fdd67ebe..ab8548b5f2 100644 --- a/volttron/platform/messaging/headers.py +++ b/volttron/platform/messaging/headers.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/messaging/health.py b/volttron/platform/messaging/health.py index 70df4287c3..17d07e50c5 100644 --- a/volttron/platform/messaging/health.py +++ b/volttron/platform/messaging/health.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/messaging/socket.py b/volttron/platform/messaging/socket.py index f30b8bd334..883a8a9e5f 100644 --- a/volttron/platform/messaging/socket.py +++ b/volttron/platform/messaging/socket.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/messaging/topics.py b/volttron/platform/messaging/topics.py index eedcde0e56..079013ea18 100644 --- a/volttron/platform/messaging/topics.py +++ b/volttron/platform/messaging/topics.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/messaging/utils.py b/volttron/platform/messaging/utils.py index 34fc308901..66bc9a55c0 100644 --- a/volttron/platform/messaging/utils.py +++ b/volttron/platform/messaging/utils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/packages.py b/volttron/platform/packages.py index fd5e7142f3..0110fac571 100644 --- a/volttron/platform/packages.py +++ b/volttron/platform/packages.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -146,7 +146,7 @@ def add_files(self, files_to_add=None, basedir='.'): files_to_add['contract']) msg += ' to execreqs.json' sys.stderr.write(msg) - _log.warn(msg) + _log.warning(msg) self.zipfile.writestr("%s/%s" % (self.distinfo_name, 'execreqs.json'), data) diff --git a/volttron/platform/packaging.py b/volttron/platform/packaging.py index 2ce076b31a..58a97d620a 100644 --- a/volttron/platform/packaging.py +++ b/volttron/platform/packaging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/resmon.py b/volttron/platform/resmon.py index 08f215f426..4668734edd 100644 --- a/volttron/platform/resmon.py +++ b/volttron/platform/resmon.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/scheduling.py b/volttron/platform/scheduling.py index 694333ece0..625da649e7 100644 --- a/volttron/platform/scheduling.py +++ b/volttron/platform/scheduling.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/store.py b/volttron/platform/store.py index ec5c971d95..0f34a8b22d 100644 --- a/volttron/platform/store.py +++ b/volttron/platform/store.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/storeutils.py b/volttron/platform/storeutils.py index f8a55eb92f..ca4be85255 100644 --- a/volttron/platform/storeutils.py +++ b/volttron/platform/storeutils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -97,4 +97,4 @@ def _follow_links(seen, new_config_name, current_config_name, current_config, ex if _follow_links(seen, new_config_name, child_config_name, child_config, existing_configs): return True - return False \ No newline at end of file + return False diff --git a/volttron/platform/vip/__init__.py b/volttron/platform/vip/__init__.py index 7c7a3feb77..4cf3bf6358 100644 --- a/volttron/platform/vip/__init__.py +++ b/volttron/platform/vip/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/__init__.py b/volttron/platform/vip/agent/__init__.py index add459c936..e48f3ceb76 100644 --- a/volttron/platform/vip/agent/__init__.py +++ b/volttron/platform/vip/agent/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -78,14 +78,19 @@ def __init__(self, owner, core, heartbeat_autostart, def __init__(self, identity=None, address=None, context=None, publickey=None, secretkey=None, serverkey=None, - heartbeat_autostart=False, heartbeat_period=60, - volttron_home=os.path.abspath(platform.get_home()), + # Since heartbeat is now 100% tied to status on the vctl change the defaults + # to auto start the heartbeat. + heartbeat_autostart=True, heartbeat_period=60, + volttron_home=None, agent_uuid=None, enable_store=True, enable_web=False, enable_channel=False, reconnect_interval=None, version='0.1', enable_fncs=False, instance_name=None, message_bus=None, volttron_central_address=None, volttron_central_instance_name=None): + if volttron_home is None: + volttron_home = os.path.abspath(platform.get_home()) + try: self._version = version diff --git a/volttron/platform/vip/agent/compat.py b/volttron/platform/vip/agent/compat.py index 89a524da01..b62bcf9d1a 100644 --- a/volttron/platform/vip/agent/compat.py +++ b/volttron/platform/vip/agent/compat.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/connection.py b/volttron/platform/vip/agent/connection.py index 978c804e4c..f186aee322 100644 --- a/volttron/platform/vip/agent/connection.py +++ b/volttron/platform/vip/agent/connection.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -71,7 +71,7 @@ def __init__(self, address, peer=None, publickey=None, self._peer = peer self._serverkey = None if peer is None: - self._log.warn('Peer is non so must be passed in call method.') + self._log.warning('Peer is non so must be passed in call method.') self.volttron_home = volttron_home if self.volttron_home is None: @@ -170,9 +170,7 @@ def server(self): self._connected_since = get_aware_utc_now() if self.peer: if self.peer not in self._server.vip.peerlist().get(timeout=2): - self._log.warn('peer {} not found connected to router.'.format( - self.peer - )) + self._log.warning('peer {} not found connected to router.'.format(self.peer)) return self._server def peers(self, timeout=DEFAULT_TIMEOUT): diff --git a/volttron/platform/vip/agent/core.py b/volttron/platform/vip/agent/core.py index a8ca446156..a7c92049e7 100644 --- a/volttron/platform/vip/agent/core.py +++ b/volttron/platform/vip/agent/core.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/decorators.py b/volttron/platform/vip/agent/decorators.py index fdc84eea59..ce3788b598 100644 --- a/volttron/platform/vip/agent/decorators.py +++ b/volttron/platform/vip/agent/decorators.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/dispatch.py b/volttron/platform/vip/agent/dispatch.py index 4ecbaa0154..7115711bab 100644 --- a/volttron/platform/vip/agent/dispatch.py +++ b/volttron/platform/vip/agent/dispatch.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/errors.py b/volttron/platform/vip/agent/errors.py index f956401117..4a1d9912e6 100644 --- a/volttron/platform/vip/agent/errors.py +++ b/volttron/platform/vip/agent/errors.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/results.py b/volttron/platform/vip/agent/results.py index f485763f12..f1740a5c1e 100644 --- a/volttron/platform/vip/agent/results.py +++ b/volttron/platform/vip/agent/results.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/__init__.py b/volttron/platform/vip/agent/subsystems/__init__.py index ab278edff7..b9a25954c0 100644 --- a/volttron/platform/vip/agent/subsystems/__init__.py +++ b/volttron/platform/vip/agent/subsystems/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/auth.py b/volttron/platform/vip/agent/subsystems/auth.py index 36ece5e97b..aee7e79cda 100644 --- a/volttron/platform/vip/agent/subsystems/auth.py +++ b/volttron/platform/vip/agent/subsystems/auth.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/base.py b/volttron/platform/vip/agent/subsystems/base.py index c93c126179..6f65b14315 100644 --- a/volttron/platform/vip/agent/subsystems/base.py +++ b/volttron/platform/vip/agent/subsystems/base.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/channel.py b/volttron/platform/vip/agent/subsystems/channel.py index b5b89f7955..cda62c908b 100644 --- a/volttron/platform/vip/agent/subsystems/channel.py +++ b/volttron/platform/vip/agent/subsystems/channel.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/configstore.py b/volttron/platform/vip/agent/subsystems/configstore.py index 091a4c6268..4ba08ed72c 100644 --- a/volttron/platform/vip/agent/subsystems/configstore.py +++ b/volttron/platform/vip/agent/subsystems/configstore.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/health.py b/volttron/platform/vip/agent/subsystems/health.py index 4e061e8a80..760331f0c4 100644 --- a/volttron/platform/vip/agent/subsystems/health.py +++ b/volttron/platform/vip/agent/subsystems/health.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/heartbeat.py b/volttron/platform/vip/agent/subsystems/heartbeat.py index 5851d17607..c8d9688fea 100644 --- a/volttron/platform/vip/agent/subsystems/heartbeat.py +++ b/volttron/platform/vip/agent/subsystems/heartbeat.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/hello.py b/volttron/platform/vip/agent/subsystems/hello.py index 0123fbde92..3dd0eaf8a5 100644 --- a/volttron/platform/vip/agent/subsystems/hello.py +++ b/volttron/platform/vip/agent/subsystems/hello.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/peerlist.py b/volttron/platform/vip/agent/subsystems/peerlist.py index 6eda38adbb..384569875f 100644 --- a/volttron/platform/vip/agent/subsystems/peerlist.py +++ b/volttron/platform/vip/agent/subsystems/peerlist.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/ping.py b/volttron/platform/vip/agent/subsystems/ping.py index a92aca2ada..2b9bbea512 100644 --- a/volttron/platform/vip/agent/subsystems/ping.py +++ b/volttron/platform/vip/agent/subsystems/ping.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/pubsub.py b/volttron/platform/vip/agent/subsystems/pubsub.py index d746d1b367..77ce70b05f 100644 --- a/volttron/platform/vip/agent/subsystems/pubsub.py +++ b/volttron/platform/vip/agent/subsystems/pubsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -92,9 +92,6 @@ def __init__(self, core, rpc_subsys, peerlist_subsys, owner): self.rpc = weakref.ref(rpc_subsys) self.peerlist = weakref.ref(peerlist_subsys) self._owner = owner - self._pubsubwithrpc = PubSubWithRPC(self.core, self.rpc) - self._send_via_rpc = False - self._parameters_needed = True def platform_subscriptions(): return defaultdict(subscriptions) @@ -105,7 +102,6 @@ def subscriptions(): self._my_subscriptions = defaultdict(platform_subscriptions) self.protected_topics = ProtectedPubSubTopics() core.register('pubsub', self._handle_subsystem, self._handle_error) - self.rpc().export(self._peer_push, 'pubsub.push') self.vip_socket = None self._results = ResultsDictionary() self._event_queue = Queue() @@ -185,7 +181,7 @@ def _sync(self, peer, items): items = {(bus, prefix) for bus, topics in items.items() for prefix in topics} remove = [] - for bus, subscriptions in self._peer_subscriptions.items(): + for bus, subscriptions in self._my_subscriptions.items(): for prefix, subscribers in subscriptions.items(): item = bus, prefix try: @@ -197,80 +193,26 @@ def _sync(self, peer, items): else: subscribers.add(peer) for bus, prefix in remove: - subscriptions = self._peer_subscriptions[bus] + subscriptions = self._my_subscriptions[bus] assert not subscriptions.pop(prefix) for bus, prefix in items: self._add_peer_subscription(peer, bus, prefix) - def _peer_sync(self, items): - peer = self.rpc().context.vip_message.peer - assert isinstance(items, dict) - self._sync(peer, items) - def _add_peer_subscription(self, peer, bus, prefix): try: - subscriptions = self._peer_subscriptions[bus] + subscriptions = self._my_subscriptions[bus] except KeyError: - self._peer_subscriptions[bus] = subscriptions = dict() + self._my_subscriptions[bus] = subscriptions = dict() try: subscribers = subscriptions[prefix] except KeyError: subscriptions[prefix] = subscribers = set() subscribers.add(peer) - def _peer_subscribe(self, prefix, bus=''): - peer = self.rpc().context.vip_message.peer - for prefix in prefix if isinstance(prefix, list) else [prefix]: - self._add_peer_subscription(peer, bus, prefix) - - def _peer_unsubscribe(self, prefix, bus=''): - peer = self.rpc().context.vip_message.peer - try: - subscriptions = self._peer_subscriptions[bus] - except KeyError: - return - if prefix is None: - remove = [] - for topic, subscribers in subscriptions.items(): - subscribers.discard(peer) - if not subscribers: - remove.append(topic) - for topic in remove: - del subscriptions[topic] - else: - for prefix in prefix if isinstance(prefix, list) else [prefix]: - subscribers = subscriptions[prefix] - subscribers.discard(peer) - if not subscribers: - del subscriptions[prefix] - - def _peer_list(self, prefix='', bus='', subscribed=True, reverse=False): - peer = self.rpc().context.vip_message.peer - if bus is None: - buses = iter(self._peer_subscriptions.items()) - else: - buses = [(bus, self._peer_subscriptions[bus])] - if reverse: - test = prefix.startswith - else: - test = lambda t: t.startswith(prefix) - results = [] - for bus, subscriptions in buses: - for topic, subscribers in subscriptions.items(): - if test(topic): - member = peer in subscribers - if not subscribed or member: - results.append((bus, topic, member)) - return results - - def _peer_publish(self, topic, headers, message=None, bus=''): - peer = self.rpc().context.vip_message.peer - self._distribute(peer, topic, headers, message, bus) - def _distribute(self, peer, topic, headers, message=None, bus=''): self._check_if_protected_topic(topic) try: - subscriptions = self._peer_subscriptions[bus] + subscriptions = self._my_subscriptions[bus] except KeyError: subscriptions = dict() subscribers = set() @@ -290,13 +232,6 @@ def _distribute(self, peer, topic, headers, message=None, bus=''): socket.send_multipart(frames, copy=False) return len(subscribers) - def _peer_push(self, sender, bus, topic, headers, message): - '''Handle incoming subscription pushes from peers.''' - peer = self.rpc().context.vip_message.peer - handled = 0 - sender = decode_peer(sender) - self._process_callback(sender, bus, topic, headers, message) - def synchronize(self): """Synchronize local subscriptions with the PubSubService. """ @@ -307,16 +242,7 @@ def synchronize(self): for bus, subscriptions in bus_subscriptions.items()} sync_msg = jsonapi.dumpb(dict(subscriptions=subscriptions)) frames = ['synchronize', 'connected', sync_msg] - # For backward compatibility with old pubsub - if self._send_via_rpc: - delay = random.random() - self.core().spawn_later(delay, self.rpc().notify, 'pubsub', 'pubsub.sync', subscriptions) - else: - # Parameters are stored initially, in case remote agent/platform is using old pubsub - if self._parameters_needed: - kwargs = dict(op='synchronize', subscriptions=subscriptions) - self._save_parameters(result.ident, **kwargs) - self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) + self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) # 2073 - python3 dictionary keys method returns a dict_keys structure that isn't serializable. # added list(subscriptions.keys()) to make it like python2 list of strings. @@ -329,16 +255,7 @@ def synchronize(self): dict(subscriptions=subscriptions) ) frames = ['synchronize', 'connected', sync_msg] - # For backward compatibility with old pubsub - if self._send_via_rpc: - delay = random.random() - self.core().spawn_later(delay, self.rpc().notify, 'pubsub', 'pubsub.sync', subscriptions) - else: - # Parameters are stored initially, in case remote agent/platform is using old pubsub - if self._parameters_needed: - kwargs = dict(op='synchronize', subscriptions=subscriptions) - self._save_parameters(result.ident, **kwargs) - self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) + self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) def list(self, peer, prefix='', bus='', subscribed=True, reverse=False, all_platforms=False): """Gets list of subscriptions matching the prefix and bus for the specified peer. @@ -359,22 +276,13 @@ def list(self, peer, prefix='', bus='', subscribed=True, reverse=False, all_plat :Return Values: List of tuples [(topic, bus, flag to indicate if peer is a subscriber or not)] """ - # For backward compatibility with old pubsub - if self._send_via_rpc: - return self.rpc().call(peer, 'pubsub.list', prefix, - bus, subscribed, reverse) - else: - result = next(self._results) - # Parameters are stored initially, in case remote agent/platform is using old pubsub - if self._parameters_needed: - kwargs = dict(op='list', prefix=prefix, subscribed=subscribed, reverse=reverse, bus=bus) - self._save_parameters(result.ident, **kwargs) - list_msg = jsonapi.dumpb(dict(prefix=prefix, all_platforms=all_platforms, - subscribed=subscribed, reverse=reverse, bus=bus)) - - frames = ['list', list_msg] - self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) - return result + result = next(self._results) + list_msg = jsonapi.dumpb(dict(prefix=prefix, all_platforms=all_platforms, + subscribed=subscribed, reverse=reverse, bus=bus)) + + frames = ['list', list_msg] + self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) + return result def _add_subscription(self, prefix, callback, bus='', all_platforms=False): # _log.debug(f"Adding subscription prefix: {prefix} allplatforms: {all_platforms}") @@ -417,24 +325,15 @@ def subscribe(self, peer, prefix, callback, bus='', all_platforms=False, persist :Return Values: Success or Failure """ - # For backward compatibility with old pubsub - if self._send_via_rpc: - self._add_subscription(prefix, callback, bus) - return self.rpc().call(peer, 'pubsub.subscribe', prefix, bus=bus) - else: - result = next(self._results) - # Parameters are stored initially, in case remote agent/platform is using old pubsub - if self._parameters_needed: - kwargs = dict(op='subscribe', prefix=prefix, bus=bus) - self._save_parameters(result.ident, **kwargs) - self._add_subscription(prefix, callback, bus, all_platforms) - sub_msg = jsonapi.dumpb( - dict(prefix=prefix, bus=bus, all_platforms=all_platforms) - ) + result = next(self._results) + self._add_subscription(prefix, callback, bus, all_platforms) + sub_msg = jsonapi.dumpb( + dict(prefix=prefix, bus=bus, all_platforms=all_platforms) + ) - frames = ['subscribe', sub_msg] - self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) - return result + frames = ['subscribe', sub_msg] + self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) + return result @subscribe.classmethod def subscribe(cls, peer, prefix, bus='', all_platforms=False, persistent_queue=None): @@ -444,25 +343,6 @@ def decorate(method): return decorate - def _peer_push(self, sender, bus, topic, headers, message): - """ - Added for backward compatibility with old pubsub - param sender: publisher - type sender: str - param bus: bus - type callback: str - param topic: topic for the message - type topic: str - param headers: header for the message - type headers: dict - param message: actual message - type message: dict - """ - peer = self.rpc().context.vip_message.peer - handled = 0 - sender = decode_peer(sender) - self._process_callback(sender, bus, topic, headers, message) - def _drop_subscription(self, prefix, callback, bus='', platform='internal'): """ @@ -514,12 +394,13 @@ def _drop_subscription(self, prefix, callback, bus='', platform='internal'): if not topics: raise KeyError('no such subscription') else: - _log.debug("PUSUB unsubscribe my subscriptions: {0} {1}".format( - prefix, self._my_subscriptions)) + _log.debug(f"BEFORE: {self._my_subscriptions}") if platform in self._my_subscriptions: bus_subscriptions = self._my_subscriptions[platform] if bus in bus_subscriptions: + _log.debug(f"BUS: {bus}") subscriptions = bus_subscriptions[bus] + _log.debug(f"subscriptions: {subscriptions}") if callback is None: try: del subscriptions[prefix] @@ -528,15 +409,18 @@ def _drop_subscription(self, prefix, callback, bus='', platform='internal'): else: try: callbacks = subscriptions[prefix] + _log.debug(f"callbacks: {callbacks}") except KeyError: return [] try: callbacks.remove(callback) - except KeyError: + except KeyError as e: + _log.debug(f"KeyError: {e}") pass if not callbacks: try: del subscriptions[prefix] + _log.debug(f"subscriptions: {subscriptions}") except KeyError: return [] topics = [prefix] @@ -544,6 +428,7 @@ def _drop_subscription(self, prefix, callback, bus='', platform='internal'): del bus_subscriptions[bus] if not bus_subscriptions: del self._my_subscriptions[platform] + _log.debug(f"AFTER: {self._my_subscriptions}") return topics def unsubscribe(self, peer, prefix, callback, bus='', all_platforms=False): @@ -565,32 +450,22 @@ def unsubscribe(self, peer, prefix, callback, bus='', all_platforms=False): :Return Values: success or not """ - # For backward compatibility with old pubsub - if self._send_via_rpc == True: - topics = self._drop_subscription(prefix, callback, bus) - return self.rpc().call(peer, 'pubsub.unsubscribe', topics, bus=bus) + subscriptions = dict() + result = next(self._results) + if not all_platforms: + platform = 'internal' + topics = self._drop_subscription(prefix, callback, bus, platform) + subscriptions[platform] = dict(prefix=topics, bus=bus) else: - subscriptions = dict() - result = next(self._results) - if not all_platforms: - platform = 'internal' - topics = self._drop_subscription(prefix, callback, bus, platform) - subscriptions[platform] = dict(prefix=topics, bus=bus) - else: - platform = 'all' - topics = self._drop_subscription(prefix, callback, bus, platform) - subscriptions[platform] = dict(prefix=topics, bus=bus) - - # Parameters are stored initially, in case remote agent/platform is using old pubsub - if self._parameters_needed: - kwargs = dict(op='unsubscribe', prefix=topics, bus=bus) - self._save_parameters(result.ident, **kwargs) - - unsub_msg = jsonapi.dumpb(subscriptions) - topics = self._drop_subscription(prefix, callback, bus) - frames = ['unsubscribe', unsub_msg] - self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) - return result + platform = 'all' + topics = self._drop_subscription(prefix, callback, bus, platform) + subscriptions[platform] = dict(prefix=topics, bus=bus) + + unsub_msg = jsonapi.dumpb(subscriptions) + topics = self._drop_subscription(prefix, callback, bus) + frames = ['unsubscribe', unsub_msg] + self.vip_socket.send_vip('', 'pubsub', frames, result.ident, copy=False) + return result def publish(self, peer: str, topic: str, headers=None, message=None, bus=''): """Publish a message to a given topic via a peer. @@ -623,23 +498,10 @@ def publish(self, peer: str, topic: str, headers=None, message=None, bus=''): if peer is None: peer = 'pubsub' - # For backward compatibility with old pubsub - if self._send_via_rpc: - return self.rpc().call( - peer, 'pubsub.publish', topic=topic, headers=headers, - message=message, bus=bus) - else: - result = next(self._results) - # Parameters are stored initially, in case remote agent/platform is using old pubsub - if self._parameters_needed: - kwargs = dict(op='publish', peer=peer, - topic=topic, bus=bus, - headers=headers, message=message) - self._save_parameters(result.ident, **kwargs) - - args = ['publish', topic, dict(bus=bus, headers=headers, message=message)] - self.vip_socket.send_vip('', 'pubsub', args, result.ident, copy=False) - return result + result = next(self._results) + args = ['publish', topic, dict(bus=bus, headers=headers, message=message)] + self.vip_socket.send_vip('', 'pubsub', args, result.ident, copy=False) + return result def _check_if_protected_topic(self, topic): required_caps = self.protected_topics.get(topic) @@ -674,11 +536,6 @@ def _process_incoming_message(self, message): except KeyError: pass - if self._parameters_needed: - self._send_via_rpc = False - self._parameters_needed = False - self._pubsubwithrpc.clear_parameters() - del self._pubsubwithrpc response = message.args[1] import struct if not isinstance(response, int): @@ -734,238 +591,11 @@ def _handle_error(self, sender, message, error, **kwargs): param **kwargs: variable arguments type **kwargs: dict """ - if isinstance(error, UnknownSubsystem): - # Must be connected to OLD pubsub. Try sending using RPC - self._send_via_rpc = True - self._pubsubwithrpc.send(self._results, message) - else: - try: - result = self._results.pop(message.id) - except KeyError: - return - result.set_exception(error) - - def _save_parameters(self, result_id, **kwargs): - """Save the parameters for later use. - param result_id: asyn result id - type result_id: float - param **kwargs: parameters to be stored - type **kwargs: dict - """ - end_time = utils.get_aware_utc_now() + timedelta(seconds=60) - event = self.core().schedule(end_time, self._cancel_event, result_id) - if kwargs is not None: - kwargs['event'] = event - self._pubsubwithrpc.parameters[result_id] = kwargs - - def _cancel_event(self, ident): - """Cancel event - param ident: event id - param ident: float - """ - - # #2074 the self._pubsubwithrpc attribute is delete when we have - # successfully determined that we are not connected to a backward - # compatible with volttron 4.0 - try: - parameters = self._pubsubwithrpc.parameters.pop(id) - event = parameters['event'] - event.cancel() - except KeyError: - return - except AttributeError: - pass - - try: - result = self._results.pop(id) - result.set_exception(gevent.Timeout) - except KeyError: - return - - -class PubSubWithRPC(object): - """For backward compatibility with old PubSub. The input parameters for each pubsub call is stored for short period - till we establish that the agent is connected to platform with old pubsub or not. Once this is established, the - parameters are no longer stored and this class is longer used.""" - - def __init__(self, core, rpc): - self.parameters = dict() - self._rpc = rpc - self._core = core - - def send(self, results, message): - """Check the message id to determine the type of call: subscribe or publish or list or unsubscribe. - Retrieve the corresponding input parameters and make the correct RPC call. - param results: Async results dictionary - type results: Weak dictionary - param message: Error message - type: - """ - id = message.id - - try: - parameters = self.parameters.pop(id) - except KeyError: - _log.error("Missing key {}".format(id)) - return - try: - if parameters['op'] == 'synchronize': - self._core().spawn(self._synchronize, id, results, parameters) - elif parameters['op'] == 'subscribe': - self._core().spawn(self._subscribe, id, results, parameters) - elif parameters['op'] == 'publish': - self._core().spawn(self._publish, id, results, parameters) - elif parameters['op'] == 'list': - self._core().spawn(self._list, id, results, parameters) - elif parameters['op'] == 'unsubscribe': - self._core().spawn(self._unsubscribe, id, results, parameters) - else: - _log.error("Error: Unknown operation {}".format(parameters['op'])) - except KeyError as exc: - _log.error("Error: Missing KEY in message {}".format(exc)) - - def _synchronize(self, results_id, results, parameters): - """Unsubscribe call using RPC - param results_id: Asynchronous result ID required to the set response for the caller - type results_id: float (hash value) - param results: Async results dictionary - type results: Weak dictionary - param parameters: Input parameters for the unsubscribe call - """ - try: - subscriptions = parameters['subscriptions'] - event = parameters['event'] - event.cancel() - except KeyError: - return - self._rpc().notify('pubsub', 'pubsub.sync', subscriptions) - - def _subscribe(self, results_id, results, parameters): - """Subscribe call using RPC - param results_id: Asynchronous result ID required to the set response for the caller - type results_id: float (hash value) - param results: Async results dictionary - type results: Weak dictionary - param parameters: Input parameters for the subscribe call - """ - try: - result = results.pop(results_id) - except KeyError: - result = None - - try: - prefix = parameters['prefix'] - bus = parameters['bus'] - event = parameters['event'] - event.cancel() - except KeyError: - return - try: - response = self._rpc().call('pubsub', 'pubsub.subscribe', prefix, bus=bus).get(timeout=5) - if result is not None: - result.set(response) - except gevent.Timeout as exc: - if result is not None: - result.set_exception(exc) - - def _list(self, results_id, results, parameters): - """List call using RPC - param results_id: Asynchronous result ID required to the set response for the caller - type results_id: float (hash value) - param results: Async results dictionary - type results: Weak dictionary - param parameters: Input parameters for the list call - """ - try: - result = results.pop(results_id) - except KeyError: - result = None - - try: - prefix = parameters['prefix'] - subscribed = parameters['subscribed'] - reverse = parameters['reverse'] - bus = parameters['bus'] - event = parameters['event'] - event.cancel() - except KeyError: - return - try: - response = self._rpc().call('pubsub', 'pubsub.list', prefix, - bus, subscribed, reverse).get(timeout=5) - if result is not None: - result.set(response) - except gevent.Timeout as exc: - if result is not None: - result.set_exception(exc) - - def _publish(self, results_id, results, parameters): - """Publish call using RPC - param results_id: Asynchronous result ID required to the set response for the caller - type results_id: float (hash value) - param results: Async results dictionary - type results: Weak dictionary - param parameters: Input parameters for the publish call - """ - try: - result = results.pop(results_id) - except KeyError: - result = None - try: - topic = parameters['topic'] - headers = parameters['headers'] - message = parameters['message'] - bus = parameters['bus'] - event = parameters['event'] - event.cancel() - except KeyError: - return - try: - response = self._rpc().call( - 'pubsub', 'pubsub.publish', topic=topic, headers=headers, - message=message, bus=bus).get(timeout=5) - if result is not None: - result.set(response) - except gevent.Timeout as exc: - if result is not None: - result.set_exception(exc) - - def _unsubscribe(self, results_id, results, parameters): - """Unsubscribe call using RPC - param results_id: Asynchronous result ID required to the set response for the caller - type results_id: float (hash value) - param results: Async results dictionary - type results: Weak dictionary - param parameters: Input parameters for the unsubscribe call - """ - try: - result = results.pop(results_id) - except KeyError: - result = None - try: - topics = parameters['prefix'] - bus = parameters['bus'] - event = parameters['event'] - event.cancel() - except KeyError: - return - try: - response = self._rpc().call('pubsub', 'pubsub.unsubscribe', topics, bus=bus).get(timeout=5) - if result is not None: - result.set(response) - except gevent.Timeout as exc: - if result is not None: - result.set_exception(exc) - - def clear_parameters(self): - """Clear all the saved parameters. - """ try: - for ident, param in self.parameters.items(): - param['event'].cancel() - self.parameters.clear() + result = self._results.pop(message.id) except KeyError: return + result.set_exception(error) class ProtectedPubSubTopics(object): diff --git a/volttron/platform/vip/agent/subsystems/query.py b/volttron/platform/vip/agent/subsystems/query.py index 2c34f775e4..cd5971a05d 100644 --- a/volttron/platform/vip/agent/subsystems/query.py +++ b/volttron/platform/vip/agent/subsystems/query.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/rmq_pubsub.py b/volttron/platform/vip/agent/subsystems/rmq_pubsub.py index 806b50a37b..ef63767d93 100644 --- a/volttron/platform/vip/agent/subsystems/rmq_pubsub.py +++ b/volttron/platform/vip/agent/subsystems/rmq_pubsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -249,9 +249,7 @@ def _send_proxy(self, prefix, bus=''): """ connection = self.core().connection rkey = self.core().instance_name + '.proxy.router.pubsub' - sub_msg = jsonapi.dumps( - dict(prefix=prefix, bus=bus, all_platforms=True) - ) + sub_msg = dict(prefix=prefix, bus=bus, all_platforms=True) # VIP format - [SENDER, RECIPIENT, PROTO, USER_ID, MSG_ID, SUBSYS, ARGS...] frames = [self.core().identity, '', 'VIP1', '', '', 'pubsub', 'subscribe', sub_msg] connection.channel.basic_publish(exchange=connection.exchange, diff --git a/volttron/platform/vip/agent/subsystems/rpc.py b/volttron/platform/vip/agent/subsystems/rpc.py index 57c6e5d7b7..2c47c51be2 100644 --- a/volttron/platform/vip/agent/subsystems/rpc.py +++ b/volttron/platform/vip/agent/subsystems/rpc.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/volttronfncs.py b/volttron/platform/vip/agent/subsystems/volttronfncs.py index 8165b32689..6f6044d1e7 100644 --- a/volttron/platform/vip/agent/subsystems/volttronfncs.py +++ b/volttron/platform/vip/agent/subsystems/volttronfncs.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/agent/subsystems/web.py b/volttron/platform/vip/agent/subsystems/web.py index be1ec46b83..98ec30d764 100644 --- a/volttron/platform/vip/agent/subsystems/web.py +++ b/volttron/platform/vip/agent/subsystems/web.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,7 +41,7 @@ import weakref from enum import Enum -from volttron.platform.agent.known_identities import MASTER_WEB +from volttron.platform.agent.known_identities import PLATFORM_WEB from volttron.platform.vip.agent.subsystems.base import SubsystemBase __docformat__ = 'reStructuredText' @@ -59,7 +59,7 @@ class ResourceType(Enum): class WebSubSystem(SubsystemBase): """ The web subsystem handles the agent side of routing web data from the - :class:`volttron.platform.web.MasterWebService`. + :class:`volttron.platform.web.PlatformWebService`. """ @@ -77,23 +77,23 @@ def onsetup(sender, **kwargs): rpc.export(self._route_callback, 'route.callback') def onstop(sender, **kwargs): - rpc.call(MASTER_WEB, 'unregister_all_agent_routes') + rpc.call(PLATFORM_WEB, 'unregister_all_agent_routes') core.onstop.connect(onstop, self) core.onsetup.connect(onsetup, self) def get_user_claims(self, bearer): - return self._rpc().call(MASTER_WEB, 'get_user_claims', bearer).get(timeout=10) + return self._rpc().call(PLATFORM_WEB, 'get_user_claims', bearer).get(timeout=10) def unregister_all_routes(self): - self._rpc().call(MASTER_WEB, 'unregister_all_agent_routes').get(timeout=10) + self._rpc().call(PLATFORM_WEB, 'unregister_all_agent_routes').get(timeout=10) def register_endpoint(self, endpoint, callback, res_type: ResourceType = ResourceType.JSONRPC): """ The :meth:`register_endpoint` method registers an endpoint with the :param res_type: - :class:`volttron.platform.web.MasterWebService` on the VOLTTRON + :class:`volttron.platform.web.PlatformWebService` on the VOLTTRON instance. Each endpoint can map to at most one callback function. The callback @@ -117,7 +117,7 @@ def callback(self, env, data): self._endpoints[endpoint] = callback if isinstance(res_type, ResourceType): res_type = res_type.value - self._rpc().call(MASTER_WEB, 'register_endpoint', endpoint, res_type).get(timeout=10) + self._rpc().call(PLATFORM_WEB, 'register_endpoint', endpoint, res_type).get(timeout=10) def register_path(self, prefix, static_path): """ @@ -129,14 +129,14 @@ def register_path(self, prefix, static_path): :param prefix: :param static_path: An existing path available to the - :class:`volttron.platform.web.MasterWebService` + :class:`volttron.platform.web.PlatformWebService` :type prefix: str :type static_path: str """ _log.info('Registering path prefix: {}, path: {}'.format( prefix, static_path )) - self._rpc().call(MASTER_WEB, 'register_path_route', prefix, + self._rpc().call(PLATFORM_WEB, 'register_path_route', prefix, static_path).get(timeout=10) def register_websocket(self, endpoint, opened=None, closed=None, @@ -144,7 +144,7 @@ def register_websocket(self, endpoint, opened=None, closed=None, """ The :meth:`register_websocket` method registers a websocket endpoint that can be connected to through the - :class:`volttron.platform.web.MasterWebService`. + :class:`volttron.platform.web.PlatformWebService`. The parameters opened and closed can be specified as callback events with the following signature: @@ -157,7 +157,7 @@ def ws_opened(self, endpoint): def ws_closed(self, endpoint): print('ws_closed endpoint {}'.format(endpoint)) - The received event is triggered when the websocket is writtent to fro + The received event is triggered when the websocket is written to from the client. The received event must have a signature such as the following interface: @@ -184,11 +184,11 @@ def ws_received(self, endpoint, message): """ _log.info("Agent registering websocket at: {}".format(endpoint)) self._ws_endpoint[endpoint] = (opened, closed, received) - self._rpc().call(MASTER_WEB, 'register_websocket', endpoint).get( + self._rpc().call(PLATFORM_WEB, 'register_websocket', endpoint).get( timeout=5) def unregister_websocket(self, endpoint): - self._rpc().call(MASTER_WEB, 'unregister_websocket', endpoint).get( + self._rpc().call(PLATFORM_WEB, 'unregister_websocket', endpoint).get( timeout=5) def send(self, endpoint, message=''): @@ -205,7 +205,7 @@ def send(self, endpoint, message=''): :type endpoint: str :type message: str """ - self._rpc().call(MASTER_WEB, 'websocket_send', endpoint, message).get( + self._rpc().call(PLATFORM_WEB, 'websocket_send', endpoint, message).get( timeout=5) def _route_callback(self, env, data): diff --git a/volttron/platform/vip/agent/utils.py b/volttron/platform/vip/agent/utils.py index 46c8ed2700..62a156beb4 100644 --- a/volttron/platform/vip/agent/utils.py +++ b/volttron/platform/vip/agent/utils.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -71,17 +71,17 @@ def get_server_keys(): return ks.public, ks.secret -def build_connection(identity, peer='', address=get_address(), +def build_connection(identity, peer='', address=None, publickey=None, secretkey=None, message_bus=None, **kwargs): + address = address if address is not None else get_address() if publickey is None or secretkey is None: publickey, secretkey = get_server_keys(publickey, secretkey) - cn = Connection(address=address, identity=identity, peer=peer, publickey=publickey, secretkey=secretkey, message_bus=message_bus, **kwargs) return cn -def build_agent(address=get_address(), identity=None, publickey=None, +def build_agent(address=None, identity=None, publickey=None, secretkey=None, timeout=10, serverkey=None, agent_class=Agent, volttron_central_address=None, volttron_central_instance_name=None, **kwargs) -> Agent: @@ -101,8 +101,8 @@ def build_agent(address=get_address(), identity=None, publickey=None, :return: an agent based upon agent_class that has been started :rtype: agent_class """ - # if not serverkey: - # serverkey = get_known_host_serverkey(address) + + address = address if address is not None else get_address() # This is a fix allows the connect to message bus to be different than # the one that is currently running. diff --git a/volttron/platform/vip/externalrpcservice.py b/volttron/platform/vip/externalrpcservice.py index 8aa3da9a15..fb432de219 100644 --- a/volttron/platform/vip/externalrpcservice.py +++ b/volttron/platform/vip/externalrpcservice.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/green.py b/volttron/platform/vip/green.py index 97f783c65d..cb7b4f6e15 100644 --- a/volttron/platform/vip/green.py +++ b/volttron/platform/vip/green.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/healthservice.py b/volttron/platform/vip/healthservice.py new file mode 100644 index 0000000000..115cadc1dc --- /dev/null +++ b/volttron/platform/vip/healthservice.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +from collections import defaultdict +from datetime import datetime +import logging + +from volttron.platform.agent.known_identities import CONTROL_CONNECTION, PROCESS_IDENTITIES +from volttron.platform.agent.utils import format_timestamp +from volttron.platform.vip.agent import Agent, Core, RPC +from volttron.platform.agent import utils +from datetime import timedelta +from volttron.utils.rmq_config_params import RMQConfig +from volttron.utils.rmq_setup import start_rabbit, RabbitMQStartError + +_log = logging.getLogger(__name__) + + +class HealthService(Agent): + + def __init__(self, **kwargs): + super(HealthService, self).__init__(**kwargs) + + # Store the health stats for given peers in a dictionary with + # keys being the identity of the connected agent. + self._health_dict = defaultdict(dict) + + def peer_added(self, peer): + """ + The `peer_added` method should be called whenever an agent is connected to the + platform. + + :param peer: The identity of the agent connected to the platform + """ + health = self._health_dict[peer] + + health['peer'] = peer + health['service_agent'] = peer in PROCESS_IDENTITIES + health['connected'] = format_timestamp(datetime.now()) + + def peer_dropped(self, peer): + # TODO: Should there be an option for a db/log file for agents coming and going from the platform? + self._health_dict[peer]['disconnected'] = format_timestamp(datetime.now()) + del self._health_dict[peer] + + @RPC.export + def get_platform_health(self): + """ + The `get_platform_health` retrieves all of the connected agent's health structures, + except for the `CONTROL_CONNECTION` (vctl's known identity). Vctl's identity is used for short + term connections and is not relevant to the core health system. + + This function returns a dictionary in the form identity: values such as the following: + + .. code-block :: json + + { + "listeneragent-3.3_35": + { + "peer": "listeneragent-3.3_35", + "service_agent": False, + "connected": "2020-10-28T12:46:58.701119", + "last_heartbeat": "2020-10-28T12:47:03.709605", + "message": "GOOD" + } + } + + :return: + """ + # Ignore the connection from control in the health as it will only be around for a short while. + agents = {k: v for k, v in self._health_dict.items() + if not v.get('peer') == CONTROL_CONNECTION} + return agents + + def _heartbeat_updates(self, peer, sender, bus, topic, headers, message): + """ + This method is called whenever a publish goes on the message bus from the + heartbeat* topic. + + :param peer: + :param sender: + :param bus: + :param topic: + :param headers: + :param message: + :return: + """ + health = self._health_dict[sender] + time_now = format_timestamp(datetime.now()) + if not health: + health['connected'] = time_now + health['peer'] = sender + health['service_agent'] = sender in PROCESS_IDENTITIES + + health['last_heartbeat'] = time_now + health['message'] = message + + @Core.receiver('onstart') + def onstart(self, sender, **kwargs): + # Start subscribing to heartbeat topic to get updates from the health subsystem. + self.vip.pubsub.subscribe('pubsub', 'heartbeat', callback=self._heartbeat_updates) + + + diff --git a/volttron/platform/vip/keydiscovery.py b/volttron/platform/vip/keydiscovery.py index 4cff75688f..3ad5d3862f 100644 --- a/volttron/platform/vip/keydiscovery.py +++ b/volttron/platform/vip/keydiscovery.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/proxy_zmq_router.py b/volttron/platform/vip/proxy_zmq_router.py index b1b3de2e8c..3b8a052753 100644 --- a/volttron/platform/vip/proxy_zmq_router.py +++ b/volttron/platform/vip/proxy_zmq_router.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -179,6 +179,7 @@ def outbound_response_handler(self, ch, method, props, body): # Reformat message into ZMQ VIP format frames = [to_identity, from_identity, 'VIP1', userid, props.message_id, props.type] + try: args = jsonapi.loads(body) try: @@ -197,7 +198,7 @@ def outbound_response_handler(self, ch, method, props, body): return _log.debug("Proxy ZMQ Router Outbound handler {0}, {1}".format(to_identity, args)) - + frames = serialize_frames(frames) try: self.zmq_router.socket.send_multipart(frames, copy=True) except ZMQError as ex: @@ -212,7 +213,7 @@ def rpc_message_handler(self, ch, method, props, body): :param body: :return: """ - zmq_frames = [] + frames = serialize_frames(jsonapi.loads(body)) try: diff --git a/volttron/platform/vip/pubsubservice.py b/volttron/platform/vip/pubsubservice.py index b60b6424b0..732ebb5faa 100644 --- a/volttron/platform/vip/pubsubservice.py +++ b/volttron/platform/vip/pubsubservice.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -827,7 +827,6 @@ def _handle_error(self, frames): # happens only in case of errors in multi platform use case _log.warning(f"In _handle_error of pubsub subsystem. Frames: {frames}") - def publish_callback(self, peer, sender, bus, topic, headers, message): """ Callback method to receive PubSub messages from internal RabbitMQ message bus and send it diff --git a/volttron/platform/vip/pubsubwrapper.py b/volttron/platform/vip/pubsubwrapper.py deleted file mode 100644 index 9fdd2e91fb..0000000000 --- a/volttron/platform/vip/pubsubwrapper.py +++ /dev/null @@ -1,192 +0,0 @@ -# -*- coding: utf-8 -*- {{{ -# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: -# -# Copyright 2019, Battelle Memorial Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This material was prepared as an account of work sponsored by an agency of -# the United States Government. Neither the United States Government nor the -# United States Department of Energy, nor Battelle, nor any of their -# employees, nor any jurisdiction or organization that has cooperated in the -# development of these materials, makes any warranty, express or -# implied, or assumes any legal liability or responsibility for the accuracy, -# completeness, or usefulness or any information, apparatus, product, -# software, or process disclosed, or represents that its use would not infringe -# privately owned rights. Reference herein to any specific commercial product, -# process, or service by trade name, trademark, manufacturer, or otherwise -# does not necessarily constitute or imply its endorsement, recommendation, or -# favoring by the United States Government or any agency thereof, or -# Battelle Memorial Institute. The views and opinions of authors expressed -# herein do not necessarily state or reflect those of the -# United States Government or any agency thereof. -# -# PACIFIC NORTHWEST NATIONAL LABORATORY operated by -# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY -# under Contract DE-AC05-76RL01830 -# }}} - - -import errno -import logging - -import gevent -from zmq import green as zmq - -from base64 import b64encode, b64decode -from zmq import SNDMORE -from volttron.platform.vip.agent import Agent, Core, RPC -from volttron.platform.vip.agent.errors import VIPError -from volttron.platform import jsonrpc, jsonapi -from collections import defaultdict - - -_log = logging.getLogger(__name__) - -def encode_peer(peer): - if peer.startswith('\x00'): - return peer[:1] + b64encode(peer[1:]) - return peer - -def decode_peer(peer): - if peer.startswith('\x00'): - return peer[:1] + b64decode(peer[1:]) - return peer - -class PubSubWrapper(Agent): - """PubSubWrapper Agent acts as a wrapper agent for PubSub subsystem when connected to remote platform that which is using - old pubsub (RPC based implementation). - When it receives PubSub requests from remote platform, - - calls the appropriate method of new platform. - - returns the result back""" - def __init__(self, identity, **kwargs): - super(PubSubWrapper, self).__init__(identity, **kwargs) - - def subscriptions(): - return defaultdict(set) - - self._peer_subscriptions = defaultdict(subscriptions) - - @Core.receiver('onsetup') - def onsetup(self, sender, **kwargs): - # pylint: disable=unused-argument - self.vip.rpc.export(self._peer_sync, 'pubsub.sync') - self.vip.rpc.export(self._peer_publish, 'pubsub.publish') - self.vip.rpc.export(self._peer_subscribe, 'pubsub.subscribe') - self.vip.rpc.export(self._peer_unsubscribe, 'pubsub.unsubscribe') - self.vip.rpc.export(self._peer_list, 'pubsub.list') - - def _sync(self, peer, items): - items = {(bus, prefix) for bus, topics in items.items() - for prefix in topics} - remove = [] - for bus, subscriptions in self._peer_subscriptions.items(): - for prefix, subscribers in subscriptions.items(): - item = bus, prefix - try: - items.remove(item) - except KeyError: - subscribers.discard(peer) - if not subscribers: - remove.append(item) - else: - subscribers.add(peer) - for bus, prefix in remove: - subscriptions = self._peer_subscriptions[bus] - assert not subscriptions.pop(prefix) - for bus, prefix in items: - self._add_peer_subscription(peer, bus, prefix) - self.vip.pubsub.subscribe(peer, prefix, self._collector, bus=bus) - - def _peer_sync(self, items): - peer = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") - assert isinstance(items, dict) - self._sync(peer, items) - - def _peer_publish(self, topic, headers, message=None, bus=''): - peer = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") - self.vip.pubsub.publish(peer, topic, headers, message=message, bus=bus) - - def add_bus(self, name): - self._peer_subscriptions.setdefault(name, {}) - - def _add_peer_subscription(self, peer, bus, prefix): - self._peer_subscriptions[bus][prefix].add(peer) - - def _peer_subscribe(self, prefix, bus=''): - peer = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") - for prefix in prefix if isinstance(prefix, list) else [prefix]: - self._add_peer_subscription(peer, bus, prefix) - self.vip.pubsub.subscribe(peer, prefix, self._collector, bus=bus) - - def _distribute(self, peer, topic, headers, message=None, bus=''): - #self._check_if_protected_topic(topic) - subscriptions = self._peer_subscriptions[bus] - subscribers = set() - for prefix, subscription in subscriptions.items(): - if subscription and topic.startswith(prefix): - subscribers |= subscription - if subscribers: - sender = encode_peer(peer) - json_msg = jsonapi.dumps(jsonrpc.json_method( - None, 'pubsub.push', - [sender, bus, topic, headers, message], None)) - frames = [zmq.Frame(b''), zmq.Frame(b''), - zmq.Frame(b'RPC'), zmq.Frame(json_msg)] - socket = self.core.socket - for subscriber in subscribers: - socket.send(subscriber, flags=SNDMORE) - socket.send_multipart(frames, copy=False) - return len(subscribers) - - def _collector(self, peer, sender, bus, topic, headers, message): - self._distribute(peer, topic, headers, message, bus) - - def _peer_list(self, prefix='', bus='', subscribed=True, reverse=False): - peer = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") - if bus is None: - buses = iter(self._peer_subscriptions.items()) - else: - buses = [(bus, self._peer_subscriptions[bus])] - if reverse: - test = prefix.startswith - else: - test = lambda t: t.startswith(prefix) - results = [] - for bus, subscriptions in buses: - for topic, subscribers in subscriptions.items(): - if test(topic): - member = peer in subscribers - if not subscribed or member: - results.append((bus, topic, member)) - return results - - def _peer_unsubscribe(self, prefix, bus=''): - peer = bytes(self.vip.rpc.context.vip_message.peer).decode("utf-8") - subscriptions = self._peer_subscriptions[bus] - if prefix is None: - remove = [] - for topic, subscribers in subscriptions.items(): - subscribers.discard(peer) - if not subscribers: - remove.append(topic) - for topic in remove: - del subscriptions[topic] - self.vip.pubsub.unsubscribe(peer, prefix, self._collector, bus=bus) - else: - for prefix in prefix if isinstance(prefix, list) else [prefix]: - subscribers = subscriptions[prefix] - subscribers.discard(peer) - if not subscribers: - del subscriptions[prefix] - self.vip.pubsub.unsubscribe(peer, prefix, self._collector, bus=bus) diff --git a/volttron/platform/vip/rmq_connection.py b/volttron/platform/vip/rmq_connection.py index bb1965eb19..3724802a74 100644 --- a/volttron/platform/vip/rmq_connection.py +++ b/volttron/platform/vip/rmq_connection.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -270,7 +270,7 @@ def connect(self, connection_callback=None, connection_error_callback=None): def register(self, vip_handler, error_handler=None): """ Register VIP handler to be invoked to handle incoming messages - :param handler: VIP handler callback method + :param vip_handler: VIP handler callback method :return: """ self._vip_handler = vip_handler diff --git a/volttron/platform/vip/rmq_router.py b/volttron/platform/vip/rmq_router.py index f062235abb..9d8a52a41d 100644 --- a/volttron/platform/vip/rmq_router.py +++ b/volttron/platform/vip/rmq_router.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -43,12 +43,14 @@ import logging import os from queue import Queue +from typing import Optional from volttron.platform import is_rabbitmq_available from volttron.platform import jsonapi from volttron.utils.rmq_mgmt import RabbitMQMgmt from .rmq_connection import RMQRouterConnection from .router import BaseRouter +from .servicepeer import ServicePeerNotifier from .socket import Message, Address from ..keystore import KeyStore from ..main import __version__ @@ -61,7 +63,7 @@ _log = logging.getLogger(__name__) -class RMQRouter(BaseRouter): +class RMQRouter(object): """ Concrete VIP Router for RabbitMQ message bus. It handles router specific messages and unrouteable messages. @@ -71,7 +73,8 @@ def __init__(self, address, local_address, instance_name, addresses=(), identity='router', default_user_id=None, volttron_central_address=None, volttron_central_serverkey=None, - bind_web_address=None + bind_web_address=None, + service_notifier=Optional[ServicePeerNotifier] ): """ Initialize the object instance. @@ -92,6 +95,7 @@ def __init__(self, address, local_address, instance_name, self._identity = identity self.rmq_mgmt = RabbitMQMgmt() self.event_queue = Queue() + self._service_notifier = service_notifier param = self._build_connection_parameters() self.connection = RMQRouterConnection(param, identity, @@ -167,10 +171,12 @@ def _add_peer(self, peer, message_bus='rmq'): self._distribute('peerlist', 'add', peer, message_bus) self._peers.add(peer) self._peers_with_messagebus[peer] = message_bus + self._service_notifier.peer_added(peer) def _drop_peer(self, peer, message_bus='rmq'): try: self._peers.remove(peer) + self._service_notifier.peer_dropped(peer) del self._peers_with_messagebus[peer] except KeyError: return @@ -205,7 +211,7 @@ def handle_system(self, message): # send welcome message back message.args = ['welcome', '1.0', self._identity, sender] elif subsystem == 'ping': - message.args = ['pong'] + message.args[0] = 'pong' elif subsystem == 'peerlist': try: op = message.args[0] @@ -316,9 +322,6 @@ def _distribute(self, *parts): _log.debug(f"Distributing to peers {peer}") if self._peers_with_messagebus[peer] == 'rmq': self.connection.send_vip_object(message) - else: - _log.debug(f"???????????????????Looks like we should be destributing message {message}") - #self.connection.send_vip_object_via_proxy(message) def _make_user_access_tokens(self, identity): tokens = dict() diff --git a/volttron/platform/vip/router.py b/volttron/platform/vip/router.py index e96501bfec..2c03f5f5fe 100644 --- a/volttron/platform/vip/router.py +++ b/volttron/platform/vip/router.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,9 +41,12 @@ import os import logging +from typing import Optional + import zmq from zmq import Frame, NOBLOCK, ZMQError, EINVAL, EHOSTUNREACH +from volttron.platform.vip.servicepeer import ServicePeerNotifier from volttron.utils.frame_serialization import serialize_frames __all__ = ['BaseRouter', 'OUTGOING', 'INCOMING', 'UNROUTABLE', 'ERROR'] @@ -66,6 +69,7 @@ zmq.Frame(os.strerror(zmq.EPROTONOSUPPORT).encode('ascii')) ) + class BaseRouter(object): '''Abstract base class of VIP router implementation. @@ -85,7 +89,7 @@ class BaseRouter(object): _socket_class = zmq.Socket _poller_class = zmq.Poller - def __init__(self, context=None, default_user_id=None): + def __init__(self, context=None, default_user_id=None, service_notifier=Optional[ServicePeerNotifier]): '''Initialize the object instance. If context is None (the default), the zmq global context will be @@ -98,6 +102,7 @@ def __init__(self, context=None, default_user_id=None): self._poller = self._poller_class() self._ext_sockets = [] self._socket_id_mapping = {} + self._service_notifier = service_notifier def run(self): '''Main router loop.''' @@ -213,6 +218,8 @@ def _distribute(self, *parts): drop.update(self._send(frames)) for peer in drop: self._drop_peer(peer) + if self._service_notifier: + self._service_notifier.peer_dropped(peer) def _drop_pubsub_peers(self, peer): '''Drop peers for pubsub subsystem. To be handled by subclasses''' @@ -228,6 +235,8 @@ def _add_peer(self, peer): self._distribute('peerlist', 'add', peer) self._peers.add(peer) self._add_pubsub_peers(peer) + if self._service_notifier: + self._service_notifier.peer_added(peer) def _drop_peer(self, peer): try: diff --git a/volttron/platform/vip/routingservice.py b/volttron/platform/vip/routingservice.py index 86bf1e476c..54e99de18e 100644 --- a/volttron/platform/vip/routingservice.py +++ b/volttron/platform/vip/routingservice.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/servicepeer.py b/volttron/platform/vip/servicepeer.py new file mode 100644 index 0000000000..74310aefdf --- /dev/null +++ b/volttron/platform/vip/servicepeer.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2020, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +import logging + +_log = logging.getLogger(__name__) + + +class ServicePeerNotifier(object): + """ + This class is responsible for routing the base_router's connections and disconnections + from the zmq thread through to the registered callback functions. + """ + def __init__(self): + self._registered_added = set() + self._registered_dropped = set() + + def register_peer_callback(self, added_callback, dropped_callback): + """ + Register functions for adding callbacks for connected and disconnected peers + to the message bus. + + The signature of the callback should be: + + .. code-block :: python + + def added_callback(peer): + # the peer is a string identity connected. + pass + + :param added_callback: + :param dropped_callback: + """ + assert added_callback is not None + assert dropped_callback is not None + + self._registered_added.add(added_callback) + self._registered_dropped.add(dropped_callback) + + def peer_added(self, peer): + """ + Handles calling registered methods + :param peer: + :return: + """ + for fn in self._registered_added: + fn(peer) + + def peer_dropped(self, peer): + """ + Handles calling of registered methods when a peer drops a connection to the platform. + :param peer: + :return: + """ + for fn in self._registered_dropped: + fn(peer) diff --git a/volttron/platform/vip/socket.py b/volttron/platform/vip/socket.py index bc6d2cb2ed..56ba6a8671 100644 --- a/volttron/platform/vip/socket.py +++ b/volttron/platform/vip/socket.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/tracking.py b/volttron/platform/vip/tracking.py index b1d20ba96c..688d57b4ce 100644 --- a/volttron/platform/vip/tracking.py +++ b/volttron/platform/vip/tracking.py @@ -4,7 +4,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/platform/vip/zmq_connection.py b/volttron/platform/vip/zmq_connection.py index b2c8e16502..09e5b04af9 100644 --- a/volttron/platform/vip/zmq_connection.py +++ b/volttron/platform/vip/zmq_connection.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,7 +24,7 @@ # completeness, or usefulness or any information, apparatus, product, # software, or process disclosed, or represents that its use would not infringe # privately owned rights. Reference herein to any specific commercial product, -# process, or service by trade name, trademark, manufacturer, or otherwise +# process, or service by trade name, trademark, manufactufrer, or otherwise # does not necessarily constitute or imply its endorsement, recommendation, or # favoring by the United States Government or any agency thereof, or # Battelle Memorial Institute. The views and opinions of authors expressed diff --git a/volttron/platform/web/__init__.py b/volttron/platform/web/__init__.py index f8023fed68..099f606ef7 100644 --- a/volttron/platform/web/__init__.py +++ b/volttron/platform/web/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,12 +44,12 @@ try: import jwt except ImportError: - logging.getLogger().warning("Missing library jwt within web package.") + pass from . discovery import DiscoveryInfo, DiscoveryError # Used outside so we make it available through this file. -from . master_web_service import MasterWebService +from . platform_web_service import PlatformWebService _log = logging.getLogger(__name__) diff --git a/volttron/platform/web/admin_endpoints.py b/volttron/platform/web/admin_endpoints.py index 154cfe2e86..7dc7884a9e 100644 --- a/volttron/platform/web/admin_endpoints.py +++ b/volttron/platform/web/admin_endpoints.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -41,17 +41,17 @@ import re from urllib.parse import parse_qs -from volttron.platform.agent.known_identities import MASTER_WEB, AUTH +from volttron.platform.agent.known_identities import PLATFORM_WEB, AUTH try: from jinja2 import Environment, FileSystemLoader, select_autoescape, TemplateNotFound except ImportError: - logging.getLogger().warning("Missing jinja2 libaray in admin_endpoints.py") + logging.getLogger().warning("Missing jinja2 library in admin_endpoints.py") try: from passlib.hash import argon2 except ImportError: - logging.getLogger().warning("Missing passlib libaray in admin_endpoints.py") + logging.getLogger().warning("Missing passlib library in admin_endpoints.py") from watchdog_gevent import Observer from volttron.platform.agent.web import Response @@ -76,10 +76,6 @@ def __init__(self, rmq_mgmt=None, ssl_public_key: bytes = None, rpc_caller=None) self._rpc_caller = rpc_caller self._rmq_mgmt = rmq_mgmt - self._certs = None - - if rmq_mgmt is not None: - self._certs = Certs() self._pending_auths = None self._denied_auths = None @@ -111,8 +107,6 @@ def __init__(self, rmq_mgmt=None, ssl_public_key: bytes = None, rpc_caller=None) get_home() ) self._observer.start() - if ssl_public_key is not None: - self._certs = Certs() def reload_userdict(self): webuserpath = os.path.join(get_home(), 'web-users.json') @@ -120,7 +114,7 @@ def reload_userdict(self): def get_routes(self): """ - Returns a list of tuples with the routes for the adminstration endpoints + Returns a list of tuples with the routes for the administration endpoints available in it. :return: @@ -139,7 +133,7 @@ def admin(self, env, data): pass2 = decoded.get('password2') if pass1 == pass2 and pass1 is not None: - _log.debug("Setting master password") + _log.debug("Setting administrator password") self.add_user(username, pass1, groups=['admin']) return Response('', status='302', headers={'Location': '/admin/login.html'}) @@ -148,6 +142,7 @@ def admin(self, env, data): if 'login.html' in env.get('PATH_INFO') or '/admin/' == env.get('PATH_INFO'): template = template_env(env).get_template('login.html') + _log.debug("Login.html: {}".format(env.get('PATH_INFO'))) return Response(template.render(), content_type='text/html') return self.verify_and_dispatch(env, data) @@ -161,7 +156,7 @@ def verify_and_dispatch(self, env, data): """ from volttron.platform.web import get_bearer, NotAuthorized try: - claims = self._rpc_caller(MASTER_WEB, 'get_user_claims', get_bearer(env)).get() + claims = self._rpc_caller(PLATFORM_WEB, 'get_user_claims', get_bearer(env)).get() except NotAuthorized: _log.error("Unauthorized user attempted to connect to {}".format(env.get('PATH_INFO'))) return Response('

Unauthorized User

', status="401 Unauthorized") @@ -181,14 +176,18 @@ def verify_and_dispatch(self, env, data): except TemplateNotFound: return Response("

404 Not Found

", status="404 Not Found") - # if page == 'list_certs.html': - # html = template.render(certs=self._certs.get_all_cert_subjects()) if page == 'pending_auth_reqs.html': - self._pending_auths = self._rpc_caller.call(AUTH, 'get_authorization_failures').get() - self._denied_auths = self._rpc_caller.call(AUTH, 'get_authorization_denied').get() - self._approved_auths = self._rpc_caller.call(AUTH, 'get_authorization_approved').get() - if self._certs: - html = template.render(csrs=self._certs.get_pending_csr_requests(), + try: + self._pending_auths = self._rpc_caller.call(AUTH, 'get_authorization_pending').get(timeout=2) + self._denied_auths = self._rpc_caller.call(AUTH, 'get_authorization_denied').get(timeout=2) + self._approved_auths = self._rpc_caller.call(AUTH, 'get_authorization_approved').get(timeout=2) + except TimeoutError: + self._pending_auths = [] + self._denied_auths = [] + self._approved_auths = [] + # When messagebus is rmq, include pending csrs in the output pending_auth_reqs.html page + if self._rmq_mgmt is not None: + html = template.render(csrs=self._rpc_caller.call(AUTH, 'get_pending_csrs').get(timeout=4), auths=self._pending_auths, denied_auths=self._denied_auths, approved_auths=self._approved_auths) @@ -232,78 +231,101 @@ def __api_endpoint(self, endpoint, data): def __approve_csr_api(self, common_name): try: _log.debug("Creating cert and permissions for user: {}".format(common_name)) - self._certs.approve_csr(common_name) - permissions = self._rmq_mgmt.get_default_permissions(common_name) - self._rmq_mgmt.create_user_with_permissions(common_name, - permissions, - True) - data = dict(status=self._certs.get_csr_status(common_name), - cert=self._certs.get_cert_from_csr(common_name)) - data['cert'] = data['cert'].decode('utf-8') + self._rpc_caller.call(AUTH, 'approve_authorization_failure', common_name).wait(timeout=4) + data = dict(status=self._rpc_caller.call(AUTH, "get_pending_csr_status", common_name).get(timeout=2), + cert=self._rpc_caller.call(AUTH, "get_pending_csr_cert", common_name).get(timeout=2)) except ValueError as e: data = dict(status="ERROR", message=e.message) + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + return Response(jsonapi.dumps(data), content_type="application/json") def __deny_csr_api(self, common_name): try: - self._certs.deny_csr(common_name) + self._rpc_caller.call(AUTH, 'deny_authorization_failure', common_name).wait(timeout=2) data = dict(status="DENIED", message="The administrator has denied the request") except ValueError as e: data = dict(status="ERROR", message=e.message) + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + return Response(jsonapi.dumps(data), content_type="application/json") def __delete_csr_api(self, common_name): try: - self._certs.delete_csr(common_name) + self._rpc_caller.call(AUTH, 'delete_authorization_failure', common_name).wait(timeout=2) data = dict(status="DELETED", message="The administrator has denied the request") except ValueError as e: data = dict(status="ERROR", message=e.message) + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + return Response(jsonapi.dumps(data), content_type="application/json") def __pending_csrs_api(self): - csrs = [c for c in self._certs.get_pending_csr_requests()] - return Response(jsonapi.dumps(csrs), content_type="application/json") + try: + data = self._rpc_caller.call(AUTH, 'get_pending_csrs').get(timeout=4) + + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + + return Response(jsonapi.dumps(data), content_type="application/json") def __cert_list_api(self): - subjects = [dict(common_name=x.common_name) - for x in self._certs.get_all_cert_subjects()] - return Response(jsonapi.dumps(subjects), content_type="application/json") + try: + data = [dict(common_name=x.common_name) for x in + self._rpc_caller.call(AUTH, "get_all_pending_csr_subjects").get(timeout=2)] + + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + + return Response(jsonapi.dumps(data), content_type="application/json") def __approve_credential_api(self, user_id): try: _log.debug("Creating credential and permissions for user: {}".format(user_id)) - self._rpc_caller.call(AUTH, 'approve_authorization_failure', user_id).wait() + self._rpc_caller.call(AUTH, 'approve_authorization_failure', user_id).wait(timeout=4) data = dict(status='APPROVED', message="The administrator has approved the request") except ValueError as e: data = dict(status="ERROR", message=e.message) + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + return Response(jsonapi.dumps(data), content_type="application/json") def __deny_credential_api(self, user_id): try: - self._rpc_caller.call(AUTH, 'deny_authorization_failure', user_id) + self._rpc_caller.call(AUTH, 'deny_authorization_failure', user_id).wait(timeout=2) data = dict(status="DENIED", message="The administrator has denied the request") except ValueError as e: data = dict(status="ERROR", message=e.message) + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + return Response(jsonapi.dumps(data), content_type="application/json") def __delete_credential_api(self, user_id): try: - self._rpc_caller.call(AUTH, 'delete_authorization_failure', user_id) + self._rpc_caller.call(AUTH, 'delete_authorization_failure', user_id).wait(timeout=2) data = dict(status="DELETED", message="The administrator has denied the request") except ValueError as e: data = dict(status="ERROR", message=e.message) + except TimeoutError as e: + data = dict(status="ERROR", message=e.message) + return Response(jsonapi.dumps(data), content_type="application/json") def add_user(self, username, unencrypted_pw, groups=None, overwrite=False): diff --git a/volttron/platform/web/authenticate_endpoint.py b/volttron/platform/web/authenticate_endpoint.py index 2d17f231f1..0af6ff425f 100644 --- a/volttron/platform/web/authenticate_endpoint.py +++ b/volttron/platform/web/authenticate_endpoint.py @@ -88,7 +88,7 @@ def get_auth_token(self, env, data): _log.warning("Authentication must use POST request.") return Response('401 Unauthorized', status='401 Unauthorized', content_type='text/html') - assert len(self._userdict) > 0, "No users in user dictionary, set the master password first!" + assert len(self._userdict) > 0, "No users in user dictionary, set the administrator password first!" if not isinstance(data, dict): _log.debug("data is not a dict, decoding") diff --git a/volttron/platform/web/csr_endpoints.py b/volttron/platform/web/csr_endpoints.py index 9504939454..c0e5b7deb4 100644 --- a/volttron/platform/web/csr_endpoints.py +++ b/volttron/platform/web/csr_endpoints.py @@ -115,7 +115,7 @@ def _csr_request_new(self, env, data): response = None try: - if json_response['cert']: + if json_response.get('cert', None): json_response['cert'] = json_response['cert'].decode('utf-8') response = Response(jsonapi.dumps(json_response), content_type='application/json', diff --git a/volttron/platform/web/master_web_service.py b/volttron/platform/web/platform_web_service.py similarity index 98% rename from volttron/platform/web/master_web_service.py rename to volttron/platform/web/platform_web_service.py index ed7528687d..7bb613ee23 100644 --- a/volttron/platform/web/master_web_service.py +++ b/volttron/platform/web/platform_web_service.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -106,7 +106,7 @@ class DuplicateEndpointError(Exception): ) -class MasterWebService(Agent): +class PlatformWebService(Agent): """The service that is responsible for managing and serving registered pages Agents can register either a directory of files to serve or an rpc method @@ -120,7 +120,7 @@ def __init__(self, serverkey, identity, address, bind_web_address, Initialize the configuration of the base web service integration within the platform. """ - super(MasterWebService, self).__init__(identity, address, **kwargs) + super(PlatformWebService, self).__init__(identity, address, **kwargs) # no matter what we need to have a bind_web_address passed to us. if not bind_web_address: @@ -204,6 +204,10 @@ def websocket_send(self, endpoint, message): message)) self.appContainer.websocket_send(endpoint, message) + @RPC.export + def print_websocket_clients(self): + _log.debug(self.appContainer.endpoint_clients) + @RPC.export def get_bind_web_address(self): return self.bind_web_address @@ -460,6 +464,7 @@ def app_routing(self, env, start_response): # if ws4pi.socket is set then this connection is a web socket # and so we return the websocket response. + if 'ws4py.socket' in env: return env['ws4py.socket'](env, start_response) @@ -517,7 +522,7 @@ def is_json_content(self, env): def process_response(self, start_response, response): # if we are using the original response, then morph it into a werkzueg response. - # response = MasterWebService.convert_response_to_werkzueg(response) + # response = PlatformWebService.convert_response_to_werkzueg(response) # return response() # process the response start_response(response.status, response.headers) @@ -746,7 +751,7 @@ def startupagent(self, sender, **kwargs): get_fq_identity(self.core.identity)), rpc_caller=rpc_caller) if ssl_key is None or ssl_cert is None: - # Because the master.web service certificate is a client to rabbitmq we + # Because the platform.web service certificate is a client to rabbitmq we # can't use it directly therefore we use the -server on the file to specify # the server based file. base_filename = get_fq_identity(self.core.identity) + "-server" diff --git a/volttron/platform/web/webapp.py b/volttron/platform/web/webapp.py index 6a504b3558..29de9d4c86 100644 --- a/volttron/platform/web/webapp.py +++ b/volttron/platform/web/webapp.py @@ -9,11 +9,11 @@ class WebApplicationWrapper(object): """ A container class that will hold all of the applications registered - with it. The class provides a contianer for managing the routing of + with it. The class provides a container for managing the routing of websocket, static content, and rpc function calls. """ - def __init__(self, masterweb, host, port): - self.masterweb = masterweb + def __init__(self, platformweb, host, port): + self.platformweb = platformweb self.port = port self.host = host self.ws = WebSocketWSGIApplication(handler_cls=VolttronWebSocket) @@ -35,7 +35,7 @@ def __call__(self, environ, start_response): environ['identity'] = self._wsregistry[environ['PATH_INFO']] return self.ws(environ, start_response) - return self.masterweb.app_routing(environ, start_response) + return self.platformweb.app_routing(environ, start_response) def favicon(self, environ, start_response): """ @@ -49,7 +49,7 @@ def favicon(self, environ, start_response): def client_opened(self, client, endpoint, identity): ip = client.environ['REMOTE_ADDR'] - should_open = self.masterweb.vip.rpc.call(identity, 'client.opened', + should_open = self.platformweb.vip.rpc.call(identity, 'client.opened', ip, endpoint) if not should_open: _log.error("Authentication failure, closing websocket.") @@ -71,11 +71,11 @@ def client_opened(self, client, endpoint, identity): def client_received(self, endpoint, message): clients = self.endpoint_clients.get(endpoint, []) for identity, _ in clients: - self.masterweb.vip.rpc.call(identity, 'client.message', + self.platformweb.vip.rpc.call(identity, 'client.message', str(endpoint), str(message)) def client_closed(self, client, endpoint, identity, - reason="Client left without proper explaination"): + reason="Client left without proper explanation"): client_set = self.endpoint_clients.get(endpoint, set()) @@ -85,7 +85,7 @@ def client_closed(self, client, endpoint, identity, except KeyError: pass else: - self.masterweb.vip.rpc.call(identity, 'client.closed', endpoint) + self.platformweb.vip.rpc.call(identity, 'client.closed', endpoint) def create_ws_endpoint(self, endpoint, identity): if endpoint not in self.endpoint_clients: @@ -105,8 +105,7 @@ def websocket_send(self, endpoint, message): _log.debug('Sending message to clients!') clients = self.endpoint_clients.get(endpoint, []) if not clients: - _log.warn("There were no clients for endpoint {}".format( - endpoint)) + _log.warning("There were no clients for endpoint {}".format(endpoint)) for c in clients: identity, client = c _log.debug('Sending endpoint&&message {}&&{}'.format( diff --git a/volttron/utils/__init__.py b/volttron/utils/__init__.py index cfb69b4abf..3e0bfb5700 100644 --- a/volttron/utils/__init__.py +++ b/volttron/utils/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} - +import inspect import os import re @@ -138,3 +138,15 @@ def on_any_event(self, event): except BaseException as e: _log.error("Exception in callback: {}".format(e)) _log.debug("After callback on event {}".format(event)) + + +def print_stack(): + """ + Utility function to print the full frames stack of a function call. + + The format of the stack is filename->function:lineno + """ + called = 0 + for x in inspect.stack(): + _log.debug(f"stack: [{called}] {x.filename}->{x.function}:{x.lineno}") + called += 1 diff --git a/volttron/utils/frame_serialization.py b/volttron/utils/frame_serialization.py index 51bb846ce4..45c1e79e8c 100644 --- a/volttron/utils/frame_serialization.py +++ b/volttron/utils/frame_serialization.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -47,8 +47,13 @@ _log = logging.getLogger(__name__) +# python 3.8 formatting errors with utf-8 encoding. The ISO-8859-1 is equivilent to latin-1 +ENCODE_FORMAT = 'ISO-8859-1' + + def deserialize_frames(frames: List[Frame]) -> List: decoded = [] + for x in frames: if isinstance(x, list): decoded.append(deserialize_frames(x)) @@ -57,7 +62,7 @@ def deserialize_frames(frames: List[Frame]) -> List: elif isinstance(x, float): decoded.append(x) elif isinstance(x, bytes): - decoded.append(x.decode('utf-8')) + decoded.append(x.decode(ENCODE_FORMAT)) elif isinstance(x, str): decoded.append(x) elif x is not None: @@ -66,27 +71,25 @@ def deserialize_frames(frames: List[Frame]) -> List: decoded.append(x) continue try: - d = x.bytes.decode('utf-8') + d = x.bytes.decode(ENCODE_FORMAT) except UnicodeDecodeError as e: - _log.debug(e) + _log.error(f"Unicode decode error: {e}") decoded.append(x) continue try: decoded.append(jsonapi.loads(d)) except JSONDecodeError: decoded.append(d) - # _log.debug("deserialized: {}".format(decoded)) return decoded def serialize_frames(data: List[Any]) -> List[Frame]: frames = [] - #_log.info("Serializing: {}".format(data)) for x in data: try: if isinstance(x, list) or isinstance(x, dict): - frames.append(Frame(jsonapi.dumps(x).encode('utf-8'))) + frames.append(Frame(jsonapi.dumps(x).encode(ENCODE_FORMAT))) elif isinstance(x, Frame): frames.append(x) elif isinstance(x, bytes): @@ -100,8 +103,7 @@ def serialize_frames(data: List[Any]) -> List[Frame]: elif x is None: frames.append(Frame(x)) else: - #_log.info("serialize_frames:{}".format(x)) - frames.append(Frame(x.encode('utf-8'))) + frames.append(Frame(x.encode(ENCODE_FORMAT))) except TypeError as e: import sys sys.exit(0) diff --git a/volttron/utils/frozendict.py b/volttron/utils/frozendict.py index eb6039ac73..0ecbd918e5 100644 --- a/volttron/utils/frozendict.py +++ b/volttron/utils/frozendict.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/utils/prompt.py b/volttron/utils/prompt.py index 1acd325e58..58525f5043 100644 --- a/volttron/utils/prompt.py +++ b/volttron/utils/prompt.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttron/utils/rmq_config_params.py b/volttron/utils/rmq_config_params.py index d98796a450..aa639ee7d7 100644 --- a/volttron/utils/rmq_config_params.py +++ b/volttron/utils/rmq_config_params.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -52,6 +52,28 @@ _log = logging.getLogger(__name__) +def read_config_file(filename): + data = {} + try: + with open(filename, 'r') as yaml_file: + data = yaml.safe_load(yaml_file) + except IOError as exc: + _log.error("Error reading from file: {}".format(filename)) + except yaml.YAMLError as exc: + _log.error("Yaml Error: {}".format(filename)) + return data + + +def write_to_config_file(filename, data): + try: + with open(filename, 'w') as yaml_file: + yaml.dump(data, yaml_file, default_flow_style=False) + except IOError as exc: + _log.error("Error writing to file: {}".format(filename)) + except yaml.YAMLError as exc: + _log.error("Yaml Error: {}".format(filename)) + + class RMQConfig(object): """ Utility class to read/write RabbitMQ related configuration @@ -97,6 +119,7 @@ def _set_default_config(self): self.config_opts.setdefault('user', self.instance_name + '-admin') rmq_home = os.path.join(os.path.expanduser("~"), "rabbitmq_server/rabbitmq_server-3.7.7") + self.config_opts.setdefault('rabbitmq-service', False) self.config_opts.setdefault("rmq-home", rmq_home) def load_rmq_config(self, volttron_home=None): @@ -133,7 +156,6 @@ def write_rmq_config(self, volttron_home=None): raise - @property def hostname(self): return self.config_opts.get('host') @@ -198,6 +220,10 @@ def local_password(self): def node_name(self): return self.config_opts.get('node-name', 'rabbit') + @property + def rabbitmq_as_service(self): + return self.config_opts.get('rabbitmq-service', False) + def reconnect_delay(self): return self.config_opts.get('reconnect-delay') @@ -249,4 +275,10 @@ def certificate_data(self, data): def node_name(self, name): self.config_opts['node-name'] = name + @rabbitmq_as_service.setter + def rabbitmq_as_service(self, service_flag): + self.config_opts['rabbitmq-service'] = service_flag + + + diff --git a/volttron/utils/rmq_mgmt.py b/volttron/utils/rmq_mgmt.py index 2b2480a162..9b82d07a34 100644 --- a/volttron/utils/rmq_mgmt.py +++ b/volttron/utils/rmq_mgmt.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -53,15 +53,15 @@ NewConnectionError) import os from volttron.platform import certs -from volttron.platform import jsonapi -from . rmq_config_params import RMQConfig +from volttron.platform import jsonapi, get_home +from .rmq_config_params import RMQConfig, read_config_file, write_to_config_file + try: import yaml except ImportError: raise RuntimeError('PyYAML must be installed before running this script ') - _log = logging.getLogger(__name__) """ @@ -124,7 +124,6 @@ def _call_grequest(self, method_name, url_suffix, ssl_auth=True, **kwargs): raise e return response - def _get_authentication_args(self, ssl_auth): """ Return authentication kwargs for request/greqeust @@ -296,7 +295,7 @@ def delete_user(self, user, ssl_auth=None): try: response = self._http_delete_request(url, ssl_auth) except requests.exceptions.HTTPError as e: - if not e.message.startswith("404 Client Error"): + if e.response.status_code == 404: raise def delete_users_in_bulk(self, users, ssl_auth=None): @@ -329,7 +328,7 @@ def get_user_permissions(self, user, vhost=None, ssl_auth=None): response = self._http_get_request(url, ssl_auth) return response except requests.exceptions.HTTPError as e: - if e.message.startswith("404 Client Error"): + if e.response.status_code == 404: # No permissions are set for this user yet. Return none # so caller can try to set permissions return None @@ -349,8 +348,8 @@ def set_user_permissions(self, permissions, user, vhost=None, ssl_auth=None): """ ssl_auth = ssl_auth if ssl_auth is not None else self.is_ssl vhost = vhost if vhost else self.rmq_config.virtual_host - _log.debug("Create READ, WRITE and CONFIGURE permissions for the user: " - "{}".format(user)) + #_log.debug("Create READ, WRITE and CONFIGURE permissions for the user: " + # "{}".format(user)) url = '/api/permissions/{vhost}/{user}'.format(vhost=vhost, user=user) response = self._http_put_request(url, body=permissions, ssl_auth=ssl_auth) @@ -759,6 +758,25 @@ def delete_multiplatform_parameter(self, component, parameter_name, vhost=None): :param vhost: virtual host :return: """ + vhome = get_home() + if component == 'shovel': + config_file = os.path.join(vhome, 'rabbitmq_shovel_config.yml') + key = 'shovel' + else: + config_file = os.path.join(vhome, 'rabbitmq_federation_config.yml') + key = 'federation-upstream' + config = read_config_file(config_file) + print("Removing certificate paths from the shovel config file. Please remove remote certificates manually " + "from the VOLTTRON_HOME folder if needed") + + names = parameter_name.split("-") + + try: + del config[key][names[1]]['certificates'] + write_to_config_file(config_file, config) + except (KeyError, IndexError) as e: + print(f"names:{e}") + pass self.delete_parameter(component, parameter_name, vhost, ssl_auth=self.rmq_config.is_ssl) @@ -771,7 +789,7 @@ def build_connection_param(self, rmq_user, ssl_auth=None, retry_attempt=30, retr """ ssl_auth = ssl_auth if ssl_auth is not None else self.is_ssl crt = self.rmq_config.crts - heartbeat_interval = 20 #sec + heartbeat_interval = 20 # sec try: if ssl_auth: @@ -815,8 +833,7 @@ def build_remote_connection_param(self, rmq_user, rmq_address, ssl_auth=None, ce :param ssl_auth: If SSL based connection or not :return: """ - - from urllib import parse + from urllib import parse parsed_addr = parse.urlparse(rmq_address) ssl_auth = ssl_auth if ssl_auth is not None else self.is_ssl @@ -826,6 +843,7 @@ def build_remote_connection_param(self, rmq_user, rmq_address, ssl_auth=None, ce try: if ssl_auth: certfile = self.certs.cert_file(rmq_user, True) + _log.info("build_remote_connection_param: {}".format(certfile)) if cert_dir: # remote cert file for agents will be in agent-data/remote-certs dir certfile = os.path.join(cert_dir, os.path.basename(certfile)) @@ -865,7 +883,8 @@ def build_remote_connection_param(self, rmq_user, rmq_address, ssl_auth=None, ce def build_rmq_address(self, user=None, password=None, host=None, port=None, vhost=None, - ssl_auth=None, ssl_params=None): + ssl_auth=None, ssl_params=None, + certs_dict=None): """ Build RMQ address for federation or shovel connection :param ssl_auth: @@ -878,7 +897,7 @@ def build_rmq_address(self, user=None, password=None, host = host if host else self.rmq_config.hostname vhost = vhost if vhost else self.rmq_config.virtual_host if ssl_auth: - ssl_params = ssl_params if ssl_params else self.get_ssl_url_params() + ssl_params = ssl_params if ssl_params else self.get_ssl_url_params(user, certs_dict) rmq_address = None try: @@ -892,7 +911,6 @@ def build_rmq_address(self, user=None, password=None, vhost=vhost, ssl_params=ssl_params) else: - rmq_address = "amqp://{user}:{pwd}@{host}:{port}/{vhost}".format( user=user, pwd=password, host=host, port=port, @@ -951,7 +969,6 @@ def build_agent_connection(self, identity, instance_name): # vctl certs create-ssl-keypair should be used to create a cert/key pair # and then agents should be started. try: - _log.info("Creating ca signed certs for {}".format(rmq_user)) self.rmq_config.crts.create_signed_cert_files(rmq_user, overwrite=False) except Exception as e: _log.error("Exception creating certs. {}".format(e)) @@ -971,7 +988,7 @@ def build_agent_connection(self, identity, instance_name): return param - def build_shovel_connection(self, identity, instance_name, host, port, vhost, is_ssl): + def build_remote_plugin_connection(self, rmq_user, host, port, vhost, is_ssl, certs_dict=None): """ Check if RabbitMQ user and certs exists for this agent, if not create a new one. Add access control/permissions if necessary. @@ -984,7 +1001,7 @@ def build_shovel_connection(self, identity, instance_name, host, port, vhost, is :param is_ssl: Flag to indicate if SSL connection or not :return: Return connection uri """ - rmq_user = instance_name + '.' + identity + #rmq_user = instance_name + '.' + identity config_access = "{user}|{user}.pubsub.*|{user}.zmq.*|amq.*".format( user=rmq_user) read_access = "volttron|{}".format(config_access) @@ -994,12 +1011,19 @@ def build_shovel_connection(self, identity, instance_name, host, port, vhost, is self.create_user_with_permissions(rmq_user, permissions) ssl_params = None - if is_ssl: + if certs_dict is None: + if is_ssl: + self.rmq_config.crts.create_signed_cert_files(rmq_user, + overwrite=False) + ssl_params = self.get_ssl_url_params(user=rmq_user) + return self.build_rmq_address(rmq_user, self.rmq_config.admin_pwd, + host, port, vhost, is_ssl, ssl_params) + else: self.rmq_config.crts.create_signed_cert_files(rmq_user, overwrite=False) - ssl_params = self.get_ssl_url_params(user=rmq_user) - return self.build_rmq_address(rmq_user, self.rmq_config.admin_pwd, - host, port, vhost, is_ssl, ssl_params) + return self.build_rmq_address(rmq_user, self.rmq_config.admin_pwd, + host, port, vhost, is_ssl, ssl_params, + certs_dict=certs_dict) def build_router_connection(self, identity, instance_name): """ @@ -1025,19 +1049,26 @@ def build_router_connection(self, identity, instance_name): retry_delay=2) return param - def get_ssl_url_params(self, user=None): + + def get_ssl_url_params(self, user=None, certs_dict=None): """ Return SSL parameter string :return: """ - root_ca_name, server_cert, admin_user = \ - certs.Certs.get_admin_cert_names(self.rmq_config.instance_name) if not user: user = admin_user - ca_file = self.rmq_config.crts.cert_file(self.rmq_config.crts.trusted_ca_name) - cert_file = self.rmq_config.crts.cert_file(user) - key_file = self.rmq_config.crts.private_key_file(user) + if certs_dict is None: + + root_ca_name, server_cert, admin_user = \ + certs.Certs.get_admin_cert_names(self.rmq_config.instance_name) + ca_file = self.rmq_config.crts.cert_file(self.rmq_config.crts.trusted_ca_name) + cert_file = self.rmq_config.crts.cert_file(user) + key_file = self.rmq_config.crts.private_key_file(user) + else: + ca_file = certs_dict['ca_file'] + cert_file = certs_dict['cert_file'] + key_file = certs_dict['key_file'] return "cacertfile={ca}&certfile={cert}&keyfile={key}" \ "&verify=verify_peer&fail_if_no_peer_cert=true" \ "&auth_mechanism=external".format(ca=ca_file, diff --git a/volttron/utils/rmq_setup.py b/volttron/utils/rmq_setup.py index ce96127a5d..78e219d724 100644 --- a/volttron/utils/rmq_setup.py +++ b/volttron/utils/rmq_setup.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -50,9 +50,10 @@ from shutil import copy import gevent import yaml +import time from . rmq_mgmt import RabbitMQMgmt -from . rmq_config_params import RMQConfig +from . rmq_config_params import RMQConfig, read_config_file, write_to_config_file from volttron.platform import certs from volttron.platform import get_home @@ -60,6 +61,8 @@ execute_command) from volttron.utils.prompt import prompt_response, y, y_or_n from volttron.platform.agent.utils import get_platform_instance_name +from volttron.platform import jsonapi +from urllib.parse import urlparse _log = logging.getLogger(os.path.basename(__file__)) @@ -89,20 +92,17 @@ def _start_rabbitmq_without_ssl(rmq_config, conf_file, env=None): if os.path.exists(rmq_home): os.environ['RABBITMQ_HOME'] = rmq_home else: - print("\nERROR:\n" - "Missing key 'rmq_home' in RabbitMQ config and RabbitMQ is " - "not installed in default path: \n" - "~/rabbitmq_server/rabbitmq_server-3.7.7 \n" - "Please set the correct RabbitMQ installation path in " - "rabbitmq_config.yml") + _log.error("\nMissing key 'rmq_home' in RabbitMQ config and RabbitMQ is " + "not installed in default path: \n" + "~/rabbitmq_server/rabbitmq_server-3.7.7 \n" + "Please set the correct RabbitMQ installation path in " + "rabbitmq_config.yml") exit(1) else: if not os.path.exists(rmq_home) or not os.path.exists(os.path.join( rmq_home, 'sbin/rabbitmq-server')): - print("\nERROR:\n" - "Invalid rmq-home value ({}). Please fix rmq-home " - "in {} and rerun this script".format( - rmq_home, rmq_config.volttron_rmq_config)) + _log.error("\nInvalid rmq-home value ({}). Please fix rmq-home " + "in {} and rerun this script".format(rmq_home, rmq_config.volttron_rmq_config)) exit(1) else: os.environ['RABBITMQ_HOME'] = rmq_home @@ -145,7 +145,7 @@ def write_env_file(rmq_config, conf_file, env=None): # If there is a custom node name then we need to write a env file, set amqp port in this env file, and # point to conf file path if rmq_config.node_name != 'rabbit': - nodebase = os.path.dirname(conf_file) + nodebase = os.path.dirname(os.path.dirname(conf_file)) # Creating a custom node name with custome port. Create a env file and add entry to point to conf file in # the env file env_entries = """NODENAME={} @@ -182,26 +182,42 @@ def _create_federation_setup(admin_user, admin_password, is_ssl, vhost, vhome): federation_config_file = os.path.join(vhome, 'rabbitmq_federation_config.yml') - federation_config = _read_config_file(federation_config_file) + federation_config = read_config_file(federation_config_file) federation = federation_config.get('federation-upstream') if federation: - ssl_params = None - if is_ssl: - ssl_params = rmq_mgmt.get_ssl_url_params() + #ssl_params = None + #if is_ssl: + # ssl_params = rmq_mgmt.get_ssl_url_params() for host, upstream in federation.items(): try: - name = "upstream-{vhost}-{host}".format(vhost=upstream['virtual-host'], + name = "upstream-{host}-{vhost}".format(vhost=upstream['virtual-host'], host=host) _log.debug("Upstream Server: {name} ".format(name=name)) - address = rmq_mgmt.build_rmq_address(admin_user, - admin_password, host, - upstream['port'], - upstream['virtual-host'], - is_ssl, - ssl_params) + certs_dict = None + rmq_user = None + if 'certificates' in upstream: + _log.debug("upstream parameters under destination: {}".format(upstream)) + is_csr = upstream['certificates'].get('csr', False) + if is_csr: + certs_dict = dict() + certs_dict['ca_file'] = upstream['certificates']['remote_ca'] + certs_dict['cert_file'] = upstream['certificates']['public_cert'] + certs_dict['key_file'] = upstream['certificates']['private_key'] + rmq_user = upstream['federation-user'] + else: + # certificates key not found in upstream config + _log.debug("ERROR: certificates key not found in federation config. Cannot make connection to remote server without remote certificates") + continue + # Build destination address + address = rmq_mgmt.build_remote_plugin_connection(rmq_user, + host, + upstream['port'], + upstream['virtual-host'], + is_ssl, + certs_dict=certs_dict) prop = dict(vhost=vhost, component="federation-upstream", name=name, @@ -232,12 +248,11 @@ def _create_shovel_setup(instance_name, local_host, port, vhost, vhome, is_ssl): """ shovel_config_file = os.path.join(vhome, 'rabbitmq_shovel_config.yml') - shovel_config = _read_config_file(shovel_config_file) + shovel_config = read_config_file(shovel_config_file) shovels = shovel_config.get('shovel', {}) - rmq_mgmt = RabbitMQMgmt() - ssl_params = None + rmq_mgmt = RabbitMQMgmt() _log.debug("shovel config: {}".format(shovel_config)) try: for remote_host, shovel in shovels.items(): @@ -245,14 +260,30 @@ def _create_shovel_setup(instance_name, local_host, port, vhost, vhome, is_ssl): _log.debug("shovel parameters: {}".format(shovel)) for identity, topics in pubsub_config.items(): # Build source address - src_uri = rmq_mgmt.build_shovel_connection(identity, instance_name, + rmq_user = instance_name + '.' + identity + src_uri = rmq_mgmt.build_remote_plugin_connection(rmq_user, local_host, port, vhost, is_ssl) + certs_dict = None + if 'certificates' in shovel: + _log.debug("shovel parameters under destination: {}".format(shovel)) + is_csr = shovel['certificates'].get('csr', False) + if is_csr: + certs_dict = dict() + certs_dict['ca_file'] = shovel['certificates']['remote_ca'] + certs_dict['cert_file'] = shovel['certificates']['public_cert'] + certs_dict['key_file'] = shovel['certificates']['private_key'] + rmq_user = shovel['shovel-user'] + else: + # destination key not found in shovel config + _log.debug("ERROR: certificates key not found in shovel config. Cannot make connection to remote server without remote certificates") + continue # Build destination address - dest_uri = rmq_mgmt.build_shovel_connection(identity, instance_name, + dest_uri = rmq_mgmt.build_remote_plugin_connection(rmq_user, remote_host, shovel['port'], shovel['virtual-host'], - is_ssl) + is_ssl, certs_dict=certs_dict) + if not isinstance(topics, list): topics = [topics] for topic in topics: @@ -272,7 +303,6 @@ def _create_shovel_setup(instance_name, local_host, port, vhost, vhome, is_ssl): "dest-uri": dest_uri, "dest-exchange": "volttron"} ) - _log.debug("shovel property: {}".format(prop)) rmq_mgmt.set_parameter("shovel", name, prop) @@ -282,11 +312,28 @@ def _create_shovel_setup(instance_name, local_host, port, vhost, vhome, is_ssl): for ids in agent_ids: local_identity = ids[0] remote_identity = ids[1] - src_uri = rmq_mgmt.build_shovel_connection(local_identity, instance_name, - local_host, port, vhost, is_ssl) - dest_uri = rmq_mgmt.build_shovel_connection(local_identity, instance_name, + rmq_user = instance_name + '.' + local_identity + src_uri = rmq_mgmt.build_shovel_connection(rmq_user, + local_host, port, + vhost, is_ssl) + + certs_dict = None + if 'certificates' in shovel: + _log.debug("shovel parameters under destination: {}".format(shovel)) + is_csr = shovel['certificates'].get('csr', False) + if is_csr: + certs_dict = dict() + certs_dict['ca_file'] = shovel['certificates']['remote_ca'] + certs_dict['cert_file'] = shovel['certificates']['public_cert'] + certs_dict['key_file'] = shovel['certificates']['private_key'] + rmq_user = shovel['shovel-user'] + _log.debug(f"certs parameters: {certs_dict}") + + # Build destination address + dest_uri = rmq_mgmt.build_shovel_connection(rmq_user, remote_host, shovel['port'], - shovel['virtual-host'], is_ssl) + shovel['virtual-host'], + is_ssl, certs_dict=certs_dict) _log.info("Creating shovel to make RPC call to remote Agent" ": {}".format(remote_identity)) @@ -469,9 +516,8 @@ def _create_certs(rmq_config, admin_client_name, server_cert_name): 'common-name']) or all( k in cert_data for k in ['ca-public-key', 'ca-private-key'])): - print( - "\nERROR:\n" - "No certificate data found in {} or certificate data is " + _log.error( + "\nNo certificate data found in {} or certificate data is " "incomplete. certificate-data should either contain all " "the details necessary to create a self signed CA or " "point to the file path of an existing CA's public and " @@ -539,28 +585,29 @@ def setup_rabbitmq_volttron(setup_type, verbose=False, prompt=False, instance_na :param prompt :raises RabbitMQSetupAlreadyError """ + # TODO: we should ideally pass the rmq_conf_file here and use that + # setup any new shovels, federation links etc and then update + # the config in vhome. Else we will overwrite existing connection settings + # when trying to create new ones if not instance_name: instance_name = get_platform_instance_name(prompt=True) # Store config this is checked at startup store_message_bus_config(message_bus='rmq', instance_name=instance_name) rmq_config = RMQConfig() - if verbose: - _log.setLevel(logging.DEBUG) - _log.debug("verbose set to True") - _log.debug(get_home()) - logging.getLogger("requests.packages.urllib3.connectionpool" - "").setLevel(logging.DEBUG) - else: - _log.setLevel(logging.INFO) - logging.getLogger("requests.packages.urllib3.connectionpool" - "").setLevel(logging.WARN) - + success = True if prompt: # ignore any existing rabbitmq_config.yml in vhome. Prompt user and # generate a new rabbitmq_config.yml - _create_rabbitmq_config(rmq_config, setup_type) - + try: + success = _create_rabbitmq_config(rmq_config, setup_type, verbose) + except Exception as exc: + _log.error(f"{exc}") + return exc + if not success: + # something went wrong when creating rmq config + # do not create anything. return + return # Load either the newly created config or config passed try: rmq_config.load_rmq_config() @@ -662,13 +709,14 @@ def setup_rabbitmq_volttron(setup_type, verbose=False, prompt=False, instance_na _log.error("Unknown option. Exiting....") -def _create_rabbitmq_config(rmq_config, setup_type): +def _create_rabbitmq_config(rmq_config, setup_type, verbose=False): """ Prompt user for required details and create a rabbitmq_config.yml file in volttron home :param setup_type: type of rmq setup - single, federation, shovel or all """ - + success = True + # TODO - check if error cases(that are not exceptions) exit correctly for single, federation and all if setup_type == 'single' or setup_type == 'all': if os.path.exists(rmq_config.volttron_rmq_config): prompt = "rabbitmq_config.yml exists in {} Do you wish to " \ @@ -784,7 +832,8 @@ def _create_rabbitmq_config(rmq_config, setup_type): # if option was all then config_opts would be not null # if this was called with just setup_type = shovel, load existing # config so that we don't overwrite existing list - prompt_shovels(rmq_config.volttron_home) + success = prompt_shovels(rmq_config.volttron_home, verbose) + return success def is_file_readable(file_path): @@ -792,7 +841,7 @@ def is_file_readable(file_path): if os.path.exists(file_path) and os.access(file_path, os.R_OK): return True else: - print("\nInvalid file path. Path does not exists or is not readable") + _log.error("\nInvalid file path. Path does not exists or is not readable") return False @@ -851,7 +900,7 @@ def prompt_upstream_servers(vhome): 'rabbitmq_federation_config.yml') if os.path.exists(federation_config_file): - federation_config = _read_config_file(federation_config_file) + federation_config = read_config_file(federation_config_file) else: federation_config = {} @@ -868,14 +917,23 @@ def prompt_upstream_servers(vhome): port = prompt_response(prompt, default=5671) prompt = 'Virtual host of the upstream server: ' vhost = prompt_response(prompt, default='volttron') + upstream_servers[host] = {'port': port, 'virtual-host': vhost} + rmq_mgmt = RabbitMQMgmt() + instance_name = get_platform_instance_name() + upstream_user = 'federation' + rmq_mgmt.build_agent_connection(upstream_user, instance_name) + import time + time.sleep(2) + upstream_servers[host]['federation-user'] = instance_name + "." + upstream_user + upstream_servers[host]['certificates'] = _prompt_csr_request(upstream_user, host, 'federation') federation_config['federation-upstream'] = upstream_servers - _write_to_config_file(federation_config_file, federation_config) + write_to_config_file(federation_config_file, federation_config) -def prompt_shovels(vhome): +def prompt_shovels(vhome, verbose=False): """ Prompt for shovel configuration and save in rabbitmq_shovel_config.yml :return: @@ -883,7 +941,7 @@ def prompt_shovels(vhome): shovel_config_file = os.path.join(vhome, 'rabbitmq_shovel_config.yml') if os.path.exists(shovel_config_file): - shovel_config = _read_config_file(shovel_config_file) + shovel_config = read_config_file(shovel_config_file) else: shovel_config = {} @@ -892,6 +950,7 @@ def prompt_shovels(vhome): count = prompt_response(prompt, default=1) count = int(count) i = 0 + is_error = True try: for i in range(0, count): @@ -901,23 +960,43 @@ def prompt_shovels(vhome): port = prompt_response(prompt, default=5671) prompt = 'Virtual host of the destination server: ' vhost = prompt_response(prompt, default='volttron') - shovels[host] = {'port': port, 'virtual-host': vhost} + rmq_mgmt = RabbitMQMgmt() + instance_name = get_platform_instance_name() + shovel_user = 'shovel{}'.format(host) + + rmq_mgmt.build_agent_connection(shovel_user, instance_name) + import time + + time.sleep(2) + shovels[host]['shovel-user'] = instance_name + "." + shovel_user + #_log.debug("shovel_user: {}".format(shovel_user)) + + + certs_config = _prompt_csr_request(shovel_user, host, 'shovel', verbose) + if not certs_config: + # we did not get certificates - neither existing, nor through csr process + # exit + return False + shovels[host]['certificates'] = certs_config + prompt = prompt_response('\nDo you want shovels for ' 'PUBSUB communication? ', valid_answers=y_or_n, default='N') if prompt in y: - prompt = 'Name of the agent publishing the topic:' + prompt = 'VIP identity of the agent publishing the topic:' agent_id = prompt_response(prompt, mandatory=True) prompt = 'List of PUBSUB topics to publish to ' \ 'this remote instance (comma seperated)' topics = prompt_response(prompt, mandatory=True) - topics = topics.split(",") - shovels[host]['pubsub'] = {agent_id : topics} + import re + topics = re.sub(r"\s", "", topics) + multi_topics = topics.split(",") + shovels[host]['pubsub'] = {agent_id : multi_topics} prompt = prompt_response( '\nDo you want shovels for RPC communication? ', valid_answers=y_or_n, default='N') @@ -935,33 +1014,144 @@ def prompt_shovels(vhome): remote_agent_id = prompt_response(prompt, mandatory=True) agent_ids.append([local_agent_id, remote_agent_id]) shovels[host]['rpc'] = {remote_instance: agent_ids} + except (IOError, TimeoutError, ConnectionError) as e: + raise e except ValueError as e: _log.error("Invalid choice in the configuration: {}".format(e)) else: shovel_config['shovel'] = shovels - _write_to_config_file(shovel_config_file, shovel_config) + write_to_config_file(shovel_config_file, shovel_config) + return True -def _read_config_file(filename): - data = {} - try: - with open(filename, 'r') as yaml_file: - data = yaml.safe_load(yaml_file) - except IOError as exc: - _log.error("Error reading from file: {}".format(filename)) - except yaml.YAMLError as exc: - _log.error("Yaml Error: {}".format(filename)) - return data +def _prompt_csr_request(rmq_user, host, type, verbose=False): + prompt = prompt_response('\nDo you have certificates signed by remote CA? ', + valid_answers=y_or_n, + default='N') + csr_config = dict() -def _write_to_config_file(filename, data): - try: - with open(filename, 'w') as yaml_file: - yaml.dump(data, yaml_file, default_flow_style=False) - except IOError as exc: - _log.error("Error writing to file: {}".format(filename)) - except yaml.YAMLError as exc: - _log.error("Yaml Error: {}".format(filename)) + if prompt in y: + prompt = 'Full path to remote CA certificate: ' + ca_file = prompt_response(prompt, default='') + csr_config['csr'] = True + if not os.path.exists(ca_file): + raise IOError(f"Path does not exist: {ca_file}. Please check the path and try again") + # ca cert + csr_config['remote_ca'] = ca_file + + prompt = 'Full path to remote CA signed public certificate: ' + cert_file = prompt_response(prompt, default='') + if not os.path.exists(cert_file): + raise IOError(f"Path does not exist: {cert_file}. Please check the path and try again") + # public cert + csr_config['public_cert'] = cert_file + + prompt = 'Full path to private certificate: ' + private_cert = prompt_response(prompt, default='') + if not os.path.exists(private_cert): + raise IOError(f"Path does not exist: {private_cert}. Please check the path and try again") + # private_key + csr_config['private_key'] = private_cert + else: + remote_https_address = "https://{}:8443".format(host) + prompt = 'Path to remote web interface: ' + + remote_addr = prompt_response(prompt, default=remote_https_address) + parsed_address = urlparse(remote_addr) + if parsed_address.scheme not in ('https',): + raise IOError(f"Remote web interface is not valid: {parsed_address}. Please check and try again") + + # request CSR from remote host + ca_file, cert_file, prvt_file = _request_csr(rmq_user, remote_addr, type,verbose) + if ca_file is not None and cert_file is not None and prvt_file is not None: + csr_config['csr'] = True + # _log.debug("CA file path: {}".format(ca_file)) + csr_config['remote_ca'] = ca_file + + # public cert + csr_config['public_cert'] = cert_file + # _log.debug("Public cert path: {}".format(certfile)) + + # private_key + crts = certs.Certs() + # _log.debug("Private cert path: {}".format(prvtfile)) + csr_config['private_key'] = prvt_file + + return csr_config + + +def _request_csr(rmq_user, remote_addr, type, verbose=False): + ca_file = None + certfile = None + prvtfile = None + + if not verbose: + # so that we don't get info level logs showing up during our multiple csr requests + logging.getLogger("volttron.platform.web.discovery").setLevel(logging.WARNING) + + response = request_cert_for_plugin(rmq_user, remote_addr, type) + + success = False + retry_attempt = 0 + max_retries = 12 + denied = False + if response is None: + # Error /status is pending + raise ConnectionError("Please check the connection to the remote instance") + elif isinstance(response, tuple): + if response[0] == 'PENDING': + _log.info("Waiting for administrator to accept the CSR request.") + # Try for two minutes. + # TODO make max attempts and/or sleep interval optional arg + while not success and retry_attempt < max_retries: + + if response is None: + break + elif response[0] == 'PENDING': + if verbose: + _log.info("Waiting for administrator to accept the CSR request.") + sleep_period = 10 + time.sleep(sleep_period) + if verbose: + _log.info("Retrying request for signing certificate") + response = request_cert_for_plugin(rmq_user, remote_addr, type) + retry_attempt += 1 + elif response[0] == 'DENIED': + break + else: + success = True + if response[0] == 'DENIED': + denied = True + _log.info("Request for signed certificate(CSR) has been denied by the remote instance administrator") + else: + success = True + if not denied and retry_attempt >= max_retries and not success: + raise TimeoutError("Maximum retry attempts for CSR reached. " + "Please retry command once administrator of remote VOLTTRON instance is ready to approve" + " certificate signing request") + + if success: + # remote cert file for shovels will be in $VOLTTRON_HOME/certificates/shovels dir + cert_dir = None + filename = None + if os.path.exists(response): + certfile = response + cert_dir, filename = os.path.split(certfile) + else: + raise IOError(f"Path to {response} does not exist. ") + metafile = certfile[:-4] + ".json" + metadata = jsonapi.loads(open(metafile).read()) + local_keyfile = metadata['local_keyname'] + ca_name = metadata['remote_ca_name'] + # remote ca + ca_file = '/'.join((get_remote_certs_dir(type), ca_name + '.crt')) + + # private_key + crts = certs.Certs() + prvtfile = crts.private_key_file(name=local_keyfile) + + return ca_file, certfile, prvtfile def stop_rabbit(rmq_home, env=None, quite=False): @@ -997,6 +1187,114 @@ def restart_ssl(rmq_home, env=None): execute_command(cmd, err_prefix="Error reloading ssl certificates", env=env, logger=_log) +def get_remote_certs_dir(type): + base_dir = os.path.join(get_home(), 'certificates', type) + if not os.path.exists(base_dir): + os.makedirs(base_dir) + remote_certs_path = os.path.join(base_dir) + if not os.path.exists(remote_certs_path): + os.makedirs(remote_certs_path) + return remote_certs_path + + +def request_plugin_cert(csr_server, fully_qualified_local_identity, discovery_info, type): + import grequests + + # from volttron.platform.web import DiscoveryInfo + config = RMQConfig() + + if not config.is_ssl: + raise ValueError("Only can create csr for rabbitmq based platform in ssl mode.") + + crts = certs.Certs() + csr_request = crts.create_csr(fully_qualified_local_identity, discovery_info.instance_name) + # The csr request requires the fully qualified identity that is + # going to be connected to the external instance. + # + # The remote instance id is the instance name of the remote platform + # concatenated with the identity of the local fully qualified identity. + remote_cert_name = "{}.{}".format(discovery_info.instance_name, + fully_qualified_local_identity) + remote_ca_name = discovery_info.instance_name + "_ca" + + json_request = dict( + csr=csr_request.decode("utf-8"), + identity=remote_cert_name, + hostname=config.hostname + ) + request = grequests.post(csr_server + "/csr/request_new", + json=jsonapi.dumps(json_request), + verify=False) + response = grequests.map([request]) + + if response and isinstance(response, list): + response[0].raise_for_status() + response = response[0] + + j = response.json() + + status = j.get('status') + cert = j.get('cert') + message = j.get('message', '') + remote_certs_dir = get_remote_certs_dir(type) + if status == 'SUCCESSFUL' or status == 'APPROVED': + crts.save_agent_remote_info(remote_certs_dir, + fully_qualified_local_identity, + remote_cert_name, cert.encode("utf-8"), + remote_ca_name, + discovery_info.rmq_ca_cert.encode("utf-8")) + os.environ['REQUESTS_CA_BUNDLE'] = os.path.join(remote_certs_dir, "requests_ca_bundle") + elif status == 'PENDING': + pass + elif status == 'DENIED': + return status, None + elif status == 'ERROR': + err = "Error retrieving certificate from {}\n".format( + config.hostname) + err += "{}".format(message) + raise ValueError(err) + else: # No response + return None + + certfile = os.path.join(remote_certs_dir, remote_cert_name + ".crt") + if os.path.exists(certfile): + return certfile + else: + return status, message + + +def request_cert_for_plugin(rmq_user, https_address, type): + value = None + parsed_address = urlparse(https_address) + if parsed_address.scheme in ('https',): + from volttron.platform.web import DiscoveryInfo + from volttron.platform.agent.utils import get_platform_instance_name, get_fq_identity + info = DiscoveryInfo.request_discovery_info(https_address) + + # This is if both remote and local are rmq message buses. + if info.messagebus_type == 'rmq': + fqid_local = get_fq_identity(rmq_user) + + # Check if we already have the cert, if so use it instead of requesting cert again + remote_certs_dir = get_remote_certs_dir(type) + remote_cert_name = "{}.{}".format(info.instance_name, fqid_local) + certfile = os.path.join(remote_certs_dir, remote_cert_name + ".crt") + + if os.path.exists(certfile): + value = certfile + else: + # request for new CSR + response = request_plugin_cert(https_address, fqid_local, info, type) + if response is None: + _log.error("there was no response from the server") + value = None + elif isinstance(response, tuple): + value = response + elif os.path.exists(response): + value = response + return value + + def check_rabbit_status(rmq_home=None, env=None): status = True if not rmq_home: @@ -1080,5 +1378,3 @@ def start_rabbit(rmq_home, env=None): except KeyboardInterrupt: _log.info("Exiting setup process") - - diff --git a/volttron_data/agent_templates/common/agent.py_ b/volttron_data/agent_templates/common/agent.py_ index 34bd54276b..0fdee58d16 100644 --- a/volttron_data/agent_templates/common/agent.py_ +++ b/volttron_data/agent_templates/common/agent.py_ @@ -15,18 +15,18 @@ __version__ = "__version_string__" def __module_name__(config_path, **kwargs): - """Parses the Agent configuration and returns an instance of + """ + Parses the Agent configuration and returns an instance of the agent created using that configuration. :param config_path: Path to a configuration file. - :type config_path: str :returns: __class_name__ :rtype: __class_name__ """ try: config = utils.load_config(config_path) - except StandardError: + except Exception: config = {} if not config: @@ -35,9 +35,7 @@ def __module_name__(config_path, **kwargs): setting1 = int(config.get('setting1', 1)) setting2 = config.get('setting2', "some/random/topic") - return __class_name__(setting1, - setting2, - **kwargs) + return __class_name__(setting1, setting2, **kwargs) class __class_name__(Agent): @@ -45,8 +43,7 @@ class __class_name__(Agent): Document agent constructor here. """ - def __init__(self, setting1=1, setting2="some/random/topic", - **kwargs): + def __init__(self, setting1=1, setting2="some/random/topic", **kwargs): super(__class_name__, self).__init__(**kwargs) _log.debug("vip_identity: " + self.core.identity) @@ -56,11 +53,10 @@ class __class_name__(Agent): self.default_config = {"setting1": setting1, "setting2": setting2} - - #Set a default configuration to ensure that self.configure is called immediately to setup - #the agent. + # Set a default configuration to ensure that self.configure is called immediately to setup + # the agent. self.vip.config.set_default("config", self.default_config) - #Hook self.configure up to changes to the configuration file "config". + # Hook self.configure up to changes to the configuration file "config". self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") def configure(self, config_name, action, contents): @@ -88,15 +84,20 @@ class __class_name__(Agent): self._create_subscriptions(self.setting2) def _create_subscriptions(self, topic): - #Unsubscribe from everything. + """ + Unsubscribe from all pub/sub topics and create a subscription to a topic in the configuration which triggers + the _handle_publish callback + """ self.vip.pubsub.unsubscribe("pubsub", None, None) self.vip.pubsub.subscribe(peer='pubsub', prefix=topic, callback=self._handle_publish) - def _handle_publish(self, peer, sender, bus, topic, headers, - message): + def _handle_publish(self, peer, sender, bus, topic, headers, message): + """ + Callback triggered by the subscription setup using the topic from the agent's config file + """ pass @Core.receiver("onstart") @@ -109,11 +110,12 @@ class __class_name__(Agent): Usually not needed if using the configuration store. """ - #Example publish to pubsub - #self.vip.pubsub.publish('pubsub', "some/random/topic", message="HI!") + # Example publish to pubsub + self.vip.pubsub.publish('pubsub', "some/random/topic", message="HI!") - #Exmaple RPC call - #self.vip.rpc.call("some_agent", "some_method", arg1, arg2) + # Example RPC call + # self.vip.rpc.call("some_agent", "some_method", arg1, arg2) + pass @Core.receiver("onstop") def onstop(self, sender, **kwargs): @@ -128,9 +130,11 @@ class __class_name__(Agent): """ RPC method - May be called from another agent via self.core.rpc.call """ + May be called from another agent via self.core.rpc.call + """ return self.setting1 + arg1 - arg2 + def main(): """Main method called to start the agent.""" utils.vip_main(__module_name__, __identity__ diff --git a/volttron_data/agent_templates/common/config b/volttron_data/agent_templates/common/config index 5af739343c..3d9429c932 100644 --- a/volttron_data/agent_templates/common/config +++ b/volttron_data/agent_templates/common/config @@ -1,10 +1,10 @@ { # VOLTTRON config files are JSON with support for python style comments. - "setting1": 2, #Integers + "setting1": 2, # Integers "setting2": "some/random/topic2", #Strings - "setting3": true, #Booleans: remember that in JSON true and false are not capitalized. + "setting3": true, # Booleans: remember that in JSON true and false are not capitalized. "setting4": false, - "setting5": 5.1, #Floating point numbers. + "setting5": 5.1, # Floating point numbers. "setting6": [1,2,3,4], #Lists - "setting7": {"setting7a": "a", "setting7b": "b"} #Objects -} \ No newline at end of file + "setting7": {"setting7a": "a", "setting7b": "b"} # Objects +} diff --git a/volttron_data/agent_templates/historian/agent.py_ b/volttron_data/agent_templates/historian/agent.py_ index 956b90d63c..1519e582cc 100644 --- a/volttron_data/agent_templates/historian/agent.py_ +++ b/volttron_data/agent_templates/historian/agent.py_ @@ -7,6 +7,8 @@ __docformat__ = 'reStructuredText' import logging import sys import foo_db +from typing import Optional + from volttron.platform.agent import utils from volttron.platform.agent.base_historian import BaseHistorian @@ -44,8 +46,7 @@ class __class_name__(BaseHistorian): Document historian here. """ - def __init__(self, connection_parameters={}, - **kwargs): + def __init__(self, connection_parameters: Optional[dict] = None, **kwargs): # The publish_to_historian function will run in a # separate thread unless you change this to True. # Unless you need to interact with the VOLTTRON platform @@ -64,12 +65,14 @@ class __class_name__(BaseHistorian): self.update_default_config(config) def configure(self, configuration): - # The base historian will call this whenever the - # Historian is reconfigured in the main process thread. + """ + The base historian will call this whenever the Historian is reconfigured in the main process thread. - # If the Historian is already running historian_teardown - # will be called before this is called and - # historian_setup will be called afterwards. + If the Historian is already running historian_teardown will be called before this is called and historian_setup + will be called afterwards. + + :param configuration: dictionary representation of the agent's config file + """ connection_parameters = configuration["connection_parameters"] @@ -81,8 +84,11 @@ class __class_name__(BaseHistorian): self.db_port = connection_parameters.get("port") def publish_to_historian(self, to_publish_list): - # Called automatically by the BaseHistorian class when data is available to be - # published. + """ + Called automatically by the BaseHistorian class when data is available to be published. + + :param to_publish_list: list of dictionaries containing data to be added to the historian's data store + """ # This is run in a separate thread from the main agent thread. This means that # this function may block for a short period of time without fear of @@ -116,8 +122,7 @@ class __class_name__(BaseHistorian): # If everything is published in a single batch then self.report_all_handled # must be called. - _log.debug("publish_to_historian number of items: {}" - .format(len(to_publish_list))) + _log.debug("publish_to_historian number of items: {}".format(len(to_publish_list))) # Here it may be appropriate to check to see if our connection is still # active and restoring it as needed. @@ -140,18 +145,18 @@ class __class_name__(BaseHistorian): # The details of how to publish data will be data store specific. self.db_connection.publish(item["topic"], item["source"], item["value"], item["meta"]) self.report_handled(item) - except StandardError as e: + except Exception as e: _log.error("Failed to publish {}: {}".format(item, repr(e))) - #Example batch publish + # Example batch publish batch = [(item["topic"], item["source"], item["value"], item["meta"]) for item in to_publish_list] try: # The details of how to publish batch data will be data store specific. self.db_connection.batch_publish(batch) self.report_all_handled() - except StandardError: - _log.error("Failed to publish {}".format(repr(e))) + except Exception as e: + _log.error("Failed to publish {}".format(repr(e))) def manage_db_size(self, history_limit_timestamp, storage_limit_gb): """ @@ -169,8 +174,10 @@ class __class_name__(BaseHistorian): self.db_connection.delete(older_than=history_limit_timestamp) def historian_setup(self): - # Setup any connection needed for this historian. - # If a connection does not need to be maintained this function may be deleted. + """ + Setup any connection needed for this historian. + If a connection does not need to be maintained this function may be deleted. + """ # This is called from the same thread as publish_to_historian. @@ -186,14 +193,15 @@ class __class_name__(BaseHistorian): self.historian_teardown() try: self.db_connection = foo_db.connect(self.db_address, self.db_port) - except StandardError as e: + except Exception as e: _log.error("Failed to create data base connection: {}".format(e)) - def historian_teardown(self): - # Kill the connection if needed. - # If a connection does not need to be maintained this function may be deleted. - # This is called to shut down the connection before reconfiguration. + """ + Kill the connection if needed. + If a connection does not need to be maintained this function may be deleted. + This is called to shut down the connection before reconfiguration. + """ if self.db_connection is not None: self.db_connection.close() self.db_connection = None @@ -205,7 +213,6 @@ class __class_name__(BaseHistorian): """ return __version__ - # The following methods are for adding query support. This will allow other # agents to get data from the store and will allow this historian to act as # the platform.historian for VOLTTRON. @@ -239,7 +246,6 @@ class __class_name__(BaseHistorian): topic_name: {metadata_key:metadata_value, ...} ...} :rtype: dict - """ # The details of how to get meta data will be data store specific. # return self.db_connection.get_meta(topics) @@ -309,14 +315,12 @@ class __class_name__(BaseHistorian): :return: Results of the query :rtype: dict - """ # The details of how to get data will be data store specific. # return self.db_connection.get_data(topic, start, end, agg_type, # agg_period, skip, count, order) raise NotImplemented() - def query_aggregate_topics(self): """ This function is called by @@ -326,18 +330,18 @@ class __class_name__(BaseHistorian): :return: List of tuples containing (topic_name, aggregation_type, aggregation_time_period, metadata) :rtype: list - """ - [] + return [] def query_topics_by_pattern(self, topic_pattern): - """ Find the list of topics and its id for a given topic_pattern + """ + Find the list of topics and its id for a given topic_pattern - :return: returns list of dictionary object {topic_name:id}""" + :return: returns list of dictionary object {topic_name:id} + """ raise NotImplemented() - def main(): """Main method called to start the agent.""" utils.vip_main(historian, __identity__ diff --git a/volttron_data/agent_templates/historian/config b/volttron_data/agent_templates/historian/config index 9271706547..28419f6dc7 100644 --- a/volttron_data/agent_templates/historian/config +++ b/volttron_data/agent_templates/historian/config @@ -18,7 +18,7 @@ "gather_timing_data": false, # For benchmarking historians. "readonly": false, # Turn off data collection. - # Which catagories of data to collect. + # Which categories of data to collect. "capture_device_data": true, "capture_log_data": true, "capture_analysis_data": true, @@ -27,4 +27,4 @@ "message_publish_count": 10000, # How frequently to log the total number of publishes. "history_limit_days": null, # Number of days back to store data. Historian must implement manage_db_size. "storage_limit_gb": null # Limit to data size. Historian must implement manage_db_size. -} \ No newline at end of file +} diff --git a/volttrontesting/conftest.py b/volttrontesting/conftest.py index 68e5e611b1..c237dd2a35 100644 --- a/volttrontesting/conftest.py +++ b/volttrontesting/conftest.py @@ -1,6 +1,36 @@ import sys +import psutil +from volttron.platform import jsonapi from volttrontesting.fixtures.volttron_platform_fixtures import * # Add system path of the agent's directory sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) + +test_to_instance = {} + + +def pytest_runtest_logstart(nodeid, location): + before = 0 + print(f"test node: {nodeid} location: {location}") + for proc in psutil.process_iter(): + if 'volttron' in proc.name().lower(): + before += 1 + test_to_instance[nodeid] = dict(before=before, name=nodeid) + + +def pytest_runtest_logfinish(nodeid, location): + # After each test the nodid is the name of the test + after = 0 + print(f"test node: {nodeid} location: {location}") + for proc in psutil.process_iter(): + if 'volttron' in proc.name().lower(): + after += 1 + test_to_instance[nodeid]["after"] = after + + if test_to_instance[nodeid]["before"] == test_to_instance[nodeid]["after"]: + del test_to_instance[nodeid] + else: + with open("volttron_test_output_count.txt", 'w') as fp: + fp.write(jsonapi.dumps(test_to_instance, indent=2)) + # print(f"finished test nodeid: {nodeid} location: {location}") diff --git a/volttrontesting/fixtures/cert_fixtures.py b/volttrontesting/fixtures/cert_fixtures.py index 307719fcaf..2448244098 100644 --- a/volttrontesting/fixtures/cert_fixtures.py +++ b/volttrontesting/fixtures/cert_fixtures.py @@ -4,7 +4,10 @@ from types import SimpleNamespace from volttron.platform.certs import CertWrapper -from volttron.platform.certs import Certs +from volttron.platform.certs import Certs, _load_key + +#TODO: Combine cert_profile_1 and cert_profile_2 +# Verify whether we need it as dictionary or SimpleNamespace @contextlib.contextmanager @@ -50,3 +53,54 @@ def certs_profile_1(certificate_dir, fqdn=None, num_server_certs=1, num_client_c ns.client_certs.append(cert_ns) yield ns + + +def certs_profile_2(certificate_dir, fqdn=None, num_server_certs=1, num_client_certs=3): + """ + Profile 2 generates the specified number of server and client certificates + all signed by the same self-signed certificate. + + Usage: + + certs = certs_profile_1("/tmp/abc", 1, 2) + ... + + :param certificate_dir: + :return: ns + """ + + certs = Certs(certificate_dir) + data = {'C': 'US', + 'ST': 'Washington', + 'L': 'Richland', + 'O': 'pnnl', + 'OU': 'volttron_test', + 'CN': "myca"} + if not certs.ca_exists(): + ca_cert, ca_pk = certs.create_root_ca(**data) + # If the root ca already exists, get ca_cert and ca_pk from current root ca + else: + ca_cert = certs.cert(certs.root_ca_name) + ca_pk = _load_key(certs.private_key_file(certs.root_ca_name)) + # print(f"ca_cert: {ca_cert}") + # print(f"ca_pk: {ca_pk}") + # print(f"ca_pk_bytes: {certs.get_pk_bytes(certs.root_ca_name)}") + ns = dict(ca_cert=ca_cert, ca_key=ca_pk, ca_cert_file=certs.cert_file(certs.root_ca_name), + ca_key_file=certs.private_key_file(certs.root_ca_name), server_certs=[], client_certs=[]) + + for x in range(num_server_certs): + cert, key = certs.create_signed_cert_files(f"server{x}", cert_type="server", fqdn=fqdn) + + cert_ns = dict(key=key, cert=cert, cert_file=certs.cert_file(f"server{x}"), + key_file=certs.private_key_file(f"server{x}")) + + ns['server_certs'].append(cert_ns) + + for x in range(num_client_certs): + + cert, pk1 = certs.create_signed_cert_files(f"client{x}") + cert_ns = dict(key=pk1, cert=cert, cert_file=certs.cert_file(f"client{x}"), + key_file=certs.private_key_file(f"client{x}")) + ns['client_certs'].append(cert_ns) + + return ns diff --git a/volttrontesting/fixtures/docker_wrapper.py b/volttrontesting/fixtures/docker_wrapper.py index 133fceab9c..311674a93c 100644 --- a/volttrontesting/fixtures/docker_wrapper.py +++ b/volttrontesting/fixtures/docker_wrapper.py @@ -46,7 +46,7 @@ def create_container(image_name: str, ports: dict = None, env: dict = None, comm """ # Create docker client (Uses localhost as agent connection. - client = docker.from_env() + client = docker.from_env(version="auto") try: full_docker_image = image_name diff --git a/volttrontesting/fixtures/rmq_test_setup.py b/volttrontesting/fixtures/rmq_test_setup.py index 37f01d8652..4d7143fba8 100644 --- a/volttrontesting/fixtures/rmq_test_setup.py +++ b/volttrontesting/fixtures/rmq_test_setup.py @@ -152,7 +152,7 @@ def create_rmq_volttron_setup(vhome=None, ssl_auth=False, env=None, # instance name is the basename of the volttron home now. rabbit_config_obj.instance_name = instance_name - rabbit_config_obj.node_name = os.path.basename(vhome) + rabbit_config_obj.node_name = os.path.basename(os.path.dirname(vhome)) os.mkdir(os.path.join(vhome, "rmq_node_data")) @@ -168,7 +168,8 @@ def create_rmq_volttron_setup(vhome=None, ssl_auth=False, env=None, host, rabbit_config_obj.rabbitmq_config['mgmt-port'] = get_hostname_and_random_port(10000, 20000) host, rabbit_config_obj.rabbitmq_config['mgmt-port-ssl'] = get_hostname_and_random_port(10000, 20000) rabbit_config_obj.rabbitmq_config['host'] = host - rabbit_config_obj.rabbitmq_config['certificate-data']['common-name'] = '{}_root_ca'.format(rabbit_config_obj.instance_name) + rabbit_config_obj.rabbitmq_config['certificate-data']['common-name'] = \ + '{}_root_ca'.format(rabbit_config_obj.instance_name) from pprint import pprint print("RMQ Node Name: {} env: ".format(rabbit_config_obj.node_name)) diff --git a/volttrontesting/fixtures/volttron_platform_fixtures.py b/volttrontesting/fixtures/volttron_platform_fixtures.py index 2e8f2a7339..d9450f5459 100644 --- a/volttrontesting/fixtures/volttron_platform_fixtures.py +++ b/volttrontesting/fixtures/volttron_platform_fixtures.py @@ -1,7 +1,10 @@ import contextlib import os +from pathlib import Path import shutil +from typing import Optional +import psutil import pytest from volttron.platform import is_rabbitmq_available @@ -14,7 +17,9 @@ PRINT_LOG_ON_SHUTDOWN = False HAS_RMQ = is_rabbitmq_available() -rmq_skipif = pytest.mark.skipif(not HAS_RMQ, reason='RabbitMQ is not setup') +ci_skipif = pytest.mark.skipif(os.getenv('CI', None) == 'true', reason='SSL does not work in CI') +rmq_skipif = pytest.mark.skipif(not HAS_RMQ, + reason='RabbitMQ is not setup and/or SSL does not work in CI') def print_log(volttron_home): @@ -28,9 +33,9 @@ def print_log(volttron_home): print('NO LOG FILE AVAILABLE.') -def build_wrapper(vip_address, should_start=True, messagebus='zmq', remote_platform_ca=None, - instance_name=None, secure_agent_users=False, **kwargs): - +def build_wrapper(vip_address: str, should_start: bool = True, messagebus: str = 'zmq', + remote_platform_ca: Optional[str] = None, + instance_name: Optional[str] = None, secure_agent_users: bool = False, **kwargs): wrapper = PlatformWrapper(ssl_auth=kwargs.pop('ssl_auth', False), messagebus=messagebus, instance_name=instance_name, @@ -47,6 +52,13 @@ def cleanup_wrapper(wrapper): # wrapper.remove_all_agents() # Shutdown handles case where the platform hasn't started. wrapper.shutdown_platform() + if wrapper.p_process is not None: + if psutil.pid_exists(wrapper.p_process.pid): + proc = psutil.Process(wrapper.p_process.pid) + proc.terminate() + if not wrapper.debug_mode: + assert not Path(wrapper.volttron_home).parent.exists(), \ + f"{str(Path(wrapper.volttron_home).parent)} wasn't cleaned!" def cleanup_wrappers(platforms): @@ -56,7 +68,7 @@ def cleanup_wrappers(platforms): @pytest.fixture(scope="module", params=[dict(messagebus='zmq', ssl_auth=False), - #pytest.param(dict(messagebus='rmq', ssl_auth=True), marks=rmq_skipif), + # pytest.param(dict(messagebus='rmq', ssl_auth=True), marks=rmq_skipif), ]) def volttron_instance_msgdebug(request): print("building msgdebug instance") @@ -65,9 +77,10 @@ def volttron_instance_msgdebug(request): messagebus=request.param['messagebus'], ssl_auth=request.param['ssl_auth']) - yield wrapper - - cleanup_wrapper(wrapper) + try: + yield wrapper + finally: + cleanup_wrapper(wrapper) @pytest.fixture(scope="module") @@ -104,10 +117,19 @@ def volttron_instance(request, **kwargs): messagebus=request.param['messagebus'], ssl_auth=request.param['ssl_auth'], **kwargs) + wrapper_pid = wrapper.p_process.pid - yield wrapper - - cleanup_wrapper(wrapper) + try: + yield wrapper + except Exception as ex: + print(ex.args) + finally: + cleanup_wrapper(wrapper) + if not wrapper.debug_mode: + assert not Path(wrapper.volttron_home).exists() + # Final way to kill off the platform wrapper for the tests. + if psutil.pid_exists(wrapper_pid): + psutil.Process(wrapper_pid).kill() # Use this fixture to get more than 1 volttron instance for test. @@ -136,9 +158,10 @@ def test_function_that_uses_n_instances(get_volttron_instances): @return: function that can used to get any number of volttron instances for testing. """ - all_instances = [] + instances = [] def get_n_volttron_instances(n, should_start=True, **kwargs): + nonlocal instances get_n_volttron_instances.count = n instances = [] for i in range(0, n): @@ -149,12 +172,17 @@ def get_n_volttron_instances(n, should_start=True, **kwargs): ssl_auth=request.param['ssl_auth'], **kwargs) instances.append(wrapper) - instances = instances if n > 1 else instances[0] + if should_start: + for w in instances: + assert w.is_running() + # instances = instances if n > 1 else instances[0] # setattr(get_n_volttron_instances, 'instances', instances) - get_n_volttron_instances.instances = instances - return instances + get_n_volttron_instances.instances = instances if n > 1 else instances[0] + return instances if n > 1 else instances[0] def cleanup(): + nonlocal instances + print(f"My instances: {get_n_volttron_instances.count}") if isinstance(get_n_volttron_instances.instances, PlatformWrapper): print('Shutting down instance: {}'.format( get_n_volttron_instances.instances)) @@ -166,9 +194,11 @@ def cleanup(): get_n_volttron_instances.instances[i].volttron_home)) cleanup_wrapper(get_n_volttron_instances.instances[i]) - request.addfinalizer(cleanup) + try: + yield get_n_volttron_instances + finally: + cleanup() - return get_n_volttron_instances # Use this fixture when you want a single instance of volttron platform for zmq message bus @@ -213,6 +243,7 @@ def volttron_instance_rmq(request): @pytest.fixture(scope="module", params=[ dict(messagebus='zmq', ssl_auth=False), + pytest.param(dict(messagebus='zmq', ssl_auth=True), marks=ci_skipif), pytest.param(dict(messagebus='rmq', ssl_auth=True), marks=rmq_skipif), ]) def volttron_instance_web(request): @@ -235,13 +266,17 @@ def volttron_instance_web(request): cleanup_wrapper(wrapper) +#TODO: Add functionality for http use case for tests @pytest.fixture(scope="module", params=[ - dict(sink='zmq_web', source='zmq'), - pytest.param(dict(sink='rmq_web', source='zmq'), marks=rmq_skipif), - pytest.param(dict(sink='rmq_web', source='rmq'), marks=rmq_skipif), - pytest.param(dict(sink='zmq_web', source='rmq'), marks=rmq_skipif), + dict(sink='zmq_web', source='zmq', zmq_ssl=False), + pytest.param(dict(sink='zmq_web', source='zmq', zmq_ssl=True), marks=ci_skipif), + pytest.param(dict(sink='rmq_web', source='zmq', zmq_ssl=False), marks=rmq_skipif), + pytest.param(dict(sink='rmq_web', source='rmq', zmq_ssl=False), marks=rmq_skipif), + pytest.param(dict(sink='zmq_web', source='rmq', zmq_ssl=False), marks=rmq_skipif), + pytest.param(dict(sink='zmq_web', source='rmq', zmq_ssl=True), marks=rmq_skipif), + ]) def volttron_multi_messagebus(request): """ This fixture allows multiple two message bus types to be configured to work together @@ -254,6 +289,7 @@ def volttron_multi_messagebus(request): :param request: :return: """ + def get_volttron_multi_msgbus_instances(instance_name1=None, instance_name2=None): print("volttron_multi_messagebus source: {} sink: {}".format(request.param['source'], request.param['sink'])) @@ -264,7 +300,13 @@ def get_volttron_multi_msgbus_instances(instance_name1=None, instance_name2=None web_address = 'https://{hostname}:{port}'.format(hostname=hostname, port=port) messagebus = 'rmq' ssl_auth = True + elif request.param['sink'] == 'zmq_web' and request.param['zmq_ssl'] is True: + hostname, port = get_hostname_and_random_port() + web_address = 'https://{hostname}:{port}'.format(hostname=hostname, port=port) + messagebus = 'zmq' + ssl_auth = True else: + hostname, port = get_hostname_and_random_port() web_address = "http://{}".format(get_rand_ip_and_port()) messagebus = 'zmq' ssl_auth = False @@ -275,6 +317,7 @@ def get_volttron_multi_msgbus_instances(instance_name1=None, instance_name2=None bind_web_address=web_address, volttron_central_address=web_address, instance_name="volttron1") + # sink.web_admin_api.create_web_admin("admin", "admin") source_address = get_rand_vip() messagebus = 'zmq' @@ -287,6 +330,13 @@ def get_volttron_multi_msgbus_instances(instance_name1=None, instance_name2=None if sink.messagebus == 'rmq': # sink_ca_file = sink.certsobj.cert_file(sink.certsobj.root_ca_name) + source = build_wrapper(source_address, + ssl_auth=ssl_auth, + messagebus=messagebus, + volttron_central_address=sink.bind_web_address, + remote_platform_ca=sink.certsobj.cert_file(sink.certsobj.root_ca_name), + instance_name='volttron2') + elif sink.messagebus == 'zmq' and sink.ssl_auth is True: source = build_wrapper(source_address, ssl_auth=ssl_auth, messagebus=messagebus, @@ -304,21 +354,26 @@ def get_volttron_multi_msgbus_instances(instance_name1=None, instance_name2=None return source, sink def cleanup(): - cleanup_wrapper(get_volttron_multi_msgbus_instances.source) - cleanup_wrapper(get_volttron_multi_msgbus_instances.sink) - + # Handle the case where source or sink fail to be created + try: + cleanup_wrapper(get_volttron_multi_msgbus_instances.source) + except AttributeError as e: + print(e) + try: + cleanup_wrapper(get_volttron_multi_msgbus_instances.sink) + except AttributeError as e: + print(e) request.addfinalizer(cleanup) return get_volttron_multi_msgbus_instances - @contextlib.contextmanager def get_test_volttron_home(messagebus: str, web_https=False, web_http=False, has_vip=True, volttron_home: str = None, config_params: dict = None, env_options: dict = None): """ - Create a full volttronn_Home test environment with all of the options available in the environment + Create a full volttronn_home test environment with all of the options available in the environment (os.environ) and configuration file (volttron_home/config) in order to test from. @param messagebus: @@ -432,5 +487,5 @@ def get_test_volttron_home(messagebus: str, web_https=False, web_http=False, has finally: os.environ.clear() os.environ.update(env_cpy) - if not os.environ.get("DEBUG", 0) != 1 and not os.environ.get("DEBUG_MODE",0): + if not os.environ.get("DEBUG", 0) != 1 and not os.environ.get("DEBUG_MODE", 0): shutil.rmtree(volttron_home, ignore_errors=True) diff --git a/volttrontesting/platform/auth_tests/conftest.py b/volttrontesting/platform/auth_tests/conftest.py new file mode 100644 index 0000000000..db512664fc --- /dev/null +++ b/volttrontesting/platform/auth_tests/conftest.py @@ -0,0 +1,7 @@ +def assert_auth_entries_same(e1, e2): + for field in ['domain', 'address', 'user_id', 'credentials', 'comments', + 'enabled']: + assert e1[field] == e2[field] + for field in ['roles', 'groups']: + assert set(e1[field]) == set(e2[field]) + assert e1['capabilities'] == e2['capabilities'] diff --git a/volttrontesting/platform/auth_test.py b/volttrontesting/platform/auth_tests/test_auth.py similarity index 90% rename from volttrontesting/platform/auth_test.py rename to volttrontesting/platform/auth_tests/test_auth.py index 7c81057c57..38525c173c 100644 --- a/volttrontesting/platform/auth_test.py +++ b/volttrontesting/platform/auth_tests/test_auth.py @@ -4,27 +4,12 @@ import pytest from volttron.platform import jsonrpc -from volttron.platform import keystore from volttrontesting.utils.utils import poll_gevent_sleep from volttron.platform.vip.agent.errors import VIPError from volttron.platform import jsonapi from volttron.platform.auth import AuthFile -def build_agent(platform, identity): - """Build an agent, configure its keys and return the agent.""" - keys = keystore.KeyStore(os.path.join(platform.volttron_home, - identity + '.keys')) - keys.generate() - agent = platform.build_agent(identity=identity, - serverkey=platform.serverkey, - publickey=keys.public, - secretkey=keys.secret) - # Make publickey easily accessible for these tests - agent.publickey = keys.public - return agent - - @pytest.fixture def build_two_test_agents(volttron_instance): """Returns two agents for testing authorization @@ -32,8 +17,8 @@ def build_two_test_agents(volttron_instance): The first agent is the "RPC callee." The second agent is the unauthorized "RPC caller." """ - agent1 = build_agent(volttron_instance, 'agent1') - agent2 = build_agent(volttron_instance, 'agent2') + agent1 = volttron_instance.build_agent(identity='agent1') + agent2 = volttron_instance.build_agent(identity='agent2') gevent.sleep(1) agent1.foo = lambda x: x @@ -42,14 +27,17 @@ def build_two_test_agents(volttron_instance): agent1.vip.rpc.export(method=agent1.foo) agent1.vip.rpc.allow(agent1.foo, 'can_call_foo') - yield agent1, agent2 - - agent1.core.stop() - agent2.core.stop() - auth_file = AuthFile(os.path.join(volttron_instance.volttron_home, 'auth.json')) - allow_entries = auth_file.read_allow_entries() - auth_file.remove_by_indices(list(range(3, len(allow_entries)))) - gevent.sleep(0.5) + try: + yield agent1, agent2 + finally: + agent1.core.stop() + agent2.core.stop() + auth_file = AuthFile(os.path.join(volttron_instance.volttron_home, 'auth.json')) + allow_entries = auth_file.read_allow_entries() + auth_file.remove_by_indices(list(range(3, len(allow_entries)))) + # TODO if we have to wait for auth propagation anyways why do we create new agents for each test case + # we should just update capabilities, at least we will save on agent creation and tear down time + gevent.sleep(3) @pytest.fixture @@ -60,11 +48,12 @@ def build_agents_with_capability_args(volttron_instance): The first agent is the "RPC callee." The second agent is the unauthorized "RPC caller." """ - agent1 = build_agent(volttron_instance, 'agent1') - gevent.sleep(1) - agent2 = build_agent(volttron_instance, 'agent2') + # Can't call the fixture directly so build our own agent here. + agent1 = volttron_instance.build_agent(identity='agent1') + agent2 = volttron_instance.build_agent(identity='agent2') gevent.sleep(1) + agent1.foo = lambda x: x agent1.foo.__name__ = 'foo' @@ -86,8 +75,6 @@ def build_agents_with_capability_args(volttron_instance): gevent.sleep(0.5) - - @pytest.fixture def build_protected_pubsub(volttron_instance, build_two_agents_pubsub_agents): @@ -102,11 +89,11 @@ def protected_pubsub_fn(topic, capabilities, topic_regex=None, topic_file = os.path.join(volttron_instance.volttron_home, 'protected_topics.json') with open(topic_file, 'w') as f: jsonapi.dump(topic_dict, f) - gevent.sleep(.5) + gevent.sleep(1) if add_capabilities: volttron_instance.add_capabilities(agent2.publickey, capabilities) - gevent.sleep(.2) + gevent.sleep(2) return {'agent1': agent2, 'agent2': agent2, 'topic': topic, 'instance': volttron_instance, 'messages': msgs, diff --git a/volttrontesting/platform/auth_control_test.py b/volttrontesting/platform/auth_tests/test_auth_control.py similarity index 70% rename from volttrontesting/platform/auth_control_test.py rename to volttrontesting/platform/auth_tests/test_auth_control.py index db8913e14e..4d91722f18 100644 --- a/volttrontesting/platform/auth_control_test.py +++ b/volttrontesting/platform/auth_tests/test_auth_control.py @@ -1,9 +1,11 @@ -import gevent import os import re import subprocess import pytest from mock import MagicMock + +from volttrontesting.platform.auth_tests.conftest import assert_auth_entries_same +from volttrontesting.utils.platformwrapper import with_os_environ from volttrontesting.utils.utils import AgentMock from volttron.platform.vip.agent import Agent from volttron.platform.auth import AuthService @@ -62,27 +64,27 @@ def test_auth(): yield auth -def test_get_authorization_failures(mock_auth_service, test_auth): +def test_get_authorization_pending(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - auth_failure = mock_auth.get_authorization_failures()[0] - assert auth['domain'] == auth_failure['domain'] - assert auth['address'] == auth_failure['address'] - assert auth['mechanism'] == auth_failure['mechanism'] - assert auth['credentials'] == auth_failure['credentials'] - assert auth['user_id'] == auth_failure['user_id'] - assert auth_failure['retries'] == 1 + auth_pending = mock_auth.get_authorization_pending()[0] + assert auth['domain'] == auth_pending['domain'] + assert auth['address'] == auth_pending['address'] + assert auth['mechanism'] == auth_pending['mechanism'] + assert auth['credentials'] == auth_pending['credentials'] + assert auth['user_id'] == auth_pending['user_id'] + assert auth_pending['retries'] == 1 @pytest.mark.control def test_approve_authorization_failure(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 mock_auth.approve_authorization_failure(auth['user_id']) assert len(mock_auth.auth_entries) == 0 @@ -90,16 +92,16 @@ def test_approve_authorization_failure(mock_auth_service, test_auth): mock_auth.read_auth_file() assert len(mock_auth.auth_entries) == 1 assert len(mock_auth._auth_approved) == 1 - assert len(mock_auth._auth_failures) == 0 + assert len(mock_auth._auth_pending) == 0 @pytest.mark.control def test_deny_approved_authorization(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 assert len(mock_auth._auth_approved) == 0 mock_auth.approve_authorization_failure(auth['user_id']) @@ -108,10 +110,9 @@ def test_deny_approved_authorization(mock_auth_service, test_auth): assert len(mock_auth._auth_approved) == 1 mock_auth.deny_authorization_failure(auth['user_id']) + mock_auth.read_auth_file() assert len(mock_auth._auth_denied) == 1 assert len(mock_auth._auth_approved) == 0 - - mock_auth.read_auth_file() assert len(mock_auth.auth_entries) == 0 @@ -119,22 +120,21 @@ def test_deny_approved_authorization(mock_auth_service, test_auth): def test_delete_approved_authorization(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 assert len(mock_auth._auth_approved) == 0 mock_auth.approve_authorization_failure(auth['user_id']) - assert len(mock_auth._auth_approved) == 1 - assert len(mock_auth._auth_failures) == 0 - assert len(mock_auth.auth_entries) == 0 mock_auth.read_auth_file() + assert len(mock_auth._auth_approved) == 1 + assert len(mock_auth._auth_pending) == 0 assert len(mock_auth.auth_entries) == 1 mock_auth.delete_authorization_failure(auth['user_id']) - assert len(mock_auth._auth_approved) == 0 mock_auth.read_auth_file() + assert len(mock_auth._auth_approved) == 0 assert len(mock_auth.auth_entries) == 0 @@ -142,19 +142,20 @@ def test_delete_approved_authorization(mock_auth_service, test_auth): def test_approve_denied_authorization(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 assert len(mock_auth._auth_denied) == 0 mock_auth.deny_authorization_failure(auth['user_id']) + mock_auth.read_auth_file() assert len(mock_auth._auth_denied) == 1 - assert len(mock_auth._auth_failures) == 0 + assert len(mock_auth._auth_pending) == 0 mock_auth.approve_authorization_failure(auth['user_id']) assert len(mock_auth.auth_entries) == 0 - assert len(mock_auth._auth_approved) == 1 mock_auth.read_auth_file() + assert len(mock_auth._auth_approved) == 1 assert len(mock_auth.auth_entries) == 1 assert len(mock_auth._auth_denied) == 0 @@ -163,26 +164,28 @@ def test_approve_denied_authorization(mock_auth_service, test_auth): def test_deny_authorization_failure(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 assert len(mock_auth._auth_denied) == 0 mock_auth.deny_authorization_failure(auth['user_id']) + mock_auth.read_auth_file() assert len(mock_auth._auth_denied) == 1 - assert len(mock_auth._auth_failures) == 0 + assert len(mock_auth._auth_pending) == 0 @pytest.mark.control def test_delete_authorization_failure(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 assert len(mock_auth._auth_denied) == 0 mock_auth.delete_authorization_failure(auth['user_id']) - assert len(mock_auth._auth_failures) == 0 + + assert len(mock_auth._auth_pending) == 0 assert len(mock_auth._auth_denied) == 0 @@ -190,29 +193,24 @@ def test_delete_authorization_failure(mock_auth_service, test_auth): def test_delete_denied_authorization(mock_auth_service, test_auth): mock_auth = mock_auth_service auth = test_auth - mock_auth._update_auth_failures( + mock_auth._update_auth_pending( auth['domain'], auth['address'], auth['mechanism'], auth['credentials'], auth['user_id']) - assert len(mock_auth._auth_failures) == 1 + assert len(mock_auth._auth_pending) == 1 assert len(mock_auth._auth_denied) == 0 mock_auth.deny_authorization_failure(auth['user_id']) + mock_auth.read_auth_file() assert len(mock_auth._auth_denied) == 1 - assert len(mock_auth._auth_failures) == 0 + assert len(mock_auth._auth_pending) == 0 mock_auth.delete_authorization_failure(auth['user_id']) + mock_auth.read_auth_file() assert len(mock_auth._auth_denied) == 0 - -def get_env(platform): - env = os.environ.copy() - env['VOLTTRON_HOME'] = platform.volttron_home - return env - - def auth_list(platform): - env = get_env(platform) - return subprocess.check_output(['volttron-ctl', 'auth', 'list'], env=env, universal_newlines=True) + with with_os_environ(platform.env): + return subprocess.check_output(['volttron-ctl', 'auth', 'list'], env=platform.env, universal_newlines=True) def auth_list_json(platform): @@ -242,11 +240,11 @@ def entry_to_input_string(domain='', address='', user_id='', capabilities='', def auth_add(platform, entry): - env = get_env(platform) - p = subprocess.Popen(['volttron-ctl', 'auth', 'add'], env=env, - stdin=subprocess.PIPE, universal_newlines=True) - p.communicate(input=entry_to_input_string(**entry.__dict__)) - assert p.returncode == 0 + with with_os_environ(platform.env): + p = subprocess.Popen(['volttron-ctl', 'auth', 'add'], env=platform.env, + stdin=subprocess.PIPE, universal_newlines=True) + p.communicate(input=entry_to_input_string(**entry.__dict__)) + assert p.returncode == 0 def auth_add_cmd_line(platform, entry): @@ -265,35 +263,26 @@ def auth_add_cmd_line(platform, entry): if not enabled: args.append('--disabled') - env = get_env(platform) - p = subprocess.Popen(args, env=env, stdin=subprocess.PIPE, universal_newlines=True) - p.communicate() - assert p.returncode == 0 + with with_os_environ(platform.env): + p = subprocess.Popen(args, env=platform.env, stdin=subprocess.PIPE, universal_newlines=True) + p.communicate() + assert p.returncode == 0 def auth_remove(platform, index): - env = get_env(platform) - p = subprocess.Popen(['volttron-ctl', 'auth', 'remove', str(index)], env=env, - stdin=subprocess.PIPE, universal_newlines=True) - p.communicate(input='Y\n') - assert p.returncode == 0 + with with_os_environ(platform.env): + p = subprocess.Popen(['volttron-ctl', 'auth', 'remove', str(index)], env=platform.env, + stdin=subprocess.PIPE, universal_newlines=True) + p.communicate(input='Y\n') + assert p.returncode == 0 def auth_update(platform, index, **kwargs): - env = get_env(platform) - p = subprocess.Popen(['volttron-ctl', 'auth', 'update', str(index)], env=env, - stdin=subprocess.PIPE, universal_newlines=True) - p.communicate(input=entry_to_input_string(**kwargs)) - assert p.returncode == 0 - - -def assert_auth_entries_same(e1, e2): - for field in ['domain', 'address', 'user_id', 'credentials', 'comments', - 'enabled']: - assert e1[field] == e2[field] - for field in ['roles', 'groups']: - assert set(e1[field]) == set(e2[field]) - assert e1['capabilities'] == e2['capabilities'] + with with_os_environ(platform.env): + p = subprocess.Popen(['volttron-ctl', 'auth', 'update', str(index)], env=platform.env, + stdin=subprocess.PIPE, universal_newlines=True) + p.communicate(input=entry_to_input_string(**kwargs)) + assert p.returncode == 0 @pytest.mark.control @@ -416,12 +405,12 @@ def _run_group_or_role_cmds(platform, add_fn, list_fn, update_fn, remove_fn): def _add_group_or_role(platform, cmd, name, list_): - args = ['volttron-ctl', 'auth', cmd, name] - args.extend(list_) - env = get_env(platform) - p = subprocess.Popen(args, env=env, stdin=subprocess.PIPE, universal_newlines=True) - p.communicate() - assert p.returncode == 0 + with with_os_environ(platform.env): + args = ['volttron-ctl', 'auth', cmd, name] + args.extend(list_) + p = subprocess.Popen(args, env=platform.env, stdin=subprocess.PIPE, universal_newlines=True) + p.communicate() + assert p.returncode == 0 def _add_group(platform, group, roles): @@ -433,19 +422,19 @@ def _add_role(platform, role, capabilities): def _list_groups_or_roles(platform, cmd): - env = get_env(platform) - output = subprocess.check_output(['volttron-ctl', 'auth', cmd], - env=env, universal_newlines=True) - # For these tests don't use names that contain space, [, comma, or ' - output = output.replace('[', '').replace("'", '').replace(']', '') - output = output.replace(',', '') - lines = output.split('\n') + with with_os_environ(platform.env): + output = subprocess.check_output(['volttron-ctl', 'auth', cmd], + env=platform.env, universal_newlines=True) + # For these tests don't use names that contain space, [, comma, or ' + output = output.replace('[', '').replace("'", '').replace(']', '') + output = output.replace(',', '') + lines = output.split('\n') - dict_ = {} - for line in lines[2:-1]: # skip two header lines and last (empty) line - list_ = ' '.join(line.split()).split() # combine multiple spaces - dict_[list_[0]] = list_[1:] - return dict_ + dict_ = {} + for line in lines[2:-1]: # skip two header lines and last (empty) line + list_ = ' '.join(line.split()).split() # combine multiple spaces + dict_[list_[0]] = list_[1:] + return dict_ def _list_groups(platform): @@ -457,14 +446,14 @@ def _list_roles(platform): def _update_group_or_role(platform, cmd, key, values, remove): - args = ['volttron-ctl', 'auth', cmd, key] - args.extend(values) - if remove: - args.append('--remove') - env = get_env(platform) - p = subprocess.Popen(args, env=env, stdin=subprocess.PIPE, universal_newlines=True) - p.communicate() - assert p.returncode == 0 + with with_os_environ(platform.env): + args = ['volttron-ctl', 'auth', cmd, key] + args.extend(values) + if remove: + args.append('--remove') + p = subprocess.Popen(args, env=platform.env, stdin=subprocess.PIPE, universal_newlines=True) + p.communicate() + assert p.returncode == 0 def _update_group(platform, group, roles, remove=False): @@ -476,11 +465,11 @@ def _update_role(platform, role, caps, remove=False): def _remove_group_or_role(platform, cmd, key): - args = ['volttron-ctl', 'auth', cmd, key] - env = get_env(platform) - p = subprocess.Popen(args, env=env, stdin=subprocess.PIPE, universal_newlines=True) - p.communicate() - assert p.returncode == 0 + with with_os_environ(platform.env): + args = ['volttron-ctl', 'auth', cmd, key] + p = subprocess.Popen(args, env=platform.env, stdin=subprocess.PIPE, universal_newlines=True) + p.communicate() + assert p.returncode == 0 def _remove_group(platform, group): @@ -507,32 +496,32 @@ def test_known_host_cmds(volttron_instance): def _add_known_host(platform, host, serverkey): - args = ['volttron-ctl', 'auth', 'add-known-host'] - args.extend(['--host', host]) - args.extend(['--serverkey', serverkey]) - env = get_env(platform) - p = subprocess.Popen(args, env=env, stdin=subprocess.PIPE, universal_newlines=True) - p.communicate() - assert p.returncode == 0 + with with_os_environ(platform.env): + args = ['volttron-ctl', 'auth', 'add-known-host'] + args.extend(['--host', host]) + args.extend(['--serverkey', serverkey]) + p = subprocess.Popen(args, env=platform.env, stdin=subprocess.PIPE, universal_newlines=True) + p.communicate() + assert p.returncode == 0 def _list_known_hosts(platform): - env = get_env(platform) - output = subprocess.check_output(['volttron-ctl', 'auth', - 'list-known-hosts'], env=env, universal_newlines=True) + with with_os_environ(platform.env): + output = subprocess.check_output(['volttron-ctl', 'auth', + 'list-known-hosts'], env=platform.env, universal_newlines=True) - lines = output.split('\n') - dict_ = {} - for line in lines[2:-1]: # skip two header lines and last (empty) line - host, pubkey = ' '.join(line.split()).split() # combine multiple spaces - dict_[host] = pubkey - return dict_ + lines = output.split('\n') + dict_ = {} + for line in lines[2:-1]: # skip two header lines and last (empty) line + host, pubkey = ' '.join(line.split()).split() # combine multiple spaces + dict_[host] = pubkey + return dict_ def _remove_known_host(platform, host): - args = ['volttron-ctl', 'auth', 'remove-known-host', host] - env = get_env(platform) - p = subprocess.Popen(args, env=env, stdin=subprocess.PIPE, universal_newlines=True) - p.communicate() - assert p.returncode == 0 + with with_os_environ(platform.env): + args = ['volttron-ctl', 'auth', 'remove-known-host', host] + p = subprocess.Popen(args, env=platform.env, stdin=subprocess.PIPE, universal_newlines=True) + p.communicate() + assert p.returncode == 0 diff --git a/volttrontesting/platform/auth_file_test.py b/volttrontesting/platform/auth_tests/test_auth_file.py similarity index 98% rename from volttrontesting/platform/auth_file_test.py rename to volttrontesting/platform/auth_tests/test_auth_file.py index b5157a6960..ab74b8e99c 100644 --- a/volttrontesting/platform/auth_file_test.py +++ b/volttrontesting/platform/auth_tests/test_auth_file.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -46,9 +46,10 @@ AuthFileEntryAlreadyExists, AuthFileUserIdAlreadyExists, AuthEntryInvalid) -from volttrontesting.platform.auth_control_test import assert_auth_entries_same from volttron.platform.agent.known_identities import VOLTTRON_CENTRAL_PLATFORM, CONTROL from volttron.platform import jsonapi +from volttrontesting.platform.auth_tests.conftest import assert_auth_entries_same + @pytest.fixture(scope='function') def auth_file_platform_tuple(volttron_instance): @@ -264,7 +265,7 @@ def test_upgrade_file_verison_0_to_1_2(tmpdir_factory): fp.write(jsonapi.dumps(version0, indent=2)) upgraded = AuthFile(filename) - entries, groups, roles = upgraded.read() + entries, denied_entries, groups, roles = upgraded.read() assert groups == version0['groups'] assert roles == version0['roles'] assert len(entries) == 1 diff --git a/volttrontesting/platform/base_market_agent/test_market_registration.py b/volttrontesting/platform/base_market_agent/test_market_registration.py index ab4ae4d2a9..d6f36315ec 100644 --- a/volttrontesting/platform/base_market_agent/test_market_registration.py +++ b/volttrontesting/platform/base_market_agent/test_market_registration.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/base_market_agent/test_point.py b/volttrontesting/platform/base_market_agent/test_point.py index c3fae335f1..d2b79bfafe 100644 --- a/volttrontesting/platform/base_market_agent/test_point.py +++ b/volttrontesting/platform/base_market_agent/test_point.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/base_market_agent/test_poly_line.py b/volttrontesting/platform/base_market_agent/test_poly_line.py index 6e74561482..e711093162 100644 --- a/volttrontesting/platform/base_market_agent/test_poly_line.py +++ b/volttrontesting/platform/base_market_agent/test_poly_line.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/base_market_agent/test_poly_line_factory.py b/volttrontesting/platform/base_market_agent/test_poly_line_factory.py index 74f36f4f87..075bcb1270 100644 --- a/volttrontesting/platform/base_market_agent/test_poly_line_factory.py +++ b/volttrontesting/platform/base_market_agent/test_poly_line_factory.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/connection_test.py b/volttrontesting/platform/connection_test.py index be7b9c4fc6..6c6941bbc2 100644 --- a/volttrontesting/platform/connection_test.py +++ b/volttrontesting/platform/connection_test.py @@ -1,6 +1,6 @@ import gevent import pytest -from volttron.platform.agent.known_identities import CONTROL, MASTER_WEB, AUTH, CONFIGURATION_STORE +from volttron.platform.agent.known_identities import CONTROL, PLATFORM_WEB, AUTH, CONFIGURATION_STORE from volttron.platform.keystore import KeyStore from volttron.platform.vip.agent.connection import Connection from volttron.platform.vip.agent.utils import build_connection diff --git a/volttrontesting/platform/dbutils/test_backup_database.py b/volttrontesting/platform/dbutils/test_backup_database.py new file mode 100644 index 0000000000..a87ce62084 --- /dev/null +++ b/volttrontesting/platform/dbutils/test_backup_database.py @@ -0,0 +1,271 @@ +import os +import pytest + +from gevent import subprocess +from datetime import datetime +from pytz import UTC + +from volttron.platform.agent.base_historian import BackupDatabase, BaseHistorian + +SIZE_LIMIT = 1000 # the default submit_size_limit for BaseHistorianAgents + + +def test_get_outstanding_to_publish_should_return_records( + backup_database, new_publish_list_unique +): + init_db(backup_database, new_publish_list_unique) + expected_records = [] + for idx in range(1000): + data = { + "_id": idx + 1, + "headers": {}, + "meta": {}, + "source": "foobar_source", + "timestamp": datetime(2020, 6, 1, 12, 31, tzinfo=UTC), + "topic": f"foobar_topic{idx}", + "value": idx, + } + expected_records.append(data) + + actual_records = backup_database.get_outstanding_to_publish(SIZE_LIMIT) + + assert actual_records == expected_records + assert backup_database._record_count == len(expected_records) + + +def test_get_outstanding_to_publish_should_return_unique_records_when_duplicates_in_db( + backup_database, new_publish_list_dupes +): + init_db_with_dupes(backup_database, new_publish_list_dupes) + expected_records = [ + { + "_id": 1, + "headers": {}, + "meta": {}, + "source": "dupesource", + "timestamp": datetime(2020, 6, 1, 12, 30, 59, tzinfo=UTC), + "topic": "dupetopic", + "value": 123, + } + ] + for x in range(4, 1000): + data = { + "_id": x, + "headers": {}, + "meta": {}, + "source": "foobar_source", + "timestamp": datetime(2020, 6, 1, 12, 31, tzinfo=UTC), + "topic": f"foobar_topic{x}", + "value": x, + } + expected_records.append(data) + + actual_records = backup_database.get_outstanding_to_publish(SIZE_LIMIT) + + assert actual_records == expected_records + assert backup_database._record_count == len(new_publish_list_dupes) + + +def test_remove_successfully_published_should_clear_cache( + backup_database, new_publish_list_unique +): + init_db(backup_database, new_publish_list_unique) + + assert backup_database._record_count == len(new_publish_list_unique) + + orig_record_count = backup_database._record_count + + backup_database.get_outstanding_to_publish(SIZE_LIMIT) + backup_database.remove_successfully_published(set((None,)), SIZE_LIMIT) + + assert get_all_data("outstanding") == [] + current_record_count = backup_database._record_count + assert current_record_count < orig_record_count + assert current_record_count == 0 + + +def test_remove_successfully_published_should_keep_duplicates_in_cache( + backup_database, new_publish_list_dupes +): + init_db_with_dupes(backup_database, new_publish_list_dupes) + orig_record_count = backup_database._record_count + + assert len(get_all_data("outstanding")) == len(new_publish_list_dupes) + + expected_cache_after_update = [ + "2|2020-06-01 12:30:59|dupesource|1|456|{}", + "3|2020-06-01 12:30:59|dupesource|1|789|{}", + ] + + backup_database.get_outstanding_to_publish(SIZE_LIMIT) + backup_database.remove_successfully_published(set((None,)), SIZE_LIMIT) + + assert get_all_data("outstanding") == expected_cache_after_update + current_record_count = backup_database._record_count + assert current_record_count < orig_record_count + assert current_record_count == 2 + + +def test_get_outstanding_to_publish_should_return_unique_records_on_multiple_trans( + backup_database, new_publish_list_dupes +): + init_db_with_dupes(backup_database, new_publish_list_dupes) + assert len(get_all_data("outstanding")) == len(new_publish_list_dupes) + + # First transaction + expected_records = [ + { + "_id": 1, + "headers": {}, + "meta": {}, + "source": "dupesource", + "timestamp": datetime(2020, 6, 1, 12, 30, 59, tzinfo=UTC), + "topic": "dupetopic", + "value": 123, + } + ] + for x in range(4, 1000): + data = { + "_id": x, + "headers": {}, + "meta": {}, + "source": "foobar_source", + "timestamp": datetime(2020, 6, 1, 12, 31, tzinfo=UTC), + "topic": f"foobar_topic{x}", + "value": x, + } + expected_records.append(data) + + actual_records = backup_database.get_outstanding_to_publish(SIZE_LIMIT) + + assert actual_records == expected_records + + # Second transaction + backup_database.remove_successfully_published(set((None,)), SIZE_LIMIT) + expected_records = [ + { + "_id": 2, + "headers": {}, + "meta": {}, + "source": "dupesource", + "timestamp": datetime(2020, 6, 1, 12, 30, 59, tzinfo=UTC), + "topic": "dupetopic", + "value": 456, + } + ] + + actual_records = backup_database.get_outstanding_to_publish(SIZE_LIMIT) + + assert actual_records == expected_records + + # Third transaction + backup_database.remove_successfully_published(set((None,)), SIZE_LIMIT) + expected_records = [ + { + "_id": 3, + "headers": {}, + "meta": {}, + "source": "dupesource", + "timestamp": datetime(2020, 6, 1, 12, 30, 59, tzinfo=UTC), + "topic": "dupetopic", + "value": 789, + } + ] + + actual_records = backup_database.get_outstanding_to_publish(SIZE_LIMIT) + + assert actual_records == expected_records + + # Fourth Transaction + backup_database.remove_successfully_published(set((None,)), SIZE_LIMIT) + assert backup_database.get_outstanding_to_publish(SIZE_LIMIT) == [] + + +def init_db_with_dupes(backup_database, new_publish_list_dupes): + backup_database.backup_new_data(new_publish_list_dupes) + + +def init_db(backup_database, new_publish_list_unique): + backup_database.backup_new_data(new_publish_list_unique) + + +@pytest.fixture(scope="module") +def new_publish_list_unique(): + publish_list_unique = list() + for idx in range(1000): + data = { + "source": "foobar_source", + "topic": f"foobar_topic{idx}", + "meta": {}, + "readings": [("2020-06-01 12:31:00", idx)], + "headers": {}, + } + publish_list_unique.append(data) + + return tuple(publish_list_unique) + + +@pytest.fixture(scope="module") +def new_publish_list_dupes(): + dupes = [ + { + "source": "dupesource", + "topic": "dupetopic", + "meta": {}, + "readings": [("2020-06-01 12:30:59", 123)], + "headers": {}, + }, + { + "source": "dupesource", + "topic": "dupetopic", + "meta": {}, + "readings": [("2020-06-01 12:30:59", 456)], + "headers": {}, + }, + { + "source": "dupesource", + "topic": "dupetopic", + "meta": {}, + "readings": [("2020-06-01 12:30:59", 789)], + "headers": {}, + }, + ] + for idx in range(4, 1000): + data = { + "source": "foobar_source", + "topic": f"foobar_topic{idx}", + "meta": {}, + "readings": [("2020-06-01 12:31:00", idx)], + "headers": {}, + } + dupes.append(data) + + return tuple(dupes) + + +@pytest.fixture() +def backup_database(): + yield BackupDatabase(BaseHistorian(), None, 0.9) + + # Teardown + # the backup database is an sqlite database with the name "backup.sqlite". + # the db is created if it doesn't exist; see the method: BackupDatabase._setupdb(check_same_thread) for details + if os.path.exists("./backup.sqlite"): + os.remove("./backup.sqlite") + + +def get_all_data(table): + q = f"""SELECT * FROM {table}""" + res = query_db(q) + return res.splitlines() + + +def query_db(query): + output = subprocess.run( + ["sqlite3", "backup.sqlite", query], text=True, capture_output=True + ) + # check_returncode() will raise a CalledProcessError if the query fails + # see https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode + output.check_returncode() + + return output.stdout diff --git a/volttrontesting/platform/dbutils/test_influxdbutils.py b/volttrontesting/platform/dbutils/test_influxdbutils.py new file mode 100644 index 0000000000..3d0128250d --- /dev/null +++ b/volttrontesting/platform/dbutils/test_influxdbutils.py @@ -0,0 +1,510 @@ +from time import time + +from gevent import sleep, os +import pytest + +try: + from influxdb import InfluxDBClient +except ImportError: + pytest.skip( + "Required imports for testing are not installed; thus, not running tests. " + "If on Ubuntu or Debian OS, install imports with: services/core/InfluxdbHistorian/scripts/install-influx.sh " + "Otherwise, see https://docs.influxdata.com/influxdb/v1.4/introduction/installation/.", + allow_module_level=True, + ) + +import volttron.platform.dbutils.influxdbutils as influxdbutils +from volttrontesting.fixtures.docker_wrapper import create_container +from volttrontesting.utils.utils import get_rand_port + +IMAGES = ["influxdb"] + +if "CI" not in os.environ: + IMAGES.extend(["influxdb:1.7", "influxdb:1.8.1", "influxdb:1.7.10"]) + +TEST_DATABASE = "test_historian" +ENV_INFLUXDB = {"INFLUXDB_DB": TEST_DATABASE} +ALLOW_CONNECTION_TIME = 10 + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +def test_get_all_topics(get_container_func, ports_config): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + points = [ + { + "measurement": "meta", + "tags": {"topic_id": "sometopic_id"}, + "time": 1465839830100400200, + "fields": { + "topic": "some_topic_name", + "meta_dict": str({"metadata1": "foobar"}), + }, + } + ] + add_data_to_measurement(ports_config, points) + expected_topics = ["some_topic_name"] + + actual_topics = influxdbutils.get_all_topics(influxdb_client(ports_config)) + + assert actual_topics == expected_topics + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "topic_id", [("a^p"), ("a[p-z]"), ("\\w+\\b"), ("fgfd$"), ("\\/foobar\\/")] +) +def test_get_topic_values_raises_value_error_on_regex( + get_container_func, ports_config, topic_id +): + with pytest.raises(ValueError): + influxdbutils.get_topic_values( + None, topic_id, None, None, None, None, None, None, None, None + ) + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "points, topic_id, start, end, agg_type, agg_period, skip, count, order,use_calendar_time_periods, expected_topic_values", + [ + ( + [ + { + "measurement": "power_kw", + "tags": { + "device": "device1", + "building": "building1", + "campus": "campusa", + }, + "fields": {"value": "somevalue"}, + "time": 1465839830100400200, + } + ], + "CampusA/Building1/Device1/Power_KW".lower(), + None, + None, + None, + None, + 0, + 1000, + "FIRST_TO_LAST", + False, + [("2016-06-13T17:43:50.100400+00:00", "somevalue")], + ) + ], +) +def test_get_topic_values( + get_container_func, + ports_config, + points, + topic_id, + start, + end, + agg_type, + agg_period, + skip, + count, + order, + use_calendar_time_periods, + expected_topic_values, +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + add_data_to_measurement(ports_config, points) + + actual_topic_values = influxdbutils.get_topic_values( + influxdb_client(ports_config), + topic_id, + start, + end, + agg_type, + agg_period, + skip, + count, + order, + use_calendar_time_periods, + ) + + assert actual_topic_values == expected_topic_values + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "points, topic_id, expected_meta", + [ + ( + [ + { + "measurement": "meta", + "tags": {"topic_id": "sometopic_id"}, + "time": 1465839830100400200, + "fields": { + "topic": "some_topic_name", + "meta_dict": str({"metadata1": "foobar", "metadata2": 42}), + "last_updated": "1465839830100400200", + }, + } + ], + "sometopic_id", + {"metadata1": "foobar", "metadata2": 42}, + ) + ], +) +def test_get_topic_meta( + get_container_func, ports_config, points, topic_id, expected_meta +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + add_data_to_measurement(ports_config, points) + + actual_meta = influxdbutils.get_topic_meta( + influxdb_client(ports_config), topic_id + ) + + assert actual_meta == expected_meta + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "points, expected_results", + [ + ( + [ + { + "measurement": "meta", + "tags": {"topic_id": "sometopic_id"}, + "fields": { + "topic": "actual_topic_name", + "meta_dict": str({"metadata1": "foobar"}), + }, + } + ], + ( + {"sometopic_id": "actual_topic_name"}, + {"sometopic_id": {"metadata1": "foobar"}}, + ), + ), + ( + [ + { + "measurement": "meta", + "tags": {"topic_id": "sometopic_id"}, + "fields": { + "topic": "actual_topic_name1", + "meta_dict": str({"metadata1": "foobar"}), + }, + }, + { + "measurement": "meta", + "tags": {"topic_id": "other_id"}, + "fields": { + "topic": "actual_topic_name2", + "meta_dict": str({"metadata2": 42}), + }, + }, + ], + ( + { + "sometopic_id": "actual_topic_name1", + "other_id": "actual_topic_name2", + }, + { + "sometopic_id": {"metadata1": "foobar"}, + "other_id": {"metadata2": 42}, + }, + ), + ), + ], +) +def test_get_all_topic_id_and_meta( + get_container_func, ports_config, points, expected_results +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + add_data_to_measurement(ports_config, points) + + actual_results = influxdbutils.get_all_topic_id_and_meta( + influxdb_client(ports_config) + ) + + assert actual_results == expected_results + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "topic_id, topic, meta, updated_time, expected_data", + [ + ( + "sometopic_id", + "actual_topic_name", + {"metadata1": "foobar"}, + "1465839830100400200", + [ + { + "time": "1970-01-01T00:00:00Z", + "last_updated": "1465839830100400200", + "meta_dict": "{'metadata1': 'foobar'}", + "topic": "actual_topic_name", + "topic_id": "sometopic_id", + } + ], + ) + ], +) +def test_insert_meta( + get_container_func, ports_config, topic_id, topic, meta, updated_time, expected_data +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + assert get_data(ports_config, "meta") == [] + + influxdbutils.insert_meta( + influxdb_client(ports_config), topic_id, topic, meta, updated_time + ) + actual_results = get_data(ports_config, "meta") + + assert actual_results == expected_data + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "measurement, updatedtime, topic_id, source, value, value_string, expected_data", + [ + ( + "POWER_KW", + "2017-12-28T20:41:00.004260096Z", + "CampusA/Building1/Device1/POWER_KW", + "scrape", + "123.4", + "foobar", + [ + { + "time": "2017-12-28T20:41:00.004260Z", + "building": "Building1", + "campus": "CampusA", + "device": "Device1", + "source": "scrape", + "value": "123.4", + "value_string": "foobar", + } + ], + ), + ( + "OutsideAirTemperature", + "2017-12-28T20:41:00.004260096Z", + "CampusA/Building1/LAB/Device/OutsideAirTemperature", + "scrape", + "123.4", + "foobar", + [ + { + "time": "2017-12-28T20:41:00.004260Z", + "building": "LAB", + "campus": "CampusA/Building1", + "device": "Device", + "source": "scrape", + "value": "123.4", + "value_string": "foobar", + } + ], + ), + ( + "temp", + "2017-12-28T20:41:00.004260096Z", + "LAB/Device/temp", + "scrape", + "123.4", + "foobar", + [ + { + "time": "2017-12-28T20:41:00.004260Z", + "building": "LAB", + "device": "Device", + "source": "scrape", + "value": "123.4", + "value_string": "foobar", + } + ], + ), + ], +) +def test_insert_data_point( + get_container_func, + ports_config, + measurement, + updatedtime, + topic_id, + source, + value, + value_string, + expected_data, +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + + assert get_data(ports_config, measurement) == [] + + influxdbutils.insert_data_point( + influxdb_client(ports_config), + updatedtime, + topic_id, + source, + value, + value_string, + ) + actual_data = get_data(ports_config, measurement) + + assert actual_data == expected_data + + +@pytest.mark.dbutils +@pytest.mark.influxdbutils +@pytest.mark.parametrize( + "pattern, expected_topics", + [ + ("actual", [{"actual_topic_name": "sometopic_id"}]), + ( + "topic", + [ + {"actual_topic_name": "sometopic_id"}, + {"snafu_topic": "ghsfjkhkjf_ID"}, + {"topic_snafu_2": "topic_id_42"}, + ], + ), + ("foo", []), + ( + "^(snafu).*", + [{"snafu_Topic2": "other_topic_id"}, {"snafu_topic": "ghsfjkhkjf_ID"}], + ), + ("(name)$", [{"actual_topic_name": "sometopic_id"}]), + ], +) +def test_get_topics_by_pattern( + get_container_func, ports_config, pattern, expected_topics +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_INFLUXDB + ) as container: + wait_for_connection(container) + points = [ + { + "measurement": "meta", + "tags": {"topic_id": "sometopic_id"}, + "fields": { + "topic": "actual_topic_name", + "meta_dict": str({"metadata1": "foobar"}), + }, + }, + { + "measurement": "meta", + "tags": {"topic_id": "ghsfjkhkjf_ID"}, + "fields": { + "topic": "snafu_topic", + "meta_dict": str({"metadata42": "foobar"}), + }, + }, + { + "measurement": "meta", + "tags": {"topic_id": "topic_id_42"}, + "fields": { + "topic": "topic_snafu_2", + "meta_dict": str({"metadata42": "foobar"}), + }, + }, + { + "measurement": "meta", + "tags": {"topic_id": "other_topic_id"}, + "fields": { + "topic": "snafu_Topic2", + "meta_dict": str({"metadata42": "foobar"}), + }, + }, + ] + add_data_to_measurement(ports_config, points) + + actual_topics = influxdbutils.get_topics_by_pattern( + influxdb_client(ports_config), pattern + ) + + assert actual_topics == expected_topics + + +@pytest.fixture(params=IMAGES) +def get_container_func(request): + return create_container, request.param + + +@pytest.fixture() +def ports_config(): + port_on_host = get_rand_port(ip="8086") + return {"port_on_host": port_on_host, "ports": {"8086/tcp": port_on_host}} + + +def influxdb_client(ports_config): + connection_params = { + "host": "localhost", + "port": ports_config["port_on_host"], + "database": TEST_DATABASE, + } + return influxdbutils.get_client(connection_params) + + +def wait_for_connection(container): + sleep(ALLOW_CONNECTION_TIME) + query_database(container, f"use {TEST_DATABASE}") + + +def query_database(container, query): + cmd = f'influx -execute "{query}" -database test_historian' + + start_time = time() + while time() - start_time < ALLOW_CONNECTION_TIME: + r = container.exec_run(cmd=cmd, tty=True) + print(r) + if r[0] != 0: + continue + else: + return + + return RuntimeError(r) + + +def add_data_to_measurement(ports_config, points): + client = InfluxDBClient( + host="localhost", port=ports_config["port_on_host"], database=TEST_DATABASE + ) + client.write_points(points) + + +def get_data(ports_config, measurement): + client = InfluxDBClient( + host="localhost", port=ports_config["port_on_host"], database=TEST_DATABASE + ) + res = client.query(f"""SELECT * from {measurement}""", database=TEST_DATABASE) + return list(res.get_points()) diff --git a/volttrontesting/platform/dbutils/test_mongoutils.py b/volttrontesting/platform/dbutils/test_mongoutils.py new file mode 100644 index 0000000000..33117a751a --- /dev/null +++ b/volttrontesting/platform/dbutils/test_mongoutils.py @@ -0,0 +1,202 @@ +import os +from time import time + +from gevent import sleep +import pytest + +import volttron.platform.dbutils.mongoutils as mongoutils +from volttrontesting.fixtures.docker_wrapper import create_container +from volttrontesting.utils.utils import get_rand_port + + +IMAGES = ["mongo:3-xenial", "mongo:bionic"] + +if "CI" not in os.environ: + IMAGES.extend( + [ + "mongo:3.6-xenial", + "mongo:3.6.19-xenial", + "mongo:4.0-xenial", + "mongo:4.0.19-xenial", + "mongo:4-bionic", + "mongo:4.2-bionic", + "mongo:4.2.8-bionic", + "mongo:4.4-bionic", + "mongo:4.4.0-bionic", + ] + ) + +TEST_DATABASE = "test_historian" +ROOT_USERNAME = "mongoadmin" +ROOT_PASSWORD = "12345" +ENV_MONGODB = { + "MONGO_INITDB_ROOT_USERNAME": ROOT_USERNAME, + "MONGO_INITDB_ROOT_PASSWORD": ROOT_PASSWORD, + "MONGO_INITDB_DATABASE": TEST_DATABASE, +} +ALLOW_CONNECTION_TIME = 10 + + +@pytest.mark.mongoutils +@pytest.mark.parametrize( + "query, expected_topic_id_map, expected_topic_name_map", + [ + ( + '\'db.topics.insertOne({topic_name:"foobar", _id:"42"})\'', + {"foobar": "42"}, + {"foobar": "foobar"}, + ), + ( + '\'db.topics.insertOne({topic_name:"ROMA", _id:"17"})\'', + {"roma": "17"}, + {"roma": "ROMA"}, + ), + ], +) +def test_get_topic_map( + get_container_func, + ports_config, + query, + expected_topic_id_map, + expected_topic_name_map, +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_MONGODB + ) as container: + wait_for_connection(container) + query_database(container, query) + + actual_topic_id_map, actual_topic_name_map = mongoutils.get_topic_map( + mongo_client(ports_config["port_on_host"]), "topics" + ) + + assert actual_topic_id_map == expected_topic_id_map + assert actual_topic_name_map == expected_topic_name_map + + +@pytest.mark.mongoutils +@pytest.mark.parametrize( + "query, agg_topics_collection, expected_agg_topic_map", + [ + ( + '\'db.aggregate_topics.insertOne({agg_topic_name:"foobar", agg_type:"AVG", agg_time_period:"2001", _id:"42"})\'', + "aggregate_topics", + {("foobar", "AVG", "2001"): "42"}, + ), + ( + '\'db.aggregate_topics.insertOne({agg_topic_name:"ROmA", agg_type:"AVG", agg_time_period:"2001", _id:"42"})\'', + "aggregate_topics", + {("roma", "AVG", "2001"): "42"}, + ), + ], +) +def test_get_agg_topic_map( + get_container_func, + ports_config, + query, + agg_topics_collection, + expected_agg_topic_map, +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_MONGODB + ) as container: + wait_for_connection(container) + query_database(container, query) + + actual_agg_topic_map = mongoutils.get_agg_topic_map( + mongo_client(ports_config["port_on_host"]), agg_topics_collection + ) + + assert actual_agg_topic_map == expected_agg_topic_map + + +@pytest.mark.mongoutils +@pytest.mark.parametrize( + "query_agg_topics, query_agg_meta, expected_agg_topics", + [ + ( + '\'db.aggregate_topics.insertOne({agg_topic_name:"foobar", agg_type:"AVG", agg_time_period:"2001", _id:"42"})\'', + '\'db.aggregate_meta.insertOne({agg_topic_id:"42", meta:{configured_topics: "topic1"}})\'', + [("foobar", "AVG", "2001", "topic1")], + ), + ( + '\'db.aggregate_topics.insertOne({agg_topic_name:"FOO", agg_type:"AVG", agg_time_period:"2001", _id:"42"})\'', + '\'db.aggregate_meta.insertOne({agg_topic_id:"42", meta:{configured_topics: "topic1"}})\'', + [("foo", "AVG", "2001", "topic1")], + ), + ], +) +def test_get_agg_topics( + get_container_func, + ports_config, + query_agg_topics, + query_agg_meta, + expected_agg_topics, +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_MONGODB + ) as container: + wait_for_connection(container) + query_database(container, query_agg_topics) + query_database(container, query_agg_meta) + + actual_agg_topics = mongoutils.get_agg_topics( + mongo_client(ports_config["port_on_host"]), + "aggregate_topics", + "aggregate_meta", + ) + + assert actual_agg_topics == expected_agg_topics + + +def mongo_client(port): + connection_params = { + "host": "localhost", + "port": port, + "database": TEST_DATABASE, + "user": ROOT_USERNAME, + "passwd": ROOT_PASSWORD, + "authSource": "admin", + } + + return mongoutils.get_mongo_client(connection_params) + + +@pytest.fixture(params=IMAGES) +def get_container_func(request): + return create_container, request.param + + +@pytest.fixture() +def ports_config(): + port_on_host = get_rand_port(ip="27017") + return {"port_on_host": port_on_host, "ports": {"27017/tcp": port_on_host}} + + +def wait_for_connection(container): + command = f'mongo --username="{ROOT_USERNAME}" --password="{ROOT_PASSWORD}" --authenticationDatabase admin {TEST_DATABASE} --eval "db.getName()"' + query_database(container, None, command=command) + + +def query_database(container, query, command=None): + if command is None: + cmd = ( + f'mongo --username "{ROOT_USERNAME}" --password "{ROOT_PASSWORD}" ' + f"--authenticationDatabase admin {TEST_DATABASE} --eval={query}" + ) + else: + cmd = command + + start_time = time() + while time() - start_time < ALLOW_CONNECTION_TIME: + r = container.exec_run(cmd=cmd, tty=True) + if r[0] != 0: + continue + else: + sleep(0.5) + return + + return RuntimeError(r) diff --git a/volttrontesting/platform/dbutils/test_mysqlfuncts.py b/volttrontesting/platform/dbutils/test_mysqlfuncts.py index 08f16660cc..d04c9e01a7 100644 --- a/volttrontesting/platform/dbutils/test_mysqlfuncts.py +++ b/volttrontesting/platform/dbutils/test_mysqlfuncts.py @@ -1,23 +1,41 @@ import contextlib import datetime -from time import time +import os +import logging + +logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO) + +from time import time, sleep import pytest -import mysql.connector + +try: + import mysql.connector +except ImportError: + pytest.skip( + "Required imports for testing are not installed; thus, not running tests. Install imports with: python bootstrap.py --mysql", + allow_module_level=True, + ) + from volttron.platform.dbutils.mysqlfuncts import MySqlFuncts from volttrontesting.fixtures.docker_wrapper import create_container from volttrontesting.utils.utils import get_rand_port +pytestmark = [pytest.mark.mysqlfuncts, pytest.mark.dbutils, pytest.mark.unit] # mysqlfuncts was written for MYSQL 5.7; however, the latest version is 8.0 # these tests cannot use latest or anything 8.0 and above and will fail if the latest image/8.0 is used # for example, latest/8.0 will throw a "specified key was too long; max key length is 3072 bytes" error -IMAGES = ["mysql:5.7", ] # To test more images, add them here +IMAGES = ["mysql:5.6.49"] + +if "CI" not in os.environ: + IMAGES.extend(["mysql:5.7.31", "mysql:5", "mysql:5.6", "mysql:5.7"]) + TEST_DATABASE = "test_historian" ROOT_PASSWORD = "12345" ENV_MYSQL = {"MYSQL_ROOT_PASSWORD": ROOT_PASSWORD, "MYSQL_DATABASE": TEST_DATABASE} -ALLOW_CONNECTION_TIME = 10 +ALLOW_CONNECTION_TIME = 30 DATA_TABLE = "data" TOPICS_TABLE = "topics" META_TABLE = "meta" @@ -39,9 +57,9 @@ def test_setup_historian_tables_should_create_tables(get_container_func, ports_c mysqlfuncts.setup_historian_tables() tables = get_tables(port_on_host) - assert 'data' in tables - assert 'topics' in tables - assert 'meta' in tables + assert "data" in tables + assert "topics" in tables + assert "meta" in tables @pytest.mark.mysqlfuncts @@ -52,14 +70,18 @@ def test_record_table_definitions_should_succeed(get_container_func, ports_confi port_on_host = ports_config["port_on_host"] with get_mysqlfuncts(port_on_host) as mysqlfuncts: - tables_def = {'table_prefix': "prefix", - 'data_table': "data", - 'topics_table': "topics", - 'meta_table': "meta"} - meta_table_name = 'meta_other' - expected_data = {('data_table', "data", "prefix"), - ('topics_table', "topics", "prefix"), - ('meta_table', "meta", "prefix")} + tables_def = { + "table_prefix": "prefix", + "data_table": "data", + "topics_table": "topics", + "meta_table": "meta", + } + meta_table_name = "meta_other" + expected_data = { + ("data_table", "data", "prefix"), + ("topics_table", "topics", "prefix"), + ("meta_table", "meta", "prefix"), + } tables = get_tables(port_on_host) assert meta_table_name not in tables @@ -75,7 +97,9 @@ def test_record_table_definitions_should_succeed(get_container_func, ports_confi @pytest.mark.mysqlfuncts -def test_setup_aggregate_historian_tables_should_succeed(get_container_func, ports_config): +def test_setup_aggregate_historian_tables_should_succeed( + get_container_func, ports_config +): get_container, image = get_container_func with get_container(image, ports=ports_config["ports"], env=ENV_MYSQL) as container: wait_for_connection(container) @@ -88,7 +112,7 @@ def test_setup_aggregate_historian_tables_should_succeed(get_container_func, por assert AGG_TOPICS_TABLE not in tables assert AGG_META_TABLE not in tables - mysqlfuncts.setup_aggregate_historian_tables('metadata') + mysqlfuncts.setup_aggregate_historian_tables("metadata") tables = get_tables(port_on_host) assert AGG_TOPICS_TABLE in tables @@ -96,10 +120,20 @@ def test_setup_aggregate_historian_tables_should_succeed(get_container_func, por @pytest.mark.mysqlfuncts -@pytest.mark.parametrize("topic_ids, id_name_map, expected_values", - [([42], {42: "topic42"}, {"topic42": []}), - ([43], {43: "topic43"}, {"topic43": [('2020-06-01T12:30:59.000000+00:00', [2,3])]})]) -def test_query_should_return_data(get_container_func, ports_config, topic_ids, id_name_map, expected_values): +@pytest.mark.parametrize( + "topic_ids, id_name_map, expected_values", + [ + ([42], {42: "topic42"}, {"topic42": []}), + ( + [43], + {43: "topic43"}, + {"topic43": [("2020-06-01T12:30:59.000000+00:00", [2, 3])]}, + ), + ], +) +def test_query_should_return_data( + get_container_func, ports_config, topic_ids, id_name_map, expected_values +): get_container, image = get_container_func with get_container(image, ports=ports_config["ports"], env=ENV_MYSQL) as container: wait_for_connection(container) @@ -138,7 +172,7 @@ def test_insert_meta_query_should_succeed(get_container_func, ports_config): res = mysqlfuncts.insert_meta(topic_id, metadata) assert res is True - assert get_data_in_table(port_on_host, 'meta')[0] == expected_data + assert get_data_in_table(port_on_host, "meta")[0] == expected_data @pytest.mark.mysqlfuncts @@ -150,7 +184,7 @@ def test_insert_data_query_should_succeed(get_container_func, ports_config): port_on_host = ports_config["port_on_host"] with get_mysqlfuncts(port_on_host) as mysqlfuncts: - ts = '2001-09-11 08:46:00' + ts = "2001-09-11 08:46:00" topic_id = "11" data = "1wtc" expected_data = [(datetime.datetime(2001, 9, 11, 8, 46), 11, '"1wtc"')] @@ -158,7 +192,7 @@ def test_insert_data_query_should_succeed(get_container_func, ports_config): res = mysqlfuncts.insert_data(ts, topic_id, data) assert res is True - assert get_data_in_table(port_on_host, 'data') == expected_data + assert get_data_in_table(port_on_host, "data") == expected_data @pytest.mark.mysqlfuncts @@ -175,7 +209,9 @@ def test_insert_topic_query_should_succeed(get_container_func, ports_config): actual_id = mysqlfuncts.insert_topic(topic) assert isinstance(actual_id, int) - assert (actual_id, 'football') == get_data_in_table(port_on_host, 'topics')[0] + assert (actual_id, "football") == get_data_in_table(port_on_host, "topics")[ + 0 + ] @pytest.mark.mysqlfuncts @@ -196,7 +232,7 @@ def test_update_topic_should_succeed(get_container_func, ports_config): result = mysqlfuncts.update_topic("soccer", actual_id) assert result is True - assert (actual_id, 'soccer') == get_data_in_table(port_on_host, 'topics')[0] + assert (actual_id, "soccer") == get_data_in_table(port_on_host, "topics")[0] @pytest.mark.mysqlfuncts @@ -211,7 +247,7 @@ def test_insert_agg_topic_should_succeed(get_container_func, ports_config): topic = "some_agg_topic" agg_type = "AVG" agg_time_period = "2019" - expected_data = (1, 'some_agg_topic', 'AVG', '2019') + expected_data = (1, "some_agg_topic", "AVG", "2019") actual_id = mysqlfuncts.insert_agg_topic(topic, agg_type, agg_time_period) @@ -231,7 +267,7 @@ def test_update_agg_topic_should_succeed(get_container_func, ports_config): topic = "cars" agg_type = "SUM" agg_time_period = "2100ZULU" - expected_data = (1, 'cars', 'SUM', '2100ZULU') + expected_data = (1, "cars", "SUM", "2100ZULU") actual_id = mysqlfuncts.insert_agg_topic(topic, agg_type, agg_time_period) @@ -239,7 +275,7 @@ def test_update_agg_topic_should_succeed(get_container_func, ports_config): assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data new_agg_topic_name = "boats" - expected_data = (1, 'boats', 'SUM', '2100ZULU') + expected_data = (1, "boats", "SUM", "2100ZULU") result = mysqlfuncts.update_agg_topic(actual_id, new_agg_topic_name) @@ -258,7 +294,7 @@ def test_insert_agg_meta_should_succeed(get_container_func, ports_config): with get_mysqlfuncts(port_on_host) as mysqlfuncts: topic_id = 42 - metadata = 'meaning of life' + metadata = "meaning of life" expected_data = (42, '"meaning of life"') result = mysqlfuncts.insert_agg_meta(topic_id, metadata) @@ -283,7 +319,10 @@ def test_get_topic_map_should_succeed(get_container_func, ports_config): VALUES ('baseball'); """ seed_database(container, query) - expected = ({'baseball': 2, 'football': 1}, {'baseball': 'baseball', 'football': 'football'}) + expected = ( + {"baseball": 2, "football": 1}, + {"baseball": "baseball", "football": "football"}, + ) actual = mysqlfuncts.get_topic_map() @@ -305,7 +344,7 @@ def test_get_agg_topic_map_should_return_dict(get_container_func, ports_config): VALUES ('topic_name', 'AVG', '2001'); """ seed_database(container, query) - expected = {('topic_name', 'AVG', '2001'): 1} + expected = {("topic_name", "AVG", "2001"): 1} actual = mysqlfuncts.get_agg_topic_map() @@ -330,7 +369,7 @@ def test_query_topics_by_pattern_should_succeed(get_container_func, ports_config VALUES ('xyzzzzzzzz'); """ seed_database(container, query) - expected = {'football': 1, 'foobar': 2} + expected = {"football": 1, "foobar": 2} topic_pattern = "foo" actual = mysqlfuncts.query_topics_by_pattern(topic_pattern) @@ -350,13 +389,16 @@ def test_create_aggregate_store_should_succeed(get_container_func, ports_config) agg_type = "AVG" agg_time_period = "1984" expected_aggregate_table = "AVG_1984" - expected_fields = {'value_string', 'topics_list', 'topic_id', 'ts'} + expected_fields = {"value_string", "topics_list", "topic_id", "ts"} result = mysqlfuncts.create_aggregate_store(agg_type, agg_time_period) assert result is not None assert expected_aggregate_table in get_tables(port_on_host) - assert describe_table(port_on_host, expected_aggregate_table) == expected_fields + assert ( + describe_table(port_on_host, expected_aggregate_table) + == expected_fields + ) @pytest.mark.mysqlfuncts @@ -382,16 +424,25 @@ def test_insert_aggregate_stmt_should_succeed(get_container_func, ports_config): ts = "2020-06-01 12:30:59" data = "some_data" topic_ids = [12, 54, 65] - expected_data = (datetime.datetime(2020, 6, 1, 12, 30, 59), 42, '"some_data"', '[12, 54, 65]') + expected_data = ( + datetime.datetime(2020, 6, 1, 12, 30, 59), + 42, + '"some_data"', + "[12, 54, 65]", + ) - res = mysqlfuncts.insert_aggregate(agg_topic_id, agg_type, period, ts, data, topic_ids) + res = mysqlfuncts.insert_aggregate( + agg_topic_id, agg_type, period, ts, data, topic_ids + ) assert res is True assert get_data_in_table(port_on_host, "AVG_1776")[0] == expected_data @pytest.mark.mysqlfuncts -def test_collect_aggregate_should_return_aggregate_result(get_container_func, ports_config): +def test_collect_aggregate_should_return_aggregate_result( + get_container_func, ports_config +): get_container, image = get_container_func with get_container(image, ports=ports_config["ports"], env=ENV_MYSQL) as container: wait_for_connection(container) @@ -429,17 +480,22 @@ def test_collect_aggregate_should_raise_value_error(get_container_func, ports_co @contextlib.contextmanager def get_mysqlfuncts(port): - connect_params = {"host": 'localhost', - "port": port, - "database": TEST_DATABASE, - "user": "root", - "passwd": ROOT_PASSWORD} - - table_names = {"data_table": DATA_TABLE, - "topics_table": TOPICS_TABLE, - "meta_table": META_TABLE, - "agg_topics_table": AGG_TOPICS_TABLE, - "agg_meta_table": AGG_META_TABLE} + connect_params = { + "host": "localhost", + "port": port, + "database": TEST_DATABASE, + "user": "root", + "passwd": ROOT_PASSWORD, + "connection_timeout": ALLOW_CONNECTION_TIME + } + + table_names = { + "data_table": DATA_TABLE, + "topics_table": TOPICS_TABLE, + "meta_table": META_TABLE, + "agg_topics_table": AGG_TOPICS_TABLE, + "agg_meta_table": AGG_META_TABLE, + } mysqlfuncts = MySqlFuncts(connect_params, table_names) @@ -454,8 +510,7 @@ def get_container_func(request): @pytest.fixture() def ports_config(): port_on_host = get_rand_port(ip="3306") - return {"port_on_host": port_on_host, - "ports": {"3306/tcp": port_on_host}} + return {"port_on_host": port_on_host, "ports": {"3306/tcp": port_on_host}} def wait_for_connection(container): @@ -463,7 +518,7 @@ def wait_for_connection(container): response = None while time() - start_time < ALLOW_CONNECTION_TIME: command = ( - f"mysqlshow --user=\"root\" --password=\"{ROOT_PASSWORD}\" {TEST_DATABASE}" + f'mysqlshow --user="root" --password="{ROOT_PASSWORD}" {TEST_DATABASE}' ) response = container.exec_run(command, tty=True) exit_code, output = response @@ -476,7 +531,6 @@ def wait_for_connection(container): raise RuntimeError(f"Failed to make connection within allowed time {response}") - def create_historian_tables(container): query = """ CREATE TABLE IF NOT EXISTS data @@ -494,7 +548,7 @@ def create_historian_tables(container): metadata TEXT NOT NULL, PRIMARY KEY(topic_id)); """ - command = f"mysql --user=\"root\" --password=\"{ROOT_PASSWORD}\" {TEST_DATABASE} --execute=\"{query}\"" + command = f'mysql --user="root" --password="{ROOT_PASSWORD}" {TEST_DATABASE} --execute="{query}"' container.exec_run(cmd=command, tty=True) return @@ -512,7 +566,7 @@ def create_metadata_table(container): REPLACE INTO metadata VALUES ('meta_table', 'meta', 'p'); """ - command = f"mysql --user=\"root\" --password=\"{ROOT_PASSWORD}\" {TEST_DATABASE} --execute=\"{query}\"" + command = f'mysql --user="root" --password="{ROOT_PASSWORD}" {TEST_DATABASE} --execute="{query}"' container.exec_run(cmd=command, tty=True) return @@ -531,7 +585,7 @@ def create_aggregate_tables(container): metadata TEXT NOT NULL, PRIMARY KEY(agg_topic_id)); """ - command = f"mysql --user=\"root\" --password=\"{ROOT_PASSWORD}\" {TEST_DATABASE} --execute=\"{query}\"" + command = f'mysql --user="root" --password="{ROOT_PASSWORD}" {TEST_DATABASE} --execute="{query}"' container.exec_run(cmd=command, tty=True) return @@ -544,8 +598,9 @@ def create_all_tables(container): def seed_database(container, query): - command = f"mysql --user=\"root\" --password=\"{ROOT_PASSWORD}\" {TEST_DATABASE} --execute=\"{query}\"" + command = f'mysql --user="root" --password="{ROOT_PASSWORD}" {TEST_DATABASE} --execute="{query}"' container.exec_run(cmd=command, tty=True) + sleep(3) return @@ -602,12 +657,14 @@ def get_data_in_table(port, table): def get_cnx_cursor(port): - connect_params = {"host": 'localhost', - "port": port, - "database": TEST_DATABASE, - "user": "root", - "passwd": ROOT_PASSWORD} + sleep(3) + connect_params = { + "host": "localhost", + "port": port, + "database": TEST_DATABASE, + "user": "root", + "passwd": ROOT_PASSWORD, + } cnx = mysql.connector.connect(**connect_params) cursor = cnx.cursor() return cnx, cursor - diff --git a/volttrontesting/platform/dbutils/test_postgresql_timescaledb.py b/volttrontesting/platform/dbutils/test_postgresql_timescaledb.py new file mode 100644 index 0000000000..b80ebafcea --- /dev/null +++ b/volttrontesting/platform/dbutils/test_postgresql_timescaledb.py @@ -0,0 +1,853 @@ +import contextlib +import datetime +import os +import logging + +logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO) + + +from time import time + +import pytest + +try: + import psycopg2 + from psycopg2.sql import SQL, Identifier +except ImportError: + pytest.skip( + "Required imports for testing are not installed; thus, not running tests. Install imports with: python bootstrap.py --postgres", + allow_module_level=True, + ) + +from volttron.platform.dbutils.postgresqlfuncts import PostgreSqlFuncts +from volttrontesting.fixtures.docker_wrapper import create_container +from volttrontesting.utils.utils import get_rand_port + +IMAGES = ["timescale/timescaledb:latest-pg10", "timescale/timescaledb:latest-pg11"] + +if "CI" not in os.environ: + IMAGES.extend( + ["timescale/timescaledb:latest-pg12"] + ) + +ALLOW_CONNECTION_TIME = 3 +TEST_DATABASE = "test_historian" +ROOT_USER = "postgres" +ROOT_PASSWORD = "password" +ENV_POSTGRESQL = { + "POSTGRES_USER": ROOT_USER, # defining user not necessary but added to be explicit + "POSTGRES_PASSWORD": ROOT_PASSWORD, + "POSTGRES_DB": TEST_DATABASE, +} +DATA_TABLE = "data" +TOPICS_TABLE = "topics" +META_TABLE = "meta" +AGG_TOPICS_TABLE = "aggregate_topics" +AGG_META_TABLE = "aggregate_meta" +METADATA_TABLE = "metadata" + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_setup_historian_tables_should_create_tables(get_container_func, ports_config): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + tables_before_setup = get_tables(port_on_host) + assert tables_before_setup == set() + + expected_tables = set(["data", "topics", "meta"]) + + postgresqlfuncts.setup_historian_tables() + + actual_tables = get_tables(port_on_host) + + assert actual_tables == expected_tables + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_record_table_definitions_should_create_meta_table( + get_container_func, ports_config +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + assert METADATA_TABLE not in get_tables(port_on_host) + + tables_def = { + "table_prefix": "", + "data_table": "data", + "topics_table": "topics", + "meta_table": "meta", + } + expected_table_defs = set( + ["table_name", "table_id", "key", "value", "include_in_telemetry"] + ) + + postgresqlfuncts.record_table_definitions(tables_def, METADATA_TABLE) + + assert METADATA_TABLE in get_tables(port_on_host) + assert describe_table(port_on_host, METADATA_TABLE) == expected_table_defs + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_read_tablenames_from_db_should_return_table_names( + get_container_func, ports_config +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_meta_data_table(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + expected_tables = { + "data_table": "data", + "topics_table": "topics", + "meta_table": "meta", + "agg_topics_table": "aggregate_topics", + "agg_meta_table": "aggregate_meta", + } + + actual_tables = postgresqlfuncts.read_tablenames_from_db(METADATA_TABLE) + + assert actual_tables == expected_tables + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_setup_aggregate_historian_tables_should_create_aggregate_tables( + get_container_func, ports_config +): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + agg_topic_table = "aggregate_topics" + agg_meta_table = "aggregate_meta" + + original_tables = get_tables(port_on_host) + assert agg_topic_table not in original_tables + assert agg_meta_table not in original_tables + + create_meta_data_table(container) + expected_agg_topic_fields = { + "agg_topic_id", + "agg_topic_name", + "agg_time_period", + "agg_type", + } + expected_agg_meta_fields = {"agg_topic_id", "metadata"} + + postgresqlfuncts.setup_aggregate_historian_tables(METADATA_TABLE) + + updated_tables = get_tables(port_on_host) + assert agg_topic_table in updated_tables + assert agg_meta_table in updated_tables + assert ( + describe_table(port_on_host, agg_topic_table) + == expected_agg_topic_fields + ) + assert ( + describe_table(port_on_host, agg_meta_table) == expected_agg_meta_fields + ) + + +@pytest.mark.parametrize( + "topic_ids, id_name_map, expected_values", + [ + ([42], {42: "topic42"}, {"topic42": []}), + ( + [43], + {43: "topic43"}, + {"topic43": [("2020-06-01T12:30:59.000000+00:00", [2, 3])]}, + ), + ], +) +@pytest.mark.postgresqlfuncts_timescaledb +def test_query_should_return_data( + get_container_func, ports_config, topic_ids, id_name_map, expected_values +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + query = f""" + INSERT INTO {DATA_TABLE} VALUES ('2020-06-01 12:30:59', 43, '[2,3]') + """ + seed_database(container, query) + + actual_values = postgresqlfuncts.query(topic_ids, id_name_map) + + assert actual_values == expected_values + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_topic_should_return_topic_id(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + + topic = "football" + expected_topic_id = 1 + + actual_topic_id = postgresqlfuncts.insert_topic(topic) + + assert actual_topic_id == expected_topic_id + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_agg_topic_should_return_agg_topic_id(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + + topic = "some_agg_topic" + agg_type = "AVG" + agg_time_period = "2019" + expected_data = (1, "some_agg_topic", "AVG", "2019") + + actual_id = postgresqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + + assert isinstance(actual_id, int) + assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_meta_should_return_true(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + topic_id = "44" + metadata = "foobar44" + expected_data = (44, '"foobar44"') + + res = postgresqlfuncts.insert_meta(topic_id, metadata) + + assert res is True + assert get_data_in_table(port_on_host, "meta")[0] == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_data_should_return_true(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + ts = "2001-09-11 08:46:00" + topic_id = "11" + data = "1wtc" + expected_data = [(datetime.datetime(2001, 9, 11, 8, 46), 11, '"1wtc"')] + + res = postgresqlfuncts.insert_data(ts, topic_id, data) + + assert res is True + assert get_data_in_table(port_on_host, "data") == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_update_topic_should_return_true(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + topic = "football" + + actual_id = postgresqlfuncts.insert_topic(topic) + + assert isinstance(actual_id, int) + + result = postgresqlfuncts.update_topic("soccer", actual_id) + + assert result is True + assert (actual_id, "soccer") == get_data_in_table(port_on_host, "topics")[0] + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_get_aggregation_list_should_return_list(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + expected_list = [ + "AVG", + "MIN", + "MAX", + "COUNT", + "SUM", + "BIT_AND", + "BIT_OR", + "BOOL_AND", + "BOOL_OR", + "MEDIAN", + "STDDEV", + "STDDEV_POP", + "STDDEV_SAMP", + "VAR_POP", + "VAR_SAMP", + "VARIANCE", + ] + + assert postgresqlfuncts.get_aggregation_list() == expected_list + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_agg_topic_should_return_true(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + topic = "some_agg_topic" + agg_type = "AVG" + agg_time_period = "2019" + expected_data = (1, "some_agg_topic", "AVG", "2019") + + actual_id = postgresqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + + assert isinstance(actual_id, int) + assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_update_agg_topic_should_return_true(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + topic = "cars" + agg_type = "SUM" + agg_time_period = "2100ZULU" + expected_data = (1, "cars", "SUM", "2100ZULU") + + actual_id = postgresqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + + assert isinstance(actual_id, int) + assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data + + new_agg_topic_name = "boats" + expected_data = (1, "boats", "SUM", "2100ZULU") + + result = postgresqlfuncts.update_agg_topic(actual_id, new_agg_topic_name) + + assert result is True + assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_agg_meta_should_return_true(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + topic_id = 42 + # metadata must be in the following convention because aggregation methods, i.e. get_agg_topics, rely on metadata having a key called "configured_topics" + metadata = {"configured_topics": "meaning of life"} + expected_data = (42, '{"configured_topics": "meaning of life"}') + + result = postgresqlfuncts.insert_agg_meta(topic_id, metadata) + + assert result is True + assert get_data_in_table(port_on_host, AGG_META_TABLE)[0] == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_get_topic_map_should_return_maps(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + query = """ + INSERT INTO topics (topic_name) + VALUES ('football'); + INSERT INTO topics (topic_name) + VALUES ('baseball'); + """ + seed_database(container, query) + expected = ( + {"baseball": 2, "football": 1}, + {"baseball": "baseball", "football": "football"}, + ) + + actual = postgresqlfuncts.get_topic_map() + + assert actual == expected + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_get_agg_topics_should_return_list(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + topic = "some_agg_topic" + agg_type = "AVG" + agg_time_period = "2019" + topic_id = postgresqlfuncts.insert_agg_topic( + topic, agg_type, agg_time_period + ) + metadata = {"configured_topics": "meaning of life"} + postgresqlfuncts.insert_agg_meta(topic_id, metadata) + expected_list = [("some_agg_topic", "AVG", "2019", "meaning of life")] + + actual_list = postgresqlfuncts.get_agg_topics() + + assert actual_list == expected_list + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_get_agg_topic_map_should_return_dict(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + query = f""" + INSERT INTO {AGG_TOPICS_TABLE} + (agg_topic_name, agg_type, agg_time_period) + VALUES ('topic_name', 'AVG', '2001'); + """ + seed_database(container, query) + expected = {("topic_name", "AVG", "2001"): 1} + + actual = postgresqlfuncts.get_agg_topic_map() + + assert actual == expected + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_query_topics_by_pattern_should_return_matching_results( + get_container_func, ports_config +): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + query = f""" + INSERT INTO {TOPICS_TABLE} (topic_name) + VALUES ('football'); + INSERT INTO {TOPICS_TABLE} (topic_name) + VALUES ('foobar'); + INSERT INTO {TOPICS_TABLE} (topic_name) + VALUES ('xyzzzzzzzz'); + """ + seed_database(container, query) + expected = {"football": 1, "foobar": 2} + topic_pattern = "foo" + + actual = postgresqlfuncts.query_topics_by_pattern(topic_pattern) + + assert actual == expected + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_create_aggregate_store_should_succeed(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + agg_type = "AVG" + agg_time_period = "1984" + expected_aggregate_table = "AVG_1984" + expected_fields = {"value_string", "topics_list", "topic_id", "ts"} + + postgresqlfuncts.create_aggregate_store(agg_type, agg_time_period) + + assert expected_aggregate_table in get_tables(port_on_host) + assert ( + describe_table(port_on_host, expected_aggregate_table) + == expected_fields + ) + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_insert_aggregate_stmt_should_succeed(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + # be aware that Postgresql will automatically fold unquoted names into lower case + # From : https://www.postgresql.org/docs/current/sql-syntax-lexical.html + # Quoting an identifier also makes it case-sensitive, whereas unquoted names are always folded to lower case. + # For example, the identifiers FOO, foo, and "foo" are considered the same by PostgreSQL, + # but "Foo" and "FOO" are different from these three and each other. + # (The folding of unquoted names to lower case in PostgreSQL is incompatible with the SQL standard, + # which says that unquoted names should be folded to upper case. + # Thus, foo should be equivalent to "FOO" not "foo" according to the standard. + # If you want to write portable applications you are advised to always quote a particular name or never quote it.) + query = """ + CREATE TABLE AVG_1776 ( + ts timestamp NOT NULL, + topic_id INTEGER NOT NULL, + value_string TEXT NOT NULL, + topics_list TEXT, + UNIQUE(ts, topic_id)); + CREATE INDEX IF NOT EXISTS idx_avg_1776 ON avg_1776 (ts ASC); + """ + seed_database(container, query) + + agg_topic_id = 42 + agg_type = "avg" + period = "1776" + ts = "2020-06-01 12:30:59" + data = "some_data" + topic_ids = [12, 54, 65] + expected_data = ( + datetime.datetime(2020, 6, 1, 12, 30, 59), + 42, + '"some_data"', + "[12, 54, 65]", + ) + + res = postgresqlfuncts.insert_aggregate( + agg_topic_id, agg_type, period, ts, data, topic_ids + ) + + assert res is True + assert get_data_in_table(port_on_host, "avg_1776")[0] == expected_data + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_collect_aggregate_stmt_should_return_rows(get_container_func, ports_config): + get_container, image = get_container_func + + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + create_all_tables(container) + + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + query = f""" + INSERT INTO {DATA_TABLE} + VALUES ('2020-06-01 12:30:59', 42, '2'); + INSERT INTO {DATA_TABLE} + VALUES ('2020-06-01 12:31:59', 43, '8') + """ + seed_database(container, query) + + topic_ids = [42, 43] + agg_type = "avg" + expected_aggregate = (5.0, 2) + + actual_aggregate = postgresqlfuncts.collect_aggregate(topic_ids, agg_type) + + assert actual_aggregate == expected_aggregate + + +@pytest.mark.postgresqlfuncts_timescaledb +def test_collect_aggregate_stmt_should_raise_value_error( + get_container_func, ports_config +): + get_container, image = get_container_func + with get_container( + image, ports=ports_config["ports"], env=ENV_POSTGRESQL + ) as container: + port_on_host = ports_config["port_on_host"] + wait_for_connection(container, port_on_host) + + with pytest.raises(ValueError): + with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: + postgresqlfuncts.collect_aggregate("dfdfadfdadf", "Invalid agg type") + + +@contextlib.contextmanager +def get_postgresqlfuncts(port): + connect_params = { + "dbname": TEST_DATABASE, + "user": ROOT_USER, + "password": ROOT_PASSWORD, + "host": "localhost", + "port": port, + "timescale_dialect": True, + } + + table_names = { + "data_table": DATA_TABLE, + "topics_table": TOPICS_TABLE, + "meta_table": META_TABLE, + "agg_topics_table": AGG_TOPICS_TABLE, + "agg_meta_table": AGG_META_TABLE, + } + + postgresqlfuncts = PostgreSqlFuncts(connect_params, table_names) + + yield postgresqlfuncts + + +@pytest.fixture(params=IMAGES) +def get_container_func(request): + return create_container, request.param + + +@pytest.fixture() +def ports_config(): + port_on_host = get_rand_port(ip="5432") + return {"port_on_host": port_on_host, "ports": {"5432/tcp": port_on_host}} + + +def create_all_tables(container): + create_historian_tables(container) + create_meta_data_table(container) + create_aggregate_historian_tables(container) + + +def create_historian_tables(container): + query = f""" + CREATE TABLE {DATA_TABLE} ( + ts TIMESTAMP NOT NULL, + topic_id INTEGER NOT NULL, + value_string TEXT NOT NULL, + UNIQUE (topic_id, ts)); + CREATE TABLE IF NOT EXISTS {TOPICS_TABLE} ( + topic_id SERIAL PRIMARY KEY NOT NULL, + topic_name VARCHAR(512) NOT NULL, + UNIQUE (topic_name)); + CREATE TABLE IF NOT EXISTS {META_TABLE} ( + topic_id INTEGER PRIMARY KEY NOT NULL, + metadata TEXT NOT NULL); + """ + + seed_database(container, query) + + +def create_meta_data_table(container): + query = f""" + CREATE TABLE {METADATA_TABLE} + (table_id VARCHAR(512) PRIMARY KEY NOT NULL, + table_name VARCHAR(512) NOT NULL); + INSERT INTO {METADATA_TABLE} VALUES ('data_table', '{DATA_TABLE}'); + INSERT INTO {METADATA_TABLE} VALUES ('topics_table', '{TOPICS_TABLE}'); + INSERT INTO {METADATA_TABLE} VALUES ('meta_table', '{META_TABLE}'); + """ + seed_database(container, query) + + +def create_aggregate_historian_tables(container): + query = f""" + CREATE TABLE IF NOT EXISTS {AGG_TOPICS_TABLE} ( + agg_topic_id SERIAL PRIMARY KEY NOT NULL, + agg_topic_name VARCHAR(512) NOT NULL, + agg_type VARCHAR(512) NOT NULL, + agg_time_period VARCHAR(512) NOT NULL, + UNIQUE (agg_topic_name, agg_type, agg_time_period)); + CREATE TABLE IF NOT EXISTS {AGG_META_TABLE} ( + agg_topic_id INTEGER PRIMARY KEY NOT NULL, + metadata TEXT NOT NULL); + """ + + seed_database(container, query) + + +def seed_database(container, query): + command = ( + f'psql --username="{ROOT_USER}" --dbname="{TEST_DATABASE}" --command="{query}"' + ) + r = container.exec_run(cmd=command, tty=True) + if r[0] == 1: + raise RuntimeError( + f"SQL query did not successfully complete on the container: \n {r}" + ) + return + + +def get_tables(port): + cnx, cursor = get_cnx_cursor(port) + # unlike MYSQL, Postgresql does not have a "SHOW TABLES" shortcut + # we have to create the query ourselves + query = SQL( + "SELECT table_name " + "FROM information_schema.tables " + "WHERE table_type = 'BASE TABLE' and " + "table_schema not in ('pg_catalog', 'information_schema', '_timescaledb_catalog', '_timescaledb_config', '_timescaledb_internal', '_timescaledb_cache')" + ) + results = execute_statement(cnx, cursor, query) + + return {t[0] for t in results} + + +def describe_table(port, table): + cnx, cursor = get_cnx_cursor(port) + query = SQL( + "SELECT column_name " "FROM information_schema.columns " "WHERE table_name = %s" + ) + + results = execute_statement(cnx, cursor, query, args=[table]) + + return {t[0] for t in results} + + +def get_data_in_table(port, table): + cnx, cursor = get_cnx_cursor(port) + query = SQL("SELECT * " "FROM {table_name}").format(table_name=Identifier(table)) + + results = execute_statement(cnx, cursor, query) + + return results + + +def execute_statement(cnx, cursor, query, args=None): + cursor.execute(query, vars=args) + + results = cursor.fetchall() + + cursor.close() + cnx.close() + + return results + + +def get_cnx_cursor(port): + connect_params = { + "database": TEST_DATABASE, + "user": ROOT_USER, + "password": ROOT_PASSWORD, + "host": "localhost", + "port": port, + } + + cnx = psycopg2.connect(**connect_params) + + return cnx, cnx.cursor() + + +def wait_for_connection(container, port): + start_time = time() + while time() - start_time < ALLOW_CONNECTION_TIME: + command = f"psql --user={ROOT_USER} --dbname={TEST_DATABASE} --port={port}" + response = container.exec_run(command, tty=True) + # https://www.postgresql.org/docs/10/app-psql.html#id-1.9.4.18.7 + # psql returns 0 to the shell if it finished normally, + # 1 if a fatal error of its own occurs (e.g. out of memory, file not found), + # 2 if the connection to the server went bad and the session was not interactive, + # and 3 if an error occurred in a script and the variable ON_ERROR_STOP was set. + exit_code = response[0] + + if exit_code == 0: + return + elif exit_code == 1: + raise RuntimeError(response) + elif exit_code == 2: + continue + elif exit_code == 3: + raise RuntimeError(response) + + # if we break out of the loop, we assume that connection has been verified given enough sleep time + return diff --git a/volttrontesting/platform/dbutils/test_postgresqlfuncts.py b/volttrontesting/platform/dbutils/test_postgresqlfuncts.py index b0aa9f4d72..bca1702f35 100644 --- a/volttrontesting/platform/dbutils/test_postgresqlfuncts.py +++ b/volttrontesting/platform/dbutils/test_postgresqlfuncts.py @@ -22,6 +22,8 @@ from volttrontesting.fixtures.docker_wrapper import create_container from volttrontesting.utils.utils import get_rand_port +pytestmark = [pytest.mark.postgresqlfuncts, pytest.mark.dbutils, pytest.mark.unit] + # Current documentation claims that we have tested Historian on Postgres 10 # See https://volttron.readthedocs.io/en/develop/core_services/historians/SQL-Historian.html#postgresql-and-redshift IMAGES = ["postgres:9.6.18", "postgres:10.13"] @@ -57,8 +59,6 @@ METADATA_TABLE = "metadata" -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_setup_historian_tables_should_create_tables(get_container_func, ports_config): get_container, image = get_container_func with get_container( @@ -79,8 +79,6 @@ def test_setup_historian_tables_should_create_tables(get_container_func, ports_c assert actual_tables == expected_tables -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_record_table_definitions_should_create_meta_table( get_container_func, ports_config ): @@ -108,10 +106,81 @@ def test_record_table_definitions_should_create_meta_table( assert describe_table(port_on_host, METADATA_TABLE) == expected_table_defs +def create_meta_data_table(container): + query = f""" + CREATE TABLE {METADATA_TABLE} + (table_id VARCHAR(512) PRIMARY KEY NOT NULL, + table_name VARCHAR(512) NOT NULL); + INSERT INTO {METADATA_TABLE} VALUES ('data_table', '{DATA_TABLE}'); + INSERT INTO {METADATA_TABLE} VALUES ('topics_table', '{TOPICS_TABLE}'); + INSERT INTO {METADATA_TABLE} VALUES ('meta_table', '{META_TABLE}'); + """ + seed_database(container, query) + + return + + +def create_empty_meta_data_table(container): + query = f""" + CREATE TABLE {METADATA_TABLE} + (table_id VARCHAR(512) PRIMARY KEY NOT NULL, + table_name VARCHAR(512) NOT NULL); + """ + seed_database(container, query) + + return + + +def create_incorrect_meta_data_table(container): + query = f""" + CREATE TABLE {METADATA_TABLE} + (table_id VARCHAR(512) PRIMARY KEY NOT NULL, + table_name VARCHAR(512) NOT NULL); + INSERT INTO {METADATA_TABLE} VALUES ('data_tableFOOOBAR', '{DATA_TABLE}'); + INSERT INTO {METADATA_TABLE} VALUES ('topifdkjadslkfcs_table', '{TOPICS_TABLE}'); + INSERT INTO {METADATA_TABLE} VALUES ('3333gjhmeta_table', '{META_TABLE}'); + """ + seed_database(container, query) + + return + + +@pytest.mark.parametrize( + "seed_meta_data_table, expected_tables", + [ + ( + create_meta_data_table, + { + "data_table": "data", + "topics_table": "topics", + "meta_table": "meta", + "agg_topics_table": "aggregate_topics", + "agg_meta_table": "aggregate_meta", + }, + ), + ( + create_empty_meta_data_table, + { + "agg_topics_table": "aggregate_topics", + "agg_meta_table": "aggregate_meta", + }, + ), + ( + create_incorrect_meta_data_table, + { + "3333gjhmeta_table": "meta", + "agg_meta_table": "aggregate_meta", + "agg_topics_table": "aggregate_topics", + "data_tableFOOOBAR": "data", + "topifdkjadslkfcs_table": "topics", + }, + ), + ], +) @pytest.mark.postgresqlfuncts @pytest.mark.dbutils def test_read_tablenames_from_db_should_return_table_names( - get_container_func, ports_config + get_container_func, ports_config, seed_meta_data_table, expected_tables ): get_container, image = get_container_func with get_container( @@ -119,26 +188,26 @@ def test_read_tablenames_from_db_should_return_table_names( ) as container: port_on_host = ports_config["port_on_host"] wait_for_connection(container, port_on_host) - create_meta_data_table(container) + seed_meta_data_table(container) with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: - expected_tables = { - "data_table": "data", - "topics_table": "topics", - "meta_table": "meta", - "agg_topics_table": "aggregate_topics", - "agg_meta_table": "aggregate_meta", - } - actual_tables = postgresqlfuncts.read_tablenames_from_db(METADATA_TABLE) assert actual_tables == expected_tables +@pytest.mark.parametrize( + "seed_meta_data_table", + [ + (create_meta_data_table), + (create_empty_meta_data_table), + (create_incorrect_meta_data_table), + ], +) @pytest.mark.postgresqlfuncts @pytest.mark.dbutils def test_setup_aggregate_historian_tables_should_create_aggregate_tables( - get_container_func, ports_config + get_container_func, ports_config, seed_meta_data_table ): get_container, image = get_container_func @@ -156,7 +225,7 @@ def test_setup_aggregate_historian_tables_should_create_aggregate_tables( assert agg_topic_table not in original_tables assert agg_meta_table not in original_tables - create_meta_data_table(container) + seed_meta_data_table(container) expected_agg_topic_fields = { "agg_topic_id", "agg_topic_name", @@ -177,6 +246,11 @@ def test_setup_aggregate_historian_tables_should_create_aggregate_tables( assert ( describe_table(port_on_host, agg_meta_table) == expected_agg_meta_fields ) + assert postgresqlfuncts.agg_topics_table == agg_topic_table + assert postgresqlfuncts.agg_meta_table == agg_meta_table + assert postgresqlfuncts.data_table == DATA_TABLE + assert postgresqlfuncts.topics_table == TOPICS_TABLE + assert postgresqlfuncts.meta_table == META_TABLE @pytest.mark.parametrize( @@ -190,8 +264,6 @@ def test_setup_aggregate_historian_tables_should_create_aggregate_tables( ), ], ) -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_query_should_return_data( get_container_func, ports_config, topic_ids, id_name_map, expected_values ): @@ -214,8 +286,6 @@ def test_query_should_return_data( assert actual_values == expected_values -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_topic_should_return_topic_id(get_container_func, ports_config): get_container, image = get_container_func @@ -236,8 +306,6 @@ def test_insert_topic_should_return_topic_id(get_container_func, ports_config): assert actual_topic_id == expected_topic_id -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_agg_topic_should_return_agg_topic_id(get_container_func, ports_config): get_container, image = get_container_func @@ -263,8 +331,6 @@ def test_insert_agg_topic_should_return_agg_topic_id(get_container_func, ports_c assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_meta_should_return_true(get_container_func, ports_config): get_container, image = get_container_func @@ -286,8 +352,6 @@ def test_insert_meta_should_return_true(get_container_func, ports_config): assert get_data_in_table(port_on_host, "meta")[0] == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_data_should_return_true(get_container_func, ports_config): get_container, image = get_container_func @@ -310,8 +374,6 @@ def test_insert_data_should_return_true(get_container_func, ports_config): assert get_data_in_table(port_on_host, "data") == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_update_topic_should_return_true(get_container_func, ports_config): get_container, image = get_container_func @@ -335,8 +397,6 @@ def test_update_topic_should_return_true(get_container_func, ports_config): assert (actual_id, "soccer") == get_data_in_table(port_on_host, "topics")[0] -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_get_aggregation_list_should_return_list(get_container_func, ports_config): get_container, image = get_container_func @@ -369,8 +429,6 @@ def test_get_aggregation_list_should_return_list(get_container_func, ports_confi assert postgresqlfuncts.get_aggregation_list() == expected_list -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_agg_topic_should_return_true(get_container_func, ports_config): get_container, image = get_container_func @@ -395,8 +453,6 @@ def test_insert_agg_topic_should_return_true(get_container_func, ports_config): assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_update_agg_topic_should_return_true(get_container_func, ports_config): get_container, image = get_container_func @@ -429,8 +485,6 @@ def test_update_agg_topic_should_return_true(get_container_func, ports_config): assert get_data_in_table(port_on_host, AGG_TOPICS_TABLE)[0] == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_agg_meta_should_return_true(get_container_func, ports_config): get_container, image = get_container_func @@ -453,8 +507,6 @@ def test_insert_agg_meta_should_return_true(get_container_func, ports_config): assert get_data_in_table(port_on_host, AGG_META_TABLE)[0] == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_get_topic_map_should_return_maps(get_container_func, ports_config): get_container, image = get_container_func @@ -483,8 +535,6 @@ def test_get_topic_map_should_return_maps(get_container_func, ports_config): assert actual == expected -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_get_agg_topics_should_return_list(get_container_func, ports_config): get_container, image = get_container_func @@ -511,8 +561,6 @@ def test_get_agg_topics_should_return_list(get_container_func, ports_config): assert actual_list == expected_list -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_get_agg_topic_map_should_return_dict(get_container_func, ports_config): get_container, image = get_container_func @@ -537,10 +585,32 @@ def test_get_agg_topic_map_should_return_dict(get_container_func, ports_config): assert actual == expected -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils +@pytest.mark.parametrize( + "topic_1, topic_2, topic_3, topic_pattern, expected_result", + [ + ("'football'", "'foobar'", "'xzxzxccx'", "foo", {"football": 1, "foobar": 2}), + ("'football'", "'foobar'", "'xzxzxccx'", "ba", {"football": 1, "foobar": 2}), + ("'football'", "'foobar'", "'xzxzxccx'", "ccx", {"xzxzxccx": 3}), + ("'fotball'", "'foobar'", "'xzxzxccx'", "foo", {"foobar": 2}), + ("'football'", "'foooobar'", "'xzxzxccx'", "foooo", {"foooobar": 2}), + ( + "'FOOtball'", + "'ABCFOOoXYZ'", + "'XXXfOoOo'", + "foo", + {"FOOtball": 1, "ABCFOOoXYZ": 2, "XXXfOoOo": 3}, + ), + ], +) + def test_query_topics_by_pattern_should_return_matching_results( - get_container_func, ports_config + get_container_func, + ports_config, + topic_1, + topic_2, + topic_3, + topic_pattern, + expected_result, ): get_container, image = get_container_func @@ -550,27 +620,22 @@ def test_query_topics_by_pattern_should_return_matching_results( port_on_host = ports_config["port_on_host"] wait_for_connection(container, port_on_host) create_all_tables(container) - with get_postgresqlfuncts(port_on_host) as postgresqlfuncts: query = f""" INSERT INTO {TOPICS_TABLE} (topic_name) - VALUES ('football'); + VALUES ({topic_1}); INSERT INTO {TOPICS_TABLE} (topic_name) - VALUES ('foobar'); + VALUES ({topic_2}); INSERT INTO {TOPICS_TABLE} (topic_name) - VALUES ('xyzzzzzzzz'); + VALUES ({topic_3}); """ seed_database(container, query) - expected = {"football": 1, "foobar": 2} - topic_pattern = "foo" - actual = postgresqlfuncts.query_topics_by_pattern(topic_pattern) + actual_result = postgresqlfuncts.query_topics_by_pattern(topic_pattern) - assert actual == expected + assert actual_result == expected_result -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_create_aggregate_store_should_succeed(get_container_func, ports_config): get_container, image = get_container_func @@ -596,8 +661,6 @@ def test_create_aggregate_store_should_succeed(get_container_func, ports_config) ) -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_insert_aggregate_stmt_should_succeed(get_container_func, ports_config): get_container, image = get_container_func @@ -650,8 +713,6 @@ def test_insert_aggregate_stmt_should_succeed(get_container_func, ports_config): assert get_data_in_table(port_on_host, "avg_1776")[0] == expected_data -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_collect_aggregate_stmt_should_return_rows(get_container_func, ports_config): get_container, image = get_container_func @@ -680,8 +741,6 @@ def test_collect_aggregate_stmt_should_return_rows(get_container_func, ports_con assert actual_aggregate == expected_aggregate -@pytest.mark.postgresqlfuncts -@pytest.mark.dbutils def test_collect_aggregate_stmt_should_raise_value_error( get_container_func, ports_config ): @@ -758,20 +817,6 @@ def create_historian_tables(container): return -def create_meta_data_table(container): - query = f""" - CREATE TABLE {METADATA_TABLE} - (table_id VARCHAR(512) PRIMARY KEY NOT NULL, - table_name VARCHAR(512) NOT NULL); - INSERT INTO {METADATA_TABLE} VALUES ('data_table', '{DATA_TABLE}'); - INSERT INTO {METADATA_TABLE} VALUES ('topics_table', '{TOPICS_TABLE}'); - INSERT INTO {METADATA_TABLE} VALUES ('meta_table', '{META_TABLE}'); - """ - seed_database(container, query) - - return - - def create_aggregate_historian_tables(container): query = f""" CREATE TABLE IF NOT EXISTS {AGG_TOPICS_TABLE} ( diff --git a/volttrontesting/platform/dbutils/test_sqlitefuncts.py b/volttrontesting/platform/dbutils/test_sqlitefuncts.py new file mode 100644 index 0000000000..6bcffe8370 --- /dev/null +++ b/volttrontesting/platform/dbutils/test_sqlitefuncts.py @@ -0,0 +1,455 @@ +from gevent import os, subprocess +import pytest + +from setuptools import glob + +from volttron.platform.dbutils.sqlitefuncts import SqlLiteFuncts + + +TOPICS_TABLE = "topics" +DATA_TABLE = "data" +META_TABLE = "meta" +METAMETA_TABLE = "metameta" +AGG_TOPICS_TABLE = "aggregate_topics" +AGG_META_TABLE = "aggregate_meta" +TABLE_PREFIX = "" + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_setup_historian_tables(sqlitefuncts_db_not_initialized): + expected_tables = {"data", "meta", "topics"} + + sqlitefuncts_db_not_initialized.setup_historian_tables() + + actual_tables = get_tables() + + assert actual_tables == expected_tables + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_record_table_definitions(sqlitefuncts_db_not_initialized): + table_defs = { + "table_prefix": "prefixtab", + "data_table": "data", + "topics_table": "topics", + "meta_table": "meta", + } + meta_table_name = "metameta" + init_historian_tables(sqlitefuncts_db_not_initialized) + expected_tables = {"data", "meta", "metameta", "topics"} + + sqlitefuncts_db_not_initialized.record_table_definitions( + table_defs, meta_table_name + ) + + actual_tables = get_tables() + assert actual_tables == expected_tables + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_setup_aggregate_historian_tables(sqlitefuncts): + meta_table_name = "metameta" + expected_tables = { + "data", + "aggregate_meta", + "meta", + "aggregate_topics", + "topics", + "metameta", + } + + sqlitefuncts.setup_aggregate_historian_tables(meta_table_name) + + actual_tables = get_tables() + assert actual_tables == expected_tables + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +@pytest.mark.parametrize( + "topic_ids, id_name_map, expected_values", + [ + ([42], {42: "topic42"}, {"topic42": []}), + ([43], {43: "topic43"}, {"topic43": [("2020-06-01T12:30:59.000000", [2, 3])]}), + ], +) +def test_query(sqlitefuncts, topic_ids, id_name_map, expected_values): + init_database(sqlitefuncts) + query = """INSERT OR REPLACE INTO data VALUES('2020-06-01 12:30:59',43,'[2,3]')""" + query_db(query) + + actual_results = sqlitefuncts.query(topic_ids, id_name_map) + + assert actual_results == expected_values + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +@pytest.mark.parametrize( + "history_limit_timestamp, storage_limit_gb, expected_data", + [ + ("2020-06-01 12:30:59", None, []), + (None, 10, ["2000-06-01 12:30:59|43|[2,3]", "2000-06-01 12:30:58|42|[2,3]"]), + ("2020-06-01 12:30:59", 10, []), + ], +) +def test_manage_db_size( + sqlitefuncts, history_limit_timestamp, storage_limit_gb, expected_data +): + query = ( + "INSERT OR REPLACE INTO data VALUES('2000-06-01 12:30:59',43,'[2,3]'); " + "INSERT OR REPLACE INTO data VALUES('2000-06-01 12:30:58',42,'[2,3]')" + ) + + query_db(query) + data_before_resize = [ + "2000-06-01 12:30:59|43|[2,3]", + "2000-06-01 12:30:58|42|[2,3]", + ] + assert get_all_data(DATA_TABLE) == data_before_resize + + sqlitefuncts.manage_db_size(history_limit_timestamp, storage_limit_gb) + + assert get_all_data(DATA_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_insert_meta(sqlitefuncts): + assert get_all_data(META_TABLE) == [] + + topic_id = "44" + metadata = "foobar44" + expected_data = ['44|"foobar44"'] + + res = sqlitefuncts.insert_meta(topic_id, metadata) + sqlitefuncts.commit() + + assert res is True + assert get_all_data(META_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_insert_data(sqlitefuncts): + assert get_all_data(DATA_TABLE) == [] + + ts = "2001-09-11 08:46:00" + topic_id = "11" + data = "1wtc" + expected_data = ['2001-09-11 08:46:00|11|"1wtc"'] + + res = sqlitefuncts.insert_data(ts, topic_id, data) + sqlitefuncts.commit() + + assert res is True + assert get_all_data(DATA_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_insert_topic(sqlitefuncts): + assert get_all_data(TOPICS_TABLE) == [] + + topic = "football" + expected_data = ["1|football"] + + res = sqlitefuncts.insert_topic(topic) + sqlitefuncts.commit() + + assert res == 1 + assert get_all_data(TOPICS_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_update_topic(sqlitefuncts): + query = "INSERT INTO topics (topic_name) values ('football')" + query_db(query) + + assert get_all_data(TOPICS_TABLE) == ["1|football"] + + res = sqlitefuncts.update_topic("basketball", 1) + sqlitefuncts.commit() + + assert res is True + assert get_all_data("topics") == ["1|basketball"] + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_get_aggregation_list(sqlitefuncts): + assert sqlitefuncts.get_aggregation_list() == [ + "AVG", + "MIN", + "MAX", + "COUNT", + "SUM", + "TOTAL", + "GROUP_CONCAT", + ] + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_insert_agg_topic(sqlitefuncts): + assert get_all_data(AGG_TOPICS_TABLE) == [] + + topic = "agg_topics" + agg_type = "AVG" + agg_time_period = "2019" + expected_data = ["1|agg_topics|AVG|2019"] + + sqlitefuncts.insert_agg_topic(topic, agg_type, agg_time_period) + sqlitefuncts.commit() + + assert get_all_data(AGG_TOPICS_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_update_agg_topic(sqlitefuncts): + query = "INSERT INTO aggregate_topics (agg_topic_name, agg_type, agg_time_period) values ('cars', 'SUM', '2100ZULU')" + query_db(query) + + assert get_all_data(AGG_TOPICS_TABLE) == ["1|cars|SUM|2100ZULU"] + + new_agg_topic_name = "boats" + expected_data = ["1|cars|SUM|2100ZULU"] + + res = sqlitefuncts.update_agg_topic(1, new_agg_topic_name) + + assert res is True + assert get_all_data(AGG_TOPICS_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_insert_agg_meta(sqlitefuncts): + assert get_all_data(AGG_META_TABLE) == [] + + topic_id = 42 + metadata = "meaning of life" + expected_data = ['42|"meaning of life"'] + res = sqlitefuncts.insert_agg_meta(topic_id, metadata) + sqlitefuncts.commit() + + assert res is True + assert get_all_data(AGG_META_TABLE) == expected_data + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_get_topic_map(sqlitefuncts): + query = "INSERT INTO topics (topic_name) values ('football');INSERT INTO topics (topic_name) values ('netball');" + query_db(query) + expected_topic_map = ( + {"football": 1, "netball": 2}, + {"football": "football", "netball": "netball"}, + ) + + assert get_all_data(TOPICS_TABLE) == ["1|football", "2|netball"] + + actual_topic_map = sqlitefuncts.get_topic_map() + + assert actual_topic_map == expected_topic_map + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_get_agg_topics(sqlitefuncts): + query = ( + "INSERT INTO aggregate_topics (agg_topic_name, agg_type, agg_time_period ) " + "values('topic_name', 'AVG', '2001');" + ) + query_db(query) + sqlitefuncts.insert_agg_meta(1, {"configured_topics": "great books"}) + sqlitefuncts.commit() + expected_topics = [("topic_name", "AVG", "2001", "great books")] + + actual_topics = sqlitefuncts.get_agg_topics() + + assert actual_topics == expected_topics + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_agg_topics_should_return_empty_on_nonexistent_table( + sqlitefuncts_db_not_initialized, +): + init_historian_tables(sqlitefuncts_db_not_initialized) + + actual_topic_map = sqlitefuncts_db_not_initialized.get_agg_topics() + + assert actual_topic_map == [] + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_get_agg_topic_map(sqlitefuncts): + query = ( + "INSERT INTO aggregate_topics (agg_topic_name, agg_type, agg_time_period ) " + "values('topic_name', 'AVG', '2001');" + ) + query_db(query) + expected_acutal_topic_map = {("topic_name", "AVG", "2001"): 1} + + actual_topic_map = sqlitefuncts.get_agg_topic_map() + + assert actual_topic_map == expected_acutal_topic_map + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_agg_topic_map_should_return_empty_on_nonexistent_table( + sqlitefuncts_db_not_initialized, +): + init_historian_tables(sqlitefuncts_db_not_initialized) + + actual_topic_map = sqlitefuncts_db_not_initialized.get_agg_topic_map() + + assert actual_topic_map == {} + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +@pytest.mark.parametrize( + "topic_1, topic_2, topic_3, topic_pattern, expected_topics", + [ + ("'football'", "'foobar'", "'xzxzxccx'", "foo", {"football": 1, "foobar": 2}), + ("'football'", "'foobar'", "'xzxzxccx'", "ba", {"football": 1, "foobar": 2}), + ("'football'", "'foobar'", "'xzxzxccx'", "ccx", {"xzxzxccx": 3}), + ("'fotball'", "'foobar'", "'xzxzxccx'", "foo", {"foobar": 2}), + ("'football'", "'foooobar'", "'xzxzxccx'", "foooo", {"foooobar": 2}), + ( + "'FOOtball'", + "'ABCFOOoXYZ'", + "'XXXfOoOo'", + "foo", + {"FOOtball": 1, "ABCFOOoXYZ": 2, "XXXfOoOo": 3}, + ), + ], +) +def test_query_topics_by_pattern( + sqlitefuncts, topic_1, topic_2, topic_3, topic_pattern, expected_topics +): + query = ( + f"INSERT INTO topics (topic_name) values ({topic_1});" + f"INSERT INTO topics (topic_name) values ({topic_2});" + f"INSERT INTO topics (topic_name) values ({topic_3});" + ) + query_db(query) + + actual_topics = sqlitefuncts.query_topics_by_pattern(topic_pattern) + + assert actual_topics == expected_topics + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_create_aggregate_store(sqlitefuncts): + agg_type = "AVG" + agg_time_period = "1984" + expected_new_agg_table = "AVG_1984" + expected_indexes = ["0|idx_AVG_1984|0|c|0", "1|sqlite_autoindex_AVG_1984_1|1|u|0"] + + result = sqlitefuncts.create_aggregate_store(agg_type, agg_time_period) + + assert result is True + assert expected_new_agg_table in get_tables() + + actual_indexes = get_indexes(expected_new_agg_table) + assert actual_indexes == expected_indexes + + +@pytest.mark.sqlitefuncts +@pytest.mark.dbutils +def test_collect_aggregate(sqlitefuncts): + query = ( + "INSERT OR REPLACE INTO data values('2020-06-01 12:30:59', 42, '2');" + "INSERT OR REPLACE INTO data values('2020-06-01 12:31:59', 43, '8');" + ) + query_db(query) + + topic_ids = [42, 43] + agg_type = "avg" + expected_aggregate = (5.0, 2) + + actual_aggregate = sqlitefuncts.collect_aggregate(topic_ids, agg_type) + + assert actual_aggregate == expected_aggregate + + +def get_indexes(table): + res = query_db(f"""PRAGMA index_list({table})""") + return res.splitlines() + + +def get_tables(): + result = query_db(""".tables""") + res = set(result.replace("\n", "").split()) + return res + + +def get_all_data(table): + q = f"""SELECT * FROM {table}""" + res = query_db(q) + return res.splitlines() + + +def query_db(query): + output = subprocess.run( + ["sqlite3", "data/historian.sqlite", query], text=True, capture_output=True + ) + # check_returncode() will raise a CalledProcessError if the query fails + # see https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode + output.check_returncode() + + return output.stdout + + +@pytest.fixture() +def sqlitefuncts_db_not_initialized(): + connect_params = {"database": "data/historian.sqlite"} + table_names = { + "data_table": DATA_TABLE, + "topics_table": TOPICS_TABLE, + "meta_table": META_TABLE, + "agg_topics_table": AGG_TOPICS_TABLE, + "agg_meta_table": AGG_META_TABLE, + } + client = SqlLiteFuncts(connect_params, table_names) + yield client + + # Teardown + if os.path.isdir("./data"): + files = glob.glob("./data/*", recursive=True) + for f in files: + os.remove(f) + os.rmdir("./data/") + + +@pytest.fixture() +def sqlitefuncts(sqlitefuncts_db_not_initialized): + init_database(sqlitefuncts_db_not_initialized) + yield sqlitefuncts_db_not_initialized + + +def init_database(sqlitefuncts_client): + sqlitefuncts_client.setup_historian_tables() + table_defs = { + "table_prefix": TABLE_PREFIX, + "data_table": DATA_TABLE, + "topics_table": TOPICS_TABLE, + "meta_table": META_TABLE, + } + meta_table_name = METAMETA_TABLE + sqlitefuncts_client.record_table_definitions(table_defs, meta_table_name) + sqlitefuncts_client.setup_aggregate_historian_tables(meta_table_name) + + +def init_historian_tables(sqlitefuncts_client): + sqlitefuncts_client.setup_historian_tables() diff --git a/volttrontesting/platform/security/test_aip_security.py b/volttrontesting/platform/security/test_aip_security.py index 01a124bab2..1fff10b882 100644 --- a/volttrontesting/platform/security/test_aip_security.py +++ b/volttrontesting/platform/security/test_aip_security.py @@ -1,19 +1,15 @@ import pwd -from datetime import datetime - import gevent import pytest + from mock import MagicMock -from volttron.platform import get_home, is_rabbitmq_available -from volttron.platform import get_services_core, get_examples -from volttron.platform.agent import utils +from volttron.platform import is_rabbitmq_available +from volttron.platform import get_services_core from volttron.platform.agent.utils import execute_command -from volttron.platform.messaging import headers as headers_mod from volttron.platform.vip.agent import * -from volttrontesting.fixtures.volttron_platform_fixtures import \ - build_wrapper, cleanup_wrapper, volttron_multi_messagebus -from volttrontesting.utils.utils import get_hostname_and_random_port, get_rand_vip, get_rand_ip_and_port +from volttrontesting.fixtures.volttron_platform_fixtures import build_wrapper, cleanup_wrapper +from volttrontesting.utils.utils import get_rand_vip HAS_RMQ = is_rabbitmq_available() @@ -31,12 +27,13 @@ INSTANCE_NAME2 = "volttron2" -def get_agent_user_from_dir(agent_name, agent_uuid): +def get_agent_user_from_dir(agent_uuid, home): """ + :param home: path to volttron home :param agent_uuid: :return: Unix user ID if installed Volttron agent """ - user_id_path = os.path.join(get_home(), "agents", agent_uuid, "USER_ID") + user_id_path = os.path.join(home, "agents", agent_uuid, "USER_ID") with open(user_id_path, 'r') as id_file: return id_file.readline() @@ -89,7 +86,7 @@ def stop_agent(): def security_agent(request, secure_volttron_instance): agent = secure_volttron_instance.install_agent( vip_identity="security_agent", - agent_dir="volttrontesting/platform/security/SecurityAgent", + agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent", start=False, config_file=None) @@ -98,8 +95,7 @@ def security_agent(request, secure_volttron_instance): assert secure_volttron_instance.is_agent_running(agent) users = [user[0] for user in pwd.getpwall()] - # TODO find an alternative for the agent name here - agent_user = get_agent_user_from_dir("securityagent-0.1", agent) + agent_user = get_agent_user_from_dir(agent, secure_volttron_instance.volttron_home) assert agent_user in users def stop_agent(): @@ -170,7 +166,7 @@ def test_agent_rpc(secure_volttron_instance, security_agent, query_agent): try: agent2 = secure_volttron_instance.install_agent( vip_identity="security_agent2", - agent_dir="volttrontesting/platform/security/SecurityAgent", + agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent", start=False, config_file=None) @@ -287,7 +283,7 @@ def test_vhome_file_permissions(secure_volttron_instance, security_agent, query_ try: agent2 = secure_volttron_instance.install_agent( vip_identity="security_agent2", - agent_dir="volttrontesting/platform/security/SecurityAgent", + agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent", start=False, config_file=None) @@ -330,7 +326,7 @@ def test_config_store_access(secure_volttron_instance, security_agent, query_age try: agent2 = secure_volttron_instance.install_agent( vip_identity="security_agent2", - agent_dir="volttrontesting/platform/security/SecurityAgent", + agent_dir=f"{secure_volttron_instance.volttron_root}/volttrontesting/platform/security/SecurityAgent", start=False, config_file=None) diff --git a/volttrontesting/platform/test_instance_setup.py b/volttrontesting/platform/test_instance_setup.py index 456d79b4a5..1f1ca1af86 100644 --- a/volttrontesting/platform/test_instance_setup.py +++ b/volttrontesting/platform/test_instance_setup.py @@ -70,6 +70,8 @@ def test_should_not_remove_config_vhome_when_debugging(monkeypatch): assert os.path.isdir(vhome) assert os.path.isdir(vhome) shutil.rmtree(vhome, ignore_errors=True) + assert not os.path.isdir(vhome) + def test_zmq_case_no_agents(monkeypatch): with create_vcfg_vhome() as vhome: @@ -79,6 +81,7 @@ def test_zmq_case_no_agents(monkeypatch): message_bus = "zmq" vip_address = "tcp://127.0.0.15" vip_port = "22916" + instance_name = "test_zmq" is_web_enabled = "N" is_vcp = "N" install_historian = "N" @@ -88,6 +91,7 @@ def test_zmq_case_no_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, is_vcp, install_historian, @@ -103,17 +107,17 @@ def test_zmq_case_no_agents(monkeypatch): text=True ) as vcfg: out, err = vcfg.communicate(vcfg_args) - # print("CWD is: {}".format(os.getcwd())) - # print("OUT is: {}".format(out)) - # print("ERROR is: {}".format(err)) + #print("CWD is: {}".format(os.getcwd())) + #print("OUT is: {}".format(out)) + #print("ERROR is: {}".format(err)) assert os.path.exists(config_path) config = ConfigParser() config.read(config_path) assert config.get('volttron', 'message-bus') == "zmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "volttron1" + assert config.get('volttron', 'instance-name').strip('"') == "test_zmq" assert not _is_agent_installed("listener") - assert not _is_agent_installed("master_driver") + assert not _is_agent_installed("platform_driver") assert not _is_agent_installed("platform_historian") assert not _is_agent_installed("vc ") assert not _is_agent_installed("vcp") @@ -142,9 +146,9 @@ def test_zmq_case_with_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, is_vcp, - instance_name, vc_hostname, vc_port, agent_autostart, @@ -171,9 +175,9 @@ def test_zmq_case_with_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "zmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_zmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_zmq" assert _is_agent_installed("listener") - assert _is_agent_installed("master_driver") + assert _is_agent_installed("platform_driver") assert _is_agent_installed("platform_historian") assert _is_agent_installed("vcp") assert not _is_agent_installed("vc ") @@ -189,6 +193,7 @@ def test_zmq_case_web_no_agents(monkeypatch): message_bus = "zmq" vip_address = "tcp://127.0.0.15" vip_port = "22916" + instance_name = "test_zmq" is_web_enabled = "Y" web_protocol = "https" web_port = "8443" @@ -208,6 +213,7 @@ def test_zmq_case_web_no_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_protocol, web_port, @@ -239,12 +245,12 @@ def test_zmq_case_web_no_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "zmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "volttron1" + assert config.get('volttron', 'instance-name').strip('"') == "test_zmq" assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443") - assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt") - assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem") + assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "platform_web-server.crt") + assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "platform_web-server.pem") assert not _is_agent_installed("listener") - assert not _is_agent_installed("master_driver") + assert not _is_agent_installed("platform_driver") assert not _is_agent_installed("platform_historian") assert not _is_agent_installed("vc ") assert not _is_agent_installed("vcp") @@ -259,6 +265,7 @@ def test_zmq_case_web_with_agents(monkeypatch): message_bus = "zmq" vip_address = "tcp://127.0.0.15" vip_port = "22916" + instance_name = "test_zmq" is_web_enabled = "Y" web_protocol = "https" web_port = "8443" @@ -271,7 +278,6 @@ def test_zmq_case_web_with_agents(monkeypatch): ca_org_unit = "test-org-unit" is_vc = "N" is_vcp = "Y" - instance_name = "test_zmq" vc_hostname = "{}{}".format("https://", get_hostname()) vc_port = "8443" install_historian = "Y" @@ -282,6 +288,7 @@ def test_zmq_case_web_with_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_protocol, web_port, @@ -294,7 +301,6 @@ def test_zmq_case_web_with_agents(monkeypatch): ca_org_unit, is_vc, is_vcp, - instance_name, vc_hostname, vc_port, agent_autostart, @@ -321,12 +327,12 @@ def test_zmq_case_web_with_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "zmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_zmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_zmq" assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443") - assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt") - assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem") + assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "platform_web-server.crt") + assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "platform_web-server.pem") assert _is_agent_installed("listener") - assert _is_agent_installed("master_driver") + assert _is_agent_installed("platform_driver") assert _is_agent_installed("platform_historian") assert not _is_agent_installed("vc ") assert _is_agent_installed("vcp") @@ -341,6 +347,7 @@ def test_zmq_case_web_vc(monkeypatch): message_bus = "zmq" vip_address = "tcp://127.0.0.15" vip_port = "22916" + instance_name = "test_zmq" is_web_enabled = "Y" web_protocol = "https" web_port = "8443" @@ -353,7 +360,6 @@ def test_zmq_case_web_vc(monkeypatch): ca_org_unit = "test-org-unit" is_vc = "Y" is_vcp = "Y" - instance_name = "test_zmq" install_historian = "N" install_driver = "N" install_listener = "N" @@ -361,6 +367,7 @@ def test_zmq_case_web_vc(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_protocol, web_port, @@ -374,7 +381,6 @@ def test_zmq_case_web_vc(monkeypatch): is_vc, agent_autostart, is_vcp, - instance_name, agent_autostart, install_historian, install_driver, @@ -395,13 +401,13 @@ def test_zmq_case_web_vc(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "zmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_zmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_zmq" assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443") assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443") - assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt") - assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem") + assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "platform_web-server.crt") + assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "platform_web-server.pem") assert not _is_agent_installed("listener") - assert not _is_agent_installed("master_driver") + assert not _is_agent_installed("platform_driver") assert not _is_agent_installed("platform_historian") assert _is_agent_installed("vc ") assert _is_agent_installed("vcp") @@ -416,6 +422,7 @@ def test_zmq_case_web_vc_with_agents(monkeypatch): message_bus = "zmq" vip_address = "tcp://127.0.0.15" vip_port = "22916" + instance_name = "test_zmq" is_web_enabled = "Y" web_protocol = "https" web_port = "8443" @@ -428,7 +435,6 @@ def test_zmq_case_web_vc_with_agents(monkeypatch): ca_org_unit = "test-org-unit" is_vc = "Y" is_vcp = "Y" - instance_name = "test_zmq" install_historian = "Y" install_driver = "Y" install_fake_device = "Y" @@ -437,6 +443,7 @@ def test_zmq_case_web_vc_with_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_protocol, web_port, @@ -450,7 +457,6 @@ def test_zmq_case_web_vc_with_agents(monkeypatch): is_vc, agent_autostart, is_vcp, - instance_name, agent_autostart, install_historian, agent_autostart, @@ -475,13 +481,13 @@ def test_zmq_case_web_vc_with_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "zmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_zmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_zmq" assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443") assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname().lower(), ":8443") - assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "master_web-server.crt") - assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "master_web-server.pem") + assert config.get('volttron', 'web-ssl-cert') == os.path.join(vhome, "certificates", "certs", "platform_web-server.crt") + assert config.get('volttron', 'web-ssl-key') == os.path.join(vhome, "certificates", "private", "platform_web-server.pem") assert _is_agent_installed("listener") - assert _is_agent_installed("master_driver") + assert _is_agent_installed("platform_driver") assert _is_agent_installed("platform_historian") assert _is_agent_installed("vc ") assert _is_agent_installed("vcp") @@ -511,6 +517,7 @@ def test_rmq_case_no_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, is_vcp, install_historian, @@ -532,9 +539,9 @@ def test_rmq_case_no_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "rmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_rmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_rmq" assert not _is_agent_installed("listener") - assert not _is_agent_installed("master_driver") + assert not _is_agent_installed("platform_driver") assert not _is_agent_installed("platform_historian") assert not _is_agent_installed("vc ") assert not _is_agent_installed("vcp") @@ -568,9 +575,9 @@ def test_rmq_case_with_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, is_vcp, - instance_name, vc_hostname, vc_port, agent_autostart, @@ -597,9 +604,9 @@ def test_rmq_case_with_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "rmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_rmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_rmq" assert _is_agent_installed("listener") - assert _is_agent_installed("master_driver") + assert _is_agent_installed("platform_driver") assert _is_agent_installed("platform_historian") assert _is_agent_installed("vcp") assert not _is_agent_installed("vc ") @@ -632,6 +639,7 @@ def test_rmq_case_web_no_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_port, is_vc, @@ -655,10 +663,10 @@ def test_rmq_case_web_no_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "rmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_rmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_rmq" assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443") assert not _is_agent_installed("listener") - assert not _is_agent_installed("master_driver") + assert not _is_agent_installed("platform_driver") assert not _is_agent_installed("platform_historian") assert not _is_agent_installed("vc ") assert not _is_agent_installed("vcp") @@ -694,11 +702,11 @@ def test_rmq_case_web_with_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_port, is_vc, is_vcp, - instance_name, vc_hostname, vc_port, agent_autostart, @@ -724,10 +732,10 @@ def test_rmq_case_web_with_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "rmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_rmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_rmq" assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443") assert _is_agent_installed("listener") - assert _is_agent_installed("master_driver") + assert _is_agent_installed("platform_driver") assert _is_agent_installed("platform_historian") assert not _is_agent_installed("vc ") assert _is_agent_installed("vcp") @@ -762,12 +770,12 @@ def test_rmq_case_web_vc(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_port, is_vc, agent_autostart, is_vcp, - instance_name, agent_autostart, install_historian, install_driver, @@ -788,11 +796,11 @@ def test_rmq_case_web_vc(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "rmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_rmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_rmq" assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname(), ":8443") assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443") assert not _is_agent_installed("listener") - assert not _is_agent_installed("master_driver") + assert not _is_agent_installed("platform_driver") assert not _is_agent_installed("platform_historian") assert _is_agent_installed("vc ") assert _is_agent_installed("vcp") @@ -826,12 +834,12 @@ def test_rmq_case_web_vc_with_agents(monkeypatch): vcfg_args = "\n".join([message_bus, vip_address, vip_port, + instance_name, is_web_enabled, web_port, is_vc, agent_autostart, is_vcp, - instance_name, agent_autostart, install_historian, agent_autostart, @@ -856,11 +864,11 @@ def test_rmq_case_web_vc_with_agents(monkeypatch): config.read(config_path) assert config.get('volttron', 'message-bus') == "rmq" assert config.get('volttron', 'vip-address') == "tcp://127.0.0.15:22916" - assert config.get('volttron', 'instance-name') == "test_rmq" + assert config.get('volttron', 'instance-name').strip('"') == "test_rmq" assert config.get('volttron', 'volttron-central-address') == "{}{}{}".format("https://", get_hostname(), ":8443") assert config.get('volttron', 'bind-web-address') == "{}{}{}".format("https://", get_hostname(), ":8443") assert _is_agent_installed("listener") - assert _is_agent_installed("master_driver") + assert _is_agent_installed("platform_driver") assert _is_agent_installed("platform_historian") assert _is_agent_installed("vc ") assert _is_agent_installed("vcp") diff --git a/volttrontesting/platform/test_packaging.py b/volttrontesting/platform/test_packaging.py index 500301f448..4465b30182 100644 --- a/volttrontesting/platform/test_packaging.py +++ b/volttrontesting/platform/test_packaging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/test_platform_init.py b/volttrontesting/platform/test_platform_init.py index 8829d06499..bb61e5e76b 100644 --- a/volttrontesting/platform/test_platform_init.py +++ b/volttrontesting/platform/test_platform_init.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/test_platform_rmq.py b/volttrontesting/platform/test_platform_rmq.py index 1ac2f87b90..61e8b2c9d9 100644 --- a/volttrontesting/platform/test_platform_rmq.py +++ b/volttrontesting/platform/test_platform_rmq.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/test_platform_web.py b/volttrontesting/platform/test_platform_web.py index a872cfa0ed..8c348c6d3f 100644 --- a/volttrontesting/platform/test_platform_web.py +++ b/volttrontesting/platform/test_platform_web.py @@ -2,7 +2,7 @@ import gevent -from volttron.platform.agent.known_identities import MASTER_WEB +from volttron.platform.agent.known_identities import PLATFORM_WEB from volttron.platform.vip.agent import Agent from volttrontesting.utils.platformwrapper import start_wrapper_platform from volttron.utils import get_hostname @@ -257,7 +257,7 @@ def test_register_path_route(web_instance): webdir, index_html = _build_web_dir(vi.volttron_home) agent = vi.build_agent(use_ipc=True) - agent.vip.rpc.call(MASTER_WEB, + agent.vip.rpc.call(PLATFORM_WEB, 'register_path_route', '', webdir).get(timeout=5) response = requests.get(vi.bind_web_address+"/index.html") assert index_html == response.text diff --git a/volttrontesting/platform/test_rmq_platform_shutdown.py b/volttrontesting/platform/test_rmq_platform_shutdown.py index c699c61ee4..acc23e7ee3 100644 --- a/volttrontesting/platform/test_rmq_platform_shutdown.py +++ b/volttrontesting/platform/test_rmq_platform_shutdown.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/test_rmq_reconnect.py b/volttrontesting/platform/test_rmq_reconnect.py index a7d5ecb33d..374c333811 100644 --- a/volttrontesting/platform/test_rmq_reconnect.py +++ b/volttrontesting/platform/test_rmq_reconnect.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/test_sqlite3_fix.py b/volttrontesting/platform/test_sqlite3_fix.py index 2fd0adf2ea..81983029e5 100644 --- a/volttrontesting/platform/test_sqlite3_fix.py +++ b/volttrontesting/platform/test_sqlite3_fix.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/platform/test_vctl_commands.py b/volttrontesting/platform/test_vctl_commands.py new file mode 100644 index 0000000000..c623c80f03 --- /dev/null +++ b/volttrontesting/platform/test_vctl_commands.py @@ -0,0 +1,89 @@ +from pathlib import Path +import tempfile + +import gevent +import os +import pytest +from gevent import subprocess + +from volttron.platform import get_examples +import sys + +from volttron.platform import jsonapi +from volttron.platform.agent.utils import execute_command +from volttrontesting.utils.platformwrapper import with_os_environ + + +@pytest.mark.control +def test_install_agent_config_not_empty(volttron_instance): + listener_agent_dir = get_examples("ListenerAgent") + listener_agent_config = Path(listener_agent_dir).joinpath("config") + with with_os_environ(volttron_instance.env): + cmds = ["vctl", '--json', 'install', listener_agent_dir, '--agent-config', + listener_agent_config] + response = execute_command(cmds, volttron_instance.env) + + json_response = jsonapi.loads(response) + + agent_uuid = json_response['agent_uuid'] + config_path = Path(volttron_instance.volttron_home).joinpath( + f'agents/{agent_uuid}/listeneragent-3.3/listeneragent-3.3.dist-info/config') + with open(config_path) as fp: + with open(listener_agent_config) as fp2: + assert fp2.read() == fp.read() + + volttron_instance.remove_all_agents() + + +@pytest.mark.control +def test_install_agent_config_empty(volttron_instance): + listener_agent_dir = get_examples("ListenerAgent") + + with with_os_environ(volttron_instance.env): + cmds = ["vctl", '--json', 'install', listener_agent_dir] + + response = execute_command(cmds, volttron_instance.env) + + json_response = jsonapi.loads(response) + + agent_uuid = json_response['agent_uuid'] + config_path = Path(volttron_instance.volttron_home).joinpath( + f'agents/{agent_uuid}/listeneragent-3.3/listeneragent-3.3.dist-info/config') + with open(config_path) as fp: + config_data = jsonapi.loads(fp.read()) + assert {} == config_data + + volttron_instance.remove_all_agents() + + +@pytest.mark.control +def test_agent_filters(volttron_instance): + auuid = volttron_instance.install_agent( + agent_dir=get_examples("ListenerAgent"), start=True) + buuid = volttron_instance.install_agent( + agent_dir=get_examples("ListenerAgent"), start=True) + + # Verify all installed agents show up in list + with with_os_environ(volttron_instance.env): + p = subprocess.Popen(['volttron-ctl', 'list'], env=volttron_instance.env, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + agent_list = p.communicate() + assert "listeneragent-3.3_1" in str(agent_list) + assert "listeneragent-3.3_2" in str(agent_list) + + # Filter agent based on agent uuid + with with_os_environ(volttron_instance.env): + p = subprocess.Popen(['volttron-ctl', 'list', str(auuid)], env=volttron_instance.env, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + agent_list = p.communicate() + assert "listeneragent-3.3_1" in str(agent_list) + assert "listeneragent-3.3_2" not in str(agent_list) + + # Filter agent based on agent name + with with_os_environ(volttron_instance.env): + p = subprocess.Popen(['volttron-ctl', 'list', 'listeneragent-3.3_1'], env=volttron_instance.env, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + agent_list = p.communicate() + assert "listeneragent-3.3_1" in str(agent_list) + assert "listeneragent-3.3_2" not in str(agent_list) + diff --git a/volttrontesting/platform/web/test_admin.py b/volttrontesting/platform/web/test_admin.py index 2394f18b89..5a51722cf6 100644 --- a/volttrontesting/platform/web/test_admin.py +++ b/volttrontesting/platform/web/test_admin.py @@ -11,15 +11,35 @@ def user_pass(): yield 'admin', 'admin' -def test_can_create_admin_user(volttron_instance_web, user_pass): +def test_can_authenticate_admin_user(volttron_instance_web, user_pass): instance = volttron_instance_web - if instance.messagebus != 'rmq': - pytest.skip("Only for rmq at this point in time.") - return + # if instance.messagebus != 'rmq': + # pytest.skip("Only for rmq at this point in time.") + # return webadmin = instance.web_admin_api + user, password = user_pass + # + # resp = webadmin.create_web_admin(user, password) + # assert resp.ok + # # Allow file operation to run + # gevent.sleep(2) + + resp = webadmin.authenticate(user, password) + assert resp.ok + assert resp.headers.get('Content-Type') == 'text/plain' + + resp = webadmin.authenticate('fake', password) + assert resp.status_code == 401 # unauthorized + assert resp.headers.get('Content-Type') == 'text/html' + + +@pytest.mark.skip(reason="Can't test using platformwrapper. Needs to be unit test") +def test_can_create_admin_user(volttron_instance_web, user_pass): + instance = volttron_instance_web + webadmin = instance.web_admin_api user, password = user_pass resp = webadmin.create_web_admin(user, password) @@ -35,4 +55,3 @@ def test_can_create_admin_user(volttron_instance_web, user_pass): assert resp.status_code == 401 # unauthorized assert resp.headers.get('Content-Type') == 'text/html' - diff --git a/volttrontesting/platform/web/test_admin_endpoints.py b/volttrontesting/platform/web/test_admin_endpoints.py index 5b02010b66..01c9c3f692 100644 --- a/volttrontesting/platform/web/test_admin_endpoints.py +++ b/volttrontesting/platform/web/test_admin_endpoints.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -70,7 +70,7 @@ def test_admin_unauthorized(): @pytest.mark.web -def test_set_master_password_setup(): +def test_set_platform_password_setup(): with get_test_volttron_home(messagebus='zmq') as vhome: # Note these passwords are not right so we expect to be redirected back to the # first.html diff --git a/volttrontesting/platform/web/test_discovery.py b/volttrontesting/platform/web/test_discovery.py index e926302c2d..1743af774e 100644 --- a/volttrontesting/platform/web/test_discovery.py +++ b/volttrontesting/platform/web/test_discovery.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -51,11 +51,11 @@ def test_discovery_endpoint(volttron_instance_web): :return: """ wrapper = volttron_instance_web + + # Both http and https start with http + assert wrapper.bind_web_address.startswith('http') if wrapper.messagebus == 'rmq': - assert wrapper.bind_web_address.startswith('https') os.environ['REQUESTS_CA_BUNDLE'] = wrapper.requests_ca_bundle - else: - assert wrapper.bind_web_address.startswith('http') info = DiscoveryInfo.request_discovery_info(wrapper.bind_web_address) diff --git a/volttrontesting/platform/web/test_web_authentication.py b/volttrontesting/platform/web/test_web_authentication.py index ecc6d13763..d47facc5f2 100644 --- a/volttrontesting/platform/web/test_web_authentication.py +++ b/volttrontesting/platform/web/test_web_authentication.py @@ -13,7 +13,7 @@ from volttrontesting.utils.platformwrapper import create_volttron_home from volttrontesting.utils.utils import AgentMock from volttrontesting.utils.web_utils import get_test_web_env -from volttron.platform.web.master_web_service import MasterWebService +from volttron.platform.web.platform_web_service import PlatformWebService from volttron.platform.web.admin_endpoints import AdminEndpoints from volttron.platform.web.authenticate_endpoint import AuthenticateEndpoints from volttrontesting.fixtures.cert_fixtures import certs_profile_1 @@ -116,25 +116,25 @@ def test_authenticate_endpoint(scheme): @pytest.fixture() -def mock_masterweb_service(): - MasterWebService.__bases__ = (AgentMock.imitate(Agent, Agent()), ) - masterweb = MasterWebService(serverkey=MagicMock(), identity=MagicMock(), address=MagicMock(), bind_web_address=MagicMock()) - rpc_caller = masterweb.vip.rpc - masterweb._admin_endpoints = AdminEndpoints(rpc_caller=rpc_caller) - yield masterweb +def mock_platformweb_service(): + PlatformWebService.__bases__ = (AgentMock.imitate(Agent, Agent()),) + platformweb = PlatformWebService(serverkey=MagicMock(), identity=MagicMock(), address=MagicMock(), bind_web_address=MagicMock()) + rpc_caller = platformweb.vip.rpc + platformweb._admin_endpoints = AdminEndpoints(rpc_caller=rpc_caller) + yield platformweb @pytest.mark.web -def test_get_credentials(mock_masterweb_service): - mock_masterweb_service._admin_endpoints._pending_auths = mock_masterweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_failures') - mock_masterweb_service._admin_endpoints._denied_auths = mock_masterweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_denied') +def test_get_credentials(mock_platformweb_service): + mock_platformweb_service._admin_endpoints._pending_auths = mock_platformweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_pending') + mock_platformweb_service._admin_endpoints._denied_auths = mock_platformweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_denied') pass @pytest.mark.web -def test_accept_credential(mock_masterweb_service): - mock_masterweb_service._admin_endpoints._pending_auths = mock_masterweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_failures').get() - mock_masterweb_service._admin_endpoints._denied_auths = mock_masterweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_denied').get() +def test_accept_credential(mock_platformweb_service): + mock_platformweb_service._admin_endpoints._pending_auths = mock_platformweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_pending').get() + mock_platformweb_service._admin_endpoints._denied_auths = mock_platformweb_service._admin_endpoints._rpc_caller.call(AUTH, 'get_authorization_denied').get() pass diff --git a/volttrontesting/platform/web/test_webcsr.py b/volttrontesting/platform/web/test_webcsr.py index 8aa52d1604..1f446f476d 100644 --- a/volttrontesting/platform/web/test_webcsr.py +++ b/volttrontesting/platform/web/test_webcsr.py @@ -7,7 +7,7 @@ def test_can_change_auto_allow_csr(volttron_instance_web): """ Test the functionality of the platform wrapper's enable_auto_csr This allows the turning on and off of the csr auto accept through - the master.web service. The platform wrapper itself handles the + the platform.web service. The platform wrapper itself handles the assertion that the changes were made correctly. note this will only work with an rmq instance. diff --git a/volttrontesting/services/aggregate_historian/test_aggregate_historian.py b/volttrontesting/services/aggregate_historian/test_aggregate_historian.py index e32f7af69c..b1b4d04570 100644 --- a/volttrontesting/services/aggregate_historian/test_aggregate_historian.py +++ b/volttrontesting/services/aggregate_historian/test_aggregate_historian.py @@ -111,8 +111,9 @@ "host": "localhost", "port": 27017, "database": "mongo_test", - "user": "test", - "passwd": "test" + "user": "historian", + "passwd": "historian", + "authSource": "test" } } } @@ -123,11 +124,11 @@ 'connection': { 'type': 'postgresql', 'params': { - 'dbname': 'historian_test', - 'port': 5433, - 'host': '127.0.0.1', + 'dbname': 'test_historian', + 'port': 5432, + 'host': 'localhost', 'user' : 'historian', - 'password': 'volttron' + 'password': 'historian' }, }, } @@ -177,8 +178,10 @@ def setup_sqlite(connection_params, table_names): def setup_mongodb(connection_params, table_names): - print ("setup mongodb") + print("setup mongodb") mongo_conn_str = 'mongodb://{user}:{passwd}@{host}:{port}/{database}' + if "authSource" in connection_params: + mongo_conn_str = mongo_conn_str + "?authSource={authSource}" params = connection_params mongo_conn_str = mongo_conn_str.format(**params) mongo_client = pymongo.MongoClient(mongo_conn_str) diff --git a/volttrontesting/services/historian/test_base_historian.py b/volttrontesting/services/historian/test_base_historian.py index b4cc3e004c..e7a8dcc2c6 100644 --- a/volttrontesting/services/historian/test_base_historian.py +++ b/volttrontesting/services/historian/test_base_historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -44,17 +44,23 @@ STATUS_KEY_BACKLOGGED, STATUS_KEY_CACHE_COUNT, STATUS_KEY_PUBLISHING, - STATUS_KEY_CACHE_FULL) + STATUS_KEY_CACHE_FULL, + STATUS_KEY_TIME_ERROR) import pytest from volttron.platform.agent import utils from volttron.platform.messaging import headers as headers_mod from volttron.platform.messaging.health import * +from volttron.platform.messaging import topics + from time import sleep from datetime import datetime import random import gevent import os +import sqlite3 + +from volttron.platform.agent.known_identities import CONFIGURATION_STORE class Historian(BaseHistorian): @@ -68,62 +74,13 @@ def query_historian(self, **kwargs): pass -def prep_config(volttron_home): - src_driver = os.getcwd() + '/services/core/MasterDriverAgent/example_configurations/test_fakedriver.config' - new_driver = volttron_home + '/test_fakedriver.config' - shutil.copy(src_driver, new_driver) - - with open(new_driver, 'r+') as f: - config = jsonapi.load(f) - config['registry_config'] = os.getcwd() + '/services/core/MasterDriverAgent/example_configurations/fake.csv' - f.seek(0) - f.truncate() - jsonapi.dump(config, f) - - master_config = { - "agentid": "master_driver", - "driver_config_list": [new_driver] - } - - return master_config - - foundtopic = False - def listener(peer, sender, bus, topic, headers, message): global foundtopic foundtopic = True -@pytest.mark.xfail(reason="This won't work on all machines because of " - "hardcoded paths.") -def test_base_historian(volttron_instance): - global foundtopic - v1 = volttron_instance - assert v1.is_running() - - master_config = prep_config(v1.volttron_home) - master_uuid = v1.install_agent(agent_dir=get_services_core("MasterDriverAgent"), - config_file=master_config) - gevent.sleep(2) - assert v1.is_agent_running(master_uuid) - db = Historian(address=v1.vip_address[0], - backup_storage_limit_gb=0.00002) - gevent.spawn(db.core.run).join(0) - - agent = v1.dynamic_agent - gevent.sleep(2) - agent.vip.pubsub.subscribe('pubsub', 'backupdb/nomore', callback=listener) - - for _ in range(0, 60): - gevent.sleep(1) - if foundtopic: - break - - assert foundtopic - - class BasicHistorian(BaseHistorian): def __init__(self, **kwargs): super(BasicHistorian, self).__init__(**kwargs) @@ -156,8 +113,10 @@ def client_agent(request, volttron_instance): yield agent agent.core.stop() + alert_publishes = [] + def message_handler(peer, sender, bus, topic, headers, message): alert_publishes.append(Status.from_json(message)) @@ -176,16 +135,16 @@ def test_cache_backlog(request, volttron_instance, client_agent): identity = 'platform.historian' historian = volttron_instance.build_agent(agent_class=BasicHistorian, - identity=identity, - submit_size_limit=2, - max_time_publishing=1, - retry_period=1.0, - backup_storage_limit_gb=0.0001) # 100K - DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" - gevent.sleep(5) #wait for historian to be fully up - print("\n** test_basic_function for {}**".format( - request.keywords.node.name)) + identity=identity, + submit_size_limit=2, + max_time_publishing=1, + retry_period=1.0, + backup_storage_limit_gb=0.0001, + enable_store=True) # 100K + # give it a second to finish setting up backup and finish subscribing + gevent.sleep(0.5) + DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" # Publish fake data. The format mimics the format used by VOLTTRON drivers. # Make some random readings. Randome readings are going to be # within the tolerance here. @@ -236,7 +195,7 @@ def test_cache_backlog(request, volttron_instance, client_agent): # Cache count can be 0 even if we are backlogged and cache is full because # cache might have just got deleted - #assert status["context"][STATUS_KEY_CACHE_COUNT] > 0 + # assert status["context"][STATUS_KEY_CACHE_COUNT] > 0 # Cache need not be full if it is backlogged. but if cache is full backlogged should be true # and alert should be sent @@ -268,6 +227,178 @@ def test_cache_backlog(request, volttron_instance, client_agent): gevent.sleep(2) +@pytest.mark.historian +def test_time_tolerance_check(request, volttron_instance, client_agent): + """ + Test time_tolerance check + """ + global alert_publishes + historian = None + + alert_publishes = [] + db_connection = None + try: + # subscribe to alerts + client_agent.vip.pubsub.subscribe("pubsub", "alerts/BasicHistorian", message_handler) + + identity = 'platform.historian' + # agent install should fail if type of time_tolerance is bad + with pytest.raises(ValueError) as e: + historian = volttron_instance.build_agent(agent_class=BasicHistorian, + identity=identity, + submit_size_limit=5, + max_time_publishing=5, + retry_period=1.0, + backup_storage_limit_gb=0.0001, + time_tolerance="invalid", + enable_store=True) + assert "could not convert string to float: 'invalid'" in str(e.value) + print(e) + + historian = volttron_instance.build_agent(agent_class=BasicHistorian, + identity=identity, + submit_size_limit=5, + max_time_publishing=5, + retry_period=1.0, + backup_storage_limit_gb=0.0001, + time_tolerance=5, + enable_store=True) + DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" + gevent.sleep(5) #wait for historian to be fully up + historian.publish_sleep = 0 + import pathlib + p = pathlib.Path(__file__).parent.parent.parent.parent.absolute() + print(f"Path to backupdb is {os.path.join(p,'backup.sqlite')}") + db_connection = sqlite3.connect(os.path.join(p,"backup.sqlite")) + c = db_connection.cursor() + try: + c.execute("DELETE FROM time_error") + db_connection.commit() + except: + pass # might fail with no such table. ignore + + # Publish fake data. The format mimics the format used by VOLTTRON drivers. + # Make some random readings. Randome readings are going to be + # within the tolerance here. + format_spec = "{0:.13f}" + oat_reading = random.uniform(30, 100) + mixed_reading = oat_reading + random.uniform(-5, 5) + damper_reading = random.uniform(0, 100) + + float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} + percent_meta = {'units': '%', 'tz': 'UTC', 'type': 'float'} + + # Create a message for all points. + all_message = [{'OutsideAirTemperature': oat_reading, + 'MixedAirTemperature': mixed_reading, + 'DamperSignal': damper_reading}, + {'OutsideAirTemperature': float_meta, + 'MixedAirTemperature': float_meta, + 'DamperSignal': percent_meta + }] + from datetime import timedelta + d_now = datetime.utcnow() - timedelta(minutes=10) + # publish records with invalid timestamp + for i in range(2): + now = utils.format_timestamp(d_now) + headers = { + headers_mod.DATE: now, headers_mod.TIMESTAMP: now + } + client_agent.vip.pubsub.publish('pubsub', + DEVICES_ALL_TOPIC, + headers=headers, + message=all_message) + d_now = d_now + timedelta(seconds=1) + + gevent.sleep(2) + status = client_agent.vip.rpc.call("platform.historian", "health.get_status").get(timeout=10) + print(f"STATUS: {status}") + assert status["status"] == STATUS_BAD + assert status["context"][STATUS_KEY_TIME_ERROR] + + c.execute("SELECT count(ts) from time_error") + initial_count = c.fetchone()[0] + print(f" initial count is {initial_count} type {type(initial_count)}") + assert initial_count > 0 + + # Make cache full.. time_error records should get deleted to make space + # Test publish slow or backlogged + historian.publish_sleep = 2 + d_now = datetime.utcnow() + from datetime import timedelta + for i in range(100): + now = utils.format_timestamp(d_now) + headers = { + headers_mod.DATE: now, headers_mod.TIMESTAMP: now + } + client_agent.vip.pubsub.publish('pubsub', + DEVICES_ALL_TOPIC, + headers=headers, + message=all_message) + d_now = d_now + timedelta(milliseconds=1) + if i % 10 == 0: + # So that we don't send a huge batch to only get deleted from cache right after + # inserting. Dumping a big batch in one go will make the the cache size to be + # over the limit so right after insert, cache size will be checked and cleanup + # will be delete records + gevent.sleep(0.5) + gevent.sleep(0.00001) # yield to historian thread to do the publishing + + gevent.sleep(4) + status = client_agent.vip.rpc.call("platform.historian", "health.get_status").get(timeout=10) + print(f"STATUS: {status}") + assert status["status"] == STATUS_BAD + assert status["context"][STATUS_KEY_CACHE_FULL] + assert status["context"][STATUS_KEY_BACKLOGGED] + # if cache got full, records from time_error should have got deleted before deleting valid records + # we inserted less than 100 records so all time_error records should have got deleted + # and time_error_stat should be false + assert not status["context"][STATUS_KEY_TIME_ERROR] + c.execute("SELECT count(ts) from time_error") + new_count = c.fetchone()[0] + assert new_count == 0 + + print("Updating time tolerance topics") + + # Change config to modify topic for time tolerance check + historian.publish_sleep = 0 + json_config = """{"time_tolerance_topics":["record"]}""" + historian.vip.rpc.call(CONFIGURATION_STORE, 'manage_store', + identity, "config", json_config, config_type="json").get() + gevent.sleep(2) + + d_now = datetime.utcnow() - timedelta(minutes=10) + client_agent.vip.pubsub.publish('pubsub', + DEVICES_ALL_TOPIC, + headers=headers, + message=all_message) + gevent.sleep(5) + status = client_agent.vip.rpc.call("platform.historian", "health.get_status").get(timeout=10) + print(f"STATUS: {status}") + assert status["status"] == STATUS_GOOD + # publish records with invalid timestamp + now = utils.format_timestamp(d_now) + headers = { + headers_mod.DATE: now, headers_mod.TIMESTAMP: now + } + client_agent.vip.pubsub.publish('pubsub', + topics.RECORD(subtopic="test"), + headers=headers, + message="test") + gevent.sleep(5) + status = client_agent.vip.rpc.call("platform.historian", "health.get_status").get(timeout=10) + print(f"GOT STATUS {status}") + assert status["status"] == STATUS_BAD + assert status["context"][STATUS_KEY_TIME_ERROR] + finally: + if historian: + historian.core.stop() + if db_connection: + db_connection.close() + # wait for cleanup to complete + gevent.sleep(2) + + @pytest.mark.historian def test_health_stuff(request, volttron_instance, client_agent): """ @@ -282,11 +413,12 @@ def test_health_stuff(request, volttron_instance, client_agent): submit_size_limit=2, max_time_publishing=0.5, retry_period=1.0, - backup_storage_limit_gb=0.0001) # 100K - DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" + backup_storage_limit_gb=0.0001, + enable_store=True) # 100K + # give it some time to finish setting up backup and finish subscribing + gevent.sleep(0.5) - print("\n** test_basic_function for {}**".format( - request.keywords.node.name)) + DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" # Publish fake data. The format mimics the format used by VOLTTRON drivers. # Make some random readings. Randome readings are going to be @@ -315,7 +447,6 @@ def test_health_stuff(request, volttron_instance, client_agent): headers = { headers_mod.DATE: now, headers_mod.TIMESTAMP: now } - print("Published time in header: " + now) for _ in range(10): client_agent.vip.pubsub.publish('pubsub', @@ -440,7 +571,10 @@ def test_failing_historian(request, volttron_instance, client_agent): submit_size_limit=2, max_time_publishing=0.5, retry_period=1.0, - backup_storage_limit_gb=0.0001) # 100K + backup_storage_limit_gb=0.0001, + enable_store=True) # 100K + # give it some time to finish setting up backup and finish subscribing + gevent.sleep(0.5) assert fail_historian.setup_run assert not fail_historian.teardown_run @@ -558,9 +692,6 @@ def test_failing_historian(request, volttron_instance, client_agent): DEVICES_ALL_TOPIC = "devices/Building/LAB/Device/all" - print("\n** test_basic_function for {}**".format( - request.keywords.node.name)) - float_meta = {'units': 'F', 'tz': 'UTC', 'type': 'float'} # Create a message for all points. @@ -635,16 +766,13 @@ def test_additional_custom_topics(request, volttron_instance, client_agent): historian = volttron_instance.build_agent(agent_class=BasicHistorian, identity=identity, - submit_size_limit=2, - max_time_publishing=0.5, - retry_period=1.0, - backup_storage_limit_gb=0.0001, - custom_topics={'capture_device_data': [CUSTOM_TOPIC]}) # 100K + custom_topics={'capture_device_data': [CUSTOM_TOPIC]}, + enable_store=True) # 100K + # give it some time to finish setting up backup and finish subscribing + gevent.sleep(0.5) - print("\n** test_basic_function for {}**".format( - request.keywords.node.name)) - + #volttron_instance.dynamic_agent.vip.pubsub.subscribe('pubsub', 'devices', callback=listener) # Publish fake data. The format mimics the format used by VOLTTRON drivers. # Make some random readings. Randome readings are going to be # within the tolerance here. @@ -672,7 +800,6 @@ def test_additional_custom_topics(request, volttron_instance, client_agent): headers = { headers_mod.DATE: now, headers_mod.TIMESTAMP: now } - print("Published time in header: " + now) for _ in range(2): client_agent.vip.pubsub.publish('pubsub', @@ -722,18 +849,14 @@ def test_restricting_topics(request, volttron_instance, client_agent): historian = volttron_instance.build_agent(agent_class=BasicHistorian, identity=identity, - submit_size_limit=2, - max_time_publishing=0.5, - retry_period=1.0, - backup_storage_limit_gb=0.0001, capture_device_data=False, capture_log_data=False, capture_analysis_data=False, capture_record_data=False, - custom_topics={'capture_device_data': [CUSTOM_TOPIC]}) # 100K - - print("\n** test_basic_function for {}**".format( - request.keywords.node.name)) + custom_topics={'capture_device_data': [CUSTOM_TOPIC]}, + enable_store=True) # 100K + # give it some time to finish setting up backup and finish subscribing + gevent.sleep(0.5) # Publish fake data. The format mimics the format used by VOLTTRON drivers. # Make some random readings. Randome readings are going to be @@ -762,7 +885,6 @@ def test_restricting_topics(request, volttron_instance, client_agent): headers = { headers_mod.DATE: now, headers_mod.TIMESTAMP: now } - print("Published time in header: " + now) for _ in range(2): client_agent.vip.pubsub.publish('pubsub', @@ -775,7 +897,7 @@ def test_restricting_topics(request, volttron_instance, client_agent): headers=headers, message=all_message).get(timeout=10) - gevent.sleep(2.0) + gevent.sleep(1.0) assert len(historian.seen) == 6 # only records published to custom topic found_device_topic = 0 diff --git a/volttrontesting/services/historian/test_base_historian_unit.py b/volttrontesting/services/historian/test_base_historian_unit.py new file mode 100644 index 0000000000..c7be09d426 --- /dev/null +++ b/volttrontesting/services/historian/test_base_historian_unit.py @@ -0,0 +1,117 @@ +import datetime +from datetime import timedelta +import os +from shutil import rmtree +from time import sleep + +import pytest +from pytz import UTC + +from utils.utils import AgentMock +from volttron.platform.agent.base_historian import BaseHistorianAgent, Agent + +CACHE_NAME = "backup.sqlite" +HISTORIAN_DB = "./data/historian.sqlite" + + +def test_base_historian_agent_should_filter_duplicates(base_historian_agent): + # Add duplicates to queue + # Uniqueness is defined as a combination of topic and timestamp + # Thus a duplicate has the same topic and timestamp + for num in range(40, 43): + base_historian_agent._capture_record_data( + peer=None, + sender=None, + bus=None, + topic="duplicate_topic", + headers={ + "Date": "2015-11-17 21:24:10.189393+00:00", + "TimeStamp": "2015-11-17 21:24:10.189393+00:00", + }, + message=f"last_duplicate_{num}", + ) + + # Add unique records to queue + for num in range(2, 5): + base_historian_agent._capture_record_data( + peer=None, + sender=None, + bus=None, + topic=f"unique_record_topic{num}", + headers={ + "Date": f"2020-11-17 21:2{num}:10.189393+00:00", + "TimeStamp": f"2020-11-17 21:2{num}:10.189393+00:00", + }, + message=f"unique_record_{num}", + ) + + # Since this is a unit test, we have to "manually start" the base_historian to get the workflow going + base_historian_agent.start_process_thread() + # Adding sleep to ensure that all data gets publised in the cache before testing + sleep(3) + + expected_to_publish_list = [ + { + "_id": 3, + "timestamp": datetime.datetime( + 2015, 11, 17, 21, 24, 10, 189393, tzinfo=UTC + ), + "source": "record", + "topic": "duplicate_topic", + "value": "last_duplicate_42", + "headers": { + "Date": "2015-11-17 21:24:10.189393+00:00", + "TimeStamp": "2015-11-17 21:24:10.189393+00:00", + }, + "meta": {}, + } + ] + + + # When the base_historian is handling duplicates from the cache, the base_historian is expected to make multiple calls to publish_to_historian + # in which each call contains exactly one duplicate record. More importanly, the base_historian is also expected to make each call to publish_to_historian + # in the order in which the duplicates were initially inserted into the cache (i.e. First-in, First Out, FIFO) + # In this specific case, we have three duplicates that need to be processed. Thus, publish_to_historian will get called thrice. + # On the first call, publish_to_historian will publish 'unique_record_2', 'unique_record_3', 'unique_record_4' AND 'last_duplicate_40' + # On the second call, publish_to_historian will publish last_duplicate_41 + # On the third and final call, publish_to_historian will publish last_duplicate_42 + # Since it is difficult to validate every call except the last call, we will simply validate that the last call + # did indeed publish exactly one duplicate record that was the last duplicate record inserted into the cache. + assert base_historian_agent.last_to_publish_list == expected_to_publish_list + + +BaseHistorianAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),) + + +class BaseHistorianAgentTestWrapper(BaseHistorianAgent): + def __init__(self, **kwargs): + self.last_to_publish_list = "" + super(BaseHistorianAgentTestWrapper, self).__init__(**kwargs) + + def publish_to_historian(self, to_publish_list): + self.report_all_handled() + self.last_to_publish_list = to_publish_list + + def record_table_definitions(self, meta_table_name): + pass + + +@pytest.fixture() +def base_historian_agent(): + base_historian = BaseHistorianAgentTestWrapper() + # default is 300 seconds or 5 minutes; setting to 1 second so tests don't take so long + base_historian._retry_period = 1.0 + # When SQLHistorian is normally started on the platform, this attribute is set. + # Since the SQLHistorian is being tested without the volttron platform, + # this attribute must be set so that the test can run + base_historian._max_time_publishing = timedelta(float(1)) + + yield base_historian + # Teardown + # the backup database is an sqlite database with the name "backup.sqlite". + # the db is created if it doesn't exist; see the method: BackupDatabase._setupdb(check_same_thread) for details + # also, delete the historian database for this test, which is an sqlite db in folder /data + if os.path.exists("./data"): + rmtree("./data") + if os.path.exists(CACHE_NAME): + os.remove(CACHE_NAME) diff --git a/volttrontesting/services/historian/test_historian.py b/volttrontesting/services/historian/test_historian.py index 78bcd2e2a5..35a526f62a 100644 --- a/volttrontesting/services/historian/test_historian.py +++ b/volttrontesting/services/historian/test_historian.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -173,9 +173,9 @@ crate_platform = { "source_historian": get_services_core("CrateHistorian"), - "schema": "test", "connection": { "type": "crate", + "schema": "test", "params": { "host": "http://localhost:4200", "debug": False @@ -209,9 +209,9 @@ "host": "localhost", "port": 27017, "database": "mongo_test", - "user": "test", - "passwd": "test", - "authSource": "admin" + "user": "historian", + "passwd": "historian", + "authSource": "test" } } } @@ -221,11 +221,11 @@ 'connection': { 'type': 'postgresql', 'params': { - 'dbname': 'historian_test', + 'dbname': 'test_historian', 'port': 5432, 'host': 'localhost', 'user' : 'historian', - 'password': 'volttron' + 'password': 'historian' }, }, } @@ -355,7 +355,8 @@ def cleanup_mongodb(db_connection, truncate_tables): def cleanup_crate(db_connection, truncate_tables): crate_utils.drop_schema(db_connection, truncate_tables, - schema=crate_platform['schema']) + schema=crate_platform['connection']['schema']) + def cleanup_postgresql(connection, truncate_tables): print('cleanup_postgreql({!r}, {!r})'.format(connection, truncate_tables)) diff --git a/volttrontesting/services/historian/test_multiplatform.py b/volttrontesting/services/historian/test_multiplatform.py index 5595e1ca2a..1641ac4206 100644 --- a/volttrontesting/services/historian/test_multiplatform.py +++ b/volttrontesting/services/historian/test_multiplatform.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -322,6 +322,8 @@ def test_all_platform_subscription_zmq(request, get_zmq_volttron_instances): downstream2.shutdown_platform() +@pytest.mark.skip(message='How we set up ssl auth for federation has changed. ' + 'federated_rmq_instance fixture needs to be fixed') @pytest.mark.historian @pytest.mark.multiplatform def test_all_platform_subscription_rmq(request, federated_rmq_instances): @@ -383,7 +385,7 @@ def test_all_platform_subscription_rmq(request, federated_rmq_instances): DEVICES_ALL_TOPIC, headers=headers, message=all_message).get(timeout=10) - gevent.sleep(5) + gevent.sleep(10) ## Query from consumer to verify diff --git a/volttrontesting/services/market_service/test_market_service.py b/volttrontesting/services/market_service/test_market_service.py index 046766ec06..41758a06ac 100644 --- a/volttrontesting/services/market_service/test_market_service.py +++ b/volttrontesting/services/market_service/test_market_service.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/services/tagging/test_tagging.py b/volttrontesting/services/tagging/test_tagging.py index 771f83dc1d..d22764388d 100644 --- a/volttrontesting/services/tagging/test_tagging.py +++ b/volttrontesting/services/tagging/test_tagging.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -56,10 +56,7 @@ try: import pymongo - - # Disabling mongo tagging service for now - # Need to fix mongo gevent loop error - HAS_PYMONGO = False + HAS_PYMONGO = True except: HAS_PYMONGO = False pymongo_skipif = pytest.mark.skipif(not HAS_PYMONGO, @@ -96,11 +93,14 @@ mongodb_config = {"source": get_services_core("MongodbTaggingService"), "connection": {"type": "mongodb", - "params": {"host": "localhost", "port": 27017, - "database": "mongo_test", - "user": "test", - "passwd": "test", - "authSource": "admin"}}} + "params": { + "host": "localhost", + "port": 27017, + "database": "mongo_test", + "user": "historian", + "passwd": "historian", + "authSource": "test" + }}} sqlite_historian = { "source": get_services_core("SQLHistorian"), @@ -124,16 +124,16 @@ "params": {"host": "localhost", "port": 27017, "database": "mongo_test", - "user": "test", - "passwd": "test", - "authSource": "admin"} + "user": "historian", + "passwd": "historian", + "authSource": "test"} } } crate_historian = { "source": get_services_core("CrateHistorian"), - "schema": "testing_historian", "connection": { + "schema": "testing_historian", "type": "crate", "params": { "host": "http://localhost:4200", @@ -166,7 +166,9 @@ def setup_sqlite(config): def setup_mongodb(config): print("setup mongodb") connection_params = config['connection']['params'] - mongo_conn_str = 'mongodb://{user}:{passwd}@{host}:{port}/{database}?authSource={authSource}' + mongo_conn_str = 'mongodb://{user}:{passwd}@{host}:{port}/{database}' + if "authSource" in connection_params: + mongo_conn_str = mongo_conn_str + "?authSource={authSource}" params = connection_params mongo_conn_str = mongo_conn_str.format(**params) mongo_client = pymongo.MongoClient(mongo_conn_str) @@ -204,7 +206,7 @@ def cleanup_mongodb(db_connection, truncate_tables): def cleanup_crate(db_connection, truncate_tables): - crate_utils.drop_schema(db_connection, truncate_tables, schema=crate_historian["schema"]) + crate_utils.drop_schema(db_connection, truncate_tables, schema=crate_historian['connection']["schema"]) @pytest.fixture(scope="module") @@ -226,7 +228,7 @@ def stop_agent(): @pytest.fixture(scope="module", params=[ sqlite_config, - # pytest.param(mongodb_config, marks=pymongo_skipif) + pytest.param(mongodb_config, marks=pymongo_skipif) ]) def tagging_service(request, volttron_instance): global connection_type, db_connection, tagging_service_id diff --git a/volttrontesting/services/test_masterweb.py b/volttrontesting/services/test_platformweb.py similarity index 86% rename from volttrontesting/services/test_masterweb.py rename to volttrontesting/services/test_platformweb.py index e8fb06c711..ff8a944f5e 100644 --- a/volttrontesting/services/test_masterweb.py +++ b/volttrontesting/services/test_platformweb.py @@ -1,7 +1,7 @@ """ -This file tests the MasterWebService as it is used in the base platform. Most +This file tests the PlatformWebService as it is used in the base platform. Most of the tests in here are not integration tests, but unit tests to test the -functionality of the MasterWebService agent. +functionality of the PlatformWebService agent. """ import binascii import contextlib @@ -18,12 +18,12 @@ from werkzeug.wrappers import Response from volttron.platform import jsonapi -from volttron.platform.agent.known_identities import MASTER_WEB +from volttron.platform.agent.known_identities import PLATFORM_WEB from volttron.platform.keystore import KeyStore from volttron.platform.vip.agent import Agent from volttron.platform.vip.agent.subsystems.web import ResourceType from volttron.platform.vip.socket import decode_key -from volttron.platform.web import MasterWebService +from volttron.platform.web import PlatformWebService from volttron.platform.web.admin_endpoints import AdminEndpoints from volttron.utils import get_random_key from volttrontesting.utils.platformwrapper import create_volttron_home @@ -34,32 +34,32 @@ #from volttrontesting.utils.platformwrapper import create_volttron_home from volttrontesting.fixtures.cert_fixtures import certs_profile_1 -# Patch the MasterWebService so the underlying Agent interfaces are mocked -# so we can just test the things that the MasterWebService is responsible for. -MasterWebService.__bases__ = (AgentMock.imitate(Agent, Agent()),) +# Patch the PlatformWebService so the underlying Agent interfaces are mocked +# so we can just test the things that the PlatformWebService is responsible for. +PlatformWebService.__bases__ = (AgentMock.imitate(Agent, Agent()),) #TODO add tests for new RPC calls @pytest.fixture() -def master_web_service(): +def platform_web_service(): serverkey = "serverkey" mock_aip = mock.Mock() - yield MasterWebService(serverkey=serverkey, identity=MASTER_WEB, address="tcp://stuff", - bind_web_address="http://v2:8888") + yield PlatformWebService(serverkey=serverkey, identity=PLATFORM_WEB, address="tcp://stuff", + bind_web_address="http://v2:8888") @contextlib.contextmanager -def get_master_web(bind_web_address="http://v2:8080", **kwargs) -> MasterWebService: +def get_platform_web(bind_web_address="http://v2:8080", **kwargs) -> PlatformWebService: """ - Create a new MasterWebService instance with a mocked aip. + Create a new PlatformWebService instance with a mocked aip. - :return: MasterWebService + :return: PlatformWebService """ serverkey = "serverkey" - mws = MasterWebService(serverkey=serverkey, identity=MASTER_WEB, address="tcp://stuff", - bind_web_address=bind_web_address, **kwargs) + mws = PlatformWebService(serverkey=serverkey, identity=PLATFORM_WEB, address="tcp://stuff", + bind_web_address=bind_web_address, **kwargs) mws.startupagent(sender='testweb') # original_volttron_home = os.environ.get('VOLTTRON_HOME') # new_volttron_home = create_volttron_home() @@ -79,11 +79,11 @@ def get_master_web(bind_web_address="http://v2:8080", **kwargs) -> MasterWebServ def get_server_response(env_fixture, ws): """ - Use the `MasterWebService` instance passed to call the app_routing function with + Use the `PlatformWebService` instance passed to call the app_routing function with the environment and a mocked start_response function. :param env_fixture: environment to run in - :param ws: MasterWebServer instance. + :param ws: PlatformWebServer instance. :return: tuple """ mocked_start_response = mock.MagicMock() @@ -103,13 +103,13 @@ def get_server_response(env_fixture, ws): return mocked_start_response, response -def add_points_of_interest(ws: MasterWebService, endpoints: dict): +def add_points_of_interest(ws: PlatformWebService, endpoints: dict): """ Adds endpoints based upon type. - The three t ypes of + The three types of - :param ws: The masterwebservice object + :param ws: The platformwebservice object :param endpoints: A dictionary of endpoints """ for k, v in endpoints.items(): @@ -149,7 +149,7 @@ def test_authenticate_endpoint(scheme): adminep.add_user(user, passwd, groups=['foo', 'read-only']) expected_claims = dict(groups=['foo', 'read-only']) - with get_master_web(**kwargs) as mw: + with get_platform_web(**kwargs) as mw: data = urlencode(dict(username=user, password=passwd)).encode('utf-8') assert len(data) > 0 @@ -241,8 +241,8 @@ def _construct_query_mock(core): bind_web_address = f"{scheme}://{host}:{port}" serverkey = decode_key(keystore.public) - mws = MasterWebService(serverkey=serverkey, identity=MASTER_WEB, address=address, - bind_web_address=bind_web_address, **config_params) + mws = PlatformWebService(serverkey=serverkey, identity=PLATFORM_WEB, address=address, + bind_web_address=bind_web_address, **config_params) mws.startupagent(sender='testweb') env = get_test_web_env("/discovery/") @@ -259,7 +259,7 @@ def _construct_query_mock(core): assert address == response.get('vip-address') -# def test_masterweb_has_discovery(): +# def test_platformweb_has_discovery(): # web_secret = "my secret key" # # def _construct_query_mock(core): @@ -271,7 +271,7 @@ def _construct_query_mock(core): # return MockQuery(**kv) # # with mock.patch('volttron.platform.vip.agent.subsystems.query.Query', _construct_query_mock): -# with get_master_web(web_secret_key=web_secret) as mw: +# with get_platform_web(web_secret_key=web_secret) as mw: # env = get_test_web_env("/discovery/") # mocked_start_response, response = get_server_response(env, mw) # @@ -280,7 +280,7 @@ def _construct_query_mock(core): @pytest.mark.web def test_path_route(): - with get_master_web(web_secret_key="oh my goodnes") as ws: + with get_platform_web(web_secret_key="oh my goodnes") as ws: # Stage 1 create a temp dir and add index.html to that directory tempdir = tempfile.mkdtemp(prefix="web") html = """sweetYay I am here""" @@ -327,8 +327,8 @@ def test_path_route(): @pytest.mark.web -def test_register_route(master_web_service: MasterWebService): - ws = master_web_service +def test_register_route(platform_web_service: PlatformWebService): + ws = platform_web_service fn_mock = mock.Mock() fn_mock.__name__ = "test_register_route" interest = {'/web': {'type': 'agent_route', 'fn': fn_mock}} @@ -346,8 +346,8 @@ def test_register_route(master_web_service: MasterWebService): @pytest.mark.web -def test_register_endpoint(master_web_service: MasterWebService): - ws = master_web_service +def test_register_endpoint(platform_web_service: PlatformWebService): + ws = platform_web_service fn_mock = mock.Mock() fn_mock.__name__ = "test_register_endpoint" interest = {"/battle/one": {'type': 'endpoint'}} diff --git a/volttrontesting/services/weather/test_base_weather.py b/volttrontesting/services/weather/test_base_weather.py index 76d560ae00..55f4c209ac 100644 --- a/volttrontesting/services/weather/test_base_weather.py +++ b/volttrontesting/services/weather/test_base_weather.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -71,6 +71,8 @@ "name": "fake4"} } +DATABASE_FILE = None + @pytest.fixture(scope="module") def query_agent(request, volttron_instance): @@ -205,6 +207,10 @@ def weather(request, volttron_instance): ) gevent.sleep(2) + global DATABASE_FILE + DATABASE_FILE = agent._database_file + assert DATABASE_FILE.endswith("weather.sqlite") + yield agent agent.core.stop() request.addfinalizer(remove_temp_file) @@ -228,7 +234,7 @@ def test_create_tables(weather): connection = weather._cache._sqlite_conn cursor = connection.cursor() - assert os.path.isfile(weather._database_file) + assert os.path.isfile(DATABASE_FILE) weather._cache.create_tables() @@ -282,7 +288,8 @@ def test_manage_cache_size(volttron_instance): connection = weather._cache._sqlite_conn cursor = connection.cursor() - assert os.path.isfile("weather.sqlite") + database_file = weather._cache._db_file_path + assert os.path.isfile(database_file) for service_name in weather._api_services: query = "DELETE FROM {};".format(service_name) @@ -962,7 +969,6 @@ def test_poll_location(volttron_instance, query_agent): def test_poll_multiple_locations(volttron_instance, query_agent, config, result_topics): gevent.sleep(1) - agent = None query_agent.poll_callback.reset_mock() try: @@ -1039,9 +1045,8 @@ def test_poll_errors(volttron_instance, query_agent, config, def delete_database_file(): - db_path = "weather.sqlite" - if os.path.isfile(db_path): - os.remove(db_path) + if os.path.isfile(DATABASE_FILE): + os.remove(DATABASE_FILE) @pytest.mark.weather2 @@ -1057,8 +1062,8 @@ def test_unhandled_cache_store_exception(volttron_instance, weather, conn.commit() # workaround to open the file in read only mode weather._cache._sqlite_conn.close() - os.chmod(weather._database_file, 0o444) - weather._cache._sqlite_conn = sqlite3.connect(weather._database_file) + os.chmod(DATABASE_FILE, 0o444) + weather._cache._sqlite_conn = sqlite3.connect(DATABASE_FILE) query_agent.alert_callback.reset_mock() results1 = query_agent.vip.rpc.call(identity, "get_current_weather", @@ -1105,8 +1110,8 @@ def test_unhandled_cache_store_exception(volttron_instance, weather, assert results1["observation_time"] != results2["observation_time"] finally: weather._cache._sqlite_conn.close() - os.chmod(weather._database_file, 0o666) - weather._cache._sqlite_conn = sqlite3.connect(weather._database_file) + os.chmod(DATABASE_FILE, 0o666) + weather._cache._sqlite_conn = sqlite3.connect(DATABASE_FILE) @pytest.mark.weather2 @@ -1155,4 +1160,4 @@ def test_unhandled_cache_read_exception(volttron_instance, weather, assert read_warning finally: # make sure the cache is ready to be used again - weather._cache._sqlite_conn = sqlite3.connect(weather._database_file) + weather._cache._sqlite_conn = sqlite3.connect(DATABASE_FILE) diff --git a/volttrontesting/subsystems/test_config_store.py b/volttrontesting/subsystems/test_config_store.py index 5b53cb749c..003cc17707 100644 --- a/volttrontesting/subsystems/test_config_store.py +++ b/volttrontesting/subsystems/test_config_store.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -64,7 +64,8 @@ def reset_results(self): def _module_config_test_agent(request, volttron_instance): agent = volttron_instance.build_agent(identity='config_test_agent', - agent_class=_config_test_agent) + agent_class=_config_test_agent, + enable_store=True) def cleanup(): agent.core.stop() @@ -371,7 +372,8 @@ def __init__(self, **kwargs): self.setup_callback() agent = volttron_instance.build_agent(identity='test_default_agent', - agent_class=test_default_agent) + agent_class=test_default_agent, + enable_store=True) # Give the agent a chance to process it's configurations. gevent.sleep(1.0) @@ -412,7 +414,8 @@ def __init__(self, **kwargs): self.setup_callback(actions="DELETE", pattern="delete/*") agent = volttron_instance.build_agent(identity='test_agent_sub_options', - agent_class=test_sub_pattern_agent) + agent_class=test_sub_pattern_agent, + enable_store=True) # Give the agent a chance to process it's configurations. gevent.sleep(1.0) diff --git a/volttrontesting/subsystems/test_pubsub.py b/volttrontesting/subsystems/test_pubsub.py index 13f902b926..ce8421698a 100644 --- a/volttrontesting/subsystems/test_pubsub.py +++ b/volttrontesting/subsystems/test_pubsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/testutils/test_docker_wrapper.py b/volttrontesting/testutils/test_docker_wrapper.py index 50986a5dc3..67fa4a7d56 100644 --- a/volttrontesting/testutils/test_docker_wrapper.py +++ b/volttrontesting/testutils/test_docker_wrapper.py @@ -30,7 +30,7 @@ def test_docker_wrapper_should_throw_runtime_error_on_false_image_when_pull(): with create_container("not_a_real_image", ports={"4200/tcp": 4200}) as container: container.logs() - assert "404 Client Error: Not Found" in str(execinfo.value) + assert "404 Client Error" in str(execinfo.value) @pytest.mark.skipif(SKIP_DOCKER, reason=SKIP_REASON) @@ -41,5 +41,5 @@ def test_docker_wrapper_should_throw_runtime_error_when_ports_clash(): with create_container("crate", ports={"4200/tcp": port}) as container2: assert container2.status == 'running' - assert "500 Server Error: Internal Server Error" in str(execinfo.value) + assert "500 Server Error" in str(execinfo.value) diff --git a/volttrontesting/testutils/test_getinstance_1.py b/volttrontesting/testutils/test_getinstance_1.py new file mode 100644 index 0000000000..a445d0fc2a --- /dev/null +++ b/volttrontesting/testutils/test_getinstance_1.py @@ -0,0 +1,15 @@ +import pytest + +from volttrontesting.utils.platformwrapper import PlatformWrapper + + +@pytest.mark.wrapper +def test_fixture_returns_correct_number_of_instances(get_volttron_instances): + num_instances = 4 + wrappers = get_volttron_instances(num_instances, should_start=False) + + assert num_instances == len(wrappers) + for w in wrappers: + assert isinstance(w, PlatformWrapper) + + assert not w.is_running() diff --git a/volttrontesting/testutils/test_getinstance_2.py b/volttrontesting/testutils/test_getinstance_2.py new file mode 100644 index 0000000000..28214a6880 --- /dev/null +++ b/volttrontesting/testutils/test_getinstance_2.py @@ -0,0 +1,15 @@ +import pytest + +from volttrontesting.utils.platformwrapper import PlatformWrapper + + +@pytest.mark.wrapper +def test_fixture_starts_platforms(get_volttron_instances): + num_instances = 5 + wrappers = get_volttron_instances(num_instances) + + assert num_instances == len(wrappers) + for w in wrappers: + assert isinstance(w, PlatformWrapper) + assert w.is_running() + w.shutdown_platform() diff --git a/volttrontesting/testutils/test_getinstance_3.py b/volttrontesting/testutils/test_getinstance_3.py new file mode 100644 index 0000000000..db0bd36d32 --- /dev/null +++ b/volttrontesting/testutils/test_getinstance_3.py @@ -0,0 +1,33 @@ +import pytest + +from volttron.platform import get_examples + + +@pytest.mark.skip(reason="To test actions on github") +@pytest.mark.wrapper +def test_can_install_listeners_on_different_instances(get_volttron_instances): + + num_instances = 3 + wrappers = get_volttron_instances(num_instances, True) + + wrapper_uuid = [] + assert num_instances == len(wrappers) + for w in wrappers: + assert w.is_running() + auuid = w.install_agent( + agent_dir=get_examples("ListenerAgent"), config_file={"message": "So Happpy"}, + start=True + ) + assert auuid + assert w.is_agent_running(auuid) + wrapper_uuid.append((w, auuid)) + + # Make sure that the installed agents are for different instances + for w, aid in wrapper_uuid: + for w1, aid1 in wrapper_uuid: + if id(w1) == id(w): + assert w1.is_agent_running(aid) + else: + # Note using w to compare the installed agent on w to the agent installed on w1 + with pytest.raises(FileNotFoundError): + w.get_agent_identity(aid1) diff --git a/volttrontesting/testutils/test_getinstances_fixture.py b/volttrontesting/testutils/test_getinstances_fixture.py deleted file mode 100644 index 17be35644a..0000000000 --- a/volttrontesting/testutils/test_getinstances_fixture.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest - -from volttrontesting.fixtures.volttron_platform_fixtures import cleanup_wrappers -from volttrontesting.utils.platformwrapper import PlatformWrapper - - -@pytest.mark.wrapper -def test_fixture_returns_correct_number_of_instances(get_volttron_instances): - - num_instances = 5 - wrappers = get_volttron_instances(num_instances, False) - - assert num_instances == len(wrappers) - for w in wrappers: - assert isinstance(w, PlatformWrapper) - assert not w.is_running() - - -@pytest.mark.wrapper -def test_fixture_starts_platforms(get_volttron_instances): - num_instances = 5 - wrappers = get_volttron_instances(num_instances) - - assert num_instances == len(wrappers) - for w in wrappers: - assert isinstance(w, PlatformWrapper) - assert w.is_running() - w.shutdown_platform() diff --git a/volttrontesting/testutils/test_multimessagebus_fixture.py b/volttrontesting/testutils/test_multimessagebus_fixture.py index c389c0359c..7681d4ad05 100644 --- a/volttrontesting/testutils/test_multimessagebus_fixture.py +++ b/volttrontesting/testutils/test_multimessagebus_fixture.py @@ -1,6 +1,8 @@ import pytest import requests +from volttrontesting.fixtures.volttron_platform_fixtures import cleanup_wrapper + @pytest.fixture def web_bound_correctly(volttron_multi_messagebus): @@ -11,6 +13,9 @@ def web_bound_correctly(volttron_multi_messagebus): yield source, sink + cleanup_wrapper(source) + cleanup_wrapper(sink) + def test_correct_number_of_instances(web_bound_correctly): diff --git a/volttrontesting/testutils/test_platformwrapper.py b/volttrontesting/testutils/test_platformwrapper.py index fb2c75f455..d515515574 100644 --- a/volttrontesting/testutils/test_platformwrapper.py +++ b/volttrontesting/testutils/test_platformwrapper.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,42 +35,18 @@ # BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 # }}} -import faulthandler -faulthandler.enable() - - from configparser import ConfigParser -import gevent -import pytest import time import os +import requests +import gevent +import pytest from mock import MagicMock from volttron.platform import get_services_core, get_examples, jsonapi -from volttrontesting.utils.platformwrapper import PlatformWrapper -from volttrontesting.utils.utils import get_rand_tcp_address -from volttrontesting.utils.platform_process import VolttronProcess, VolttronRuntimeOptions, AgentProcess -from time import sleep - -# -# def test_volttron_process(): -# rto = VolttronRuntimeOptions() -# p2 = VolttronProcess(runtime_options=rto) -# p = VolttronProcess(runtime_options=rto) -# p.start() -# p2.start() -# # a = AgentProcess("/home/osboxes/repos/volttron-develop/examples/ListenerAgent/listener/agent.py", -# # p.volttron_home, "/home/osboxes/repos/volttron-develop/examples/ListenerAgent/config") -# # a.start() -# -# sleep(5) -# # a.terminate() -# # a.join() -# p2.shutdown() -# p2.join() -# p.shutdown() -# p.join() +from volttrontesting.utils.platformwrapper import PlatformWrapper, with_os_environ +from volttrontesting.utils.utils import get_rand_tcp_address, get_rand_http_address @pytest.mark.parametrize("messagebus, ssl_auth", [ @@ -79,7 +55,6 @@ # , ('rmq', True) ]) def test_can_create(messagebus, ssl_auth): - p = PlatformWrapper(messagebus=messagebus, ssl_auth=ssl_auth) try: assert not p.is_running() @@ -87,6 +62,7 @@ def test_can_create(messagebus, ssl_auth): p.startup_platform(vip_address=get_rand_tcp_address()) assert p.is_running() + assert p.dynamic_agent.vip.ping("").get(timeout=2) finally: if p: p.shutdown_platform() @@ -94,7 +70,28 @@ def test_can_create(messagebus, ssl_auth): assert not p.is_running() +@pytest.mark.parametrize("messagebus, https_enabled", [ + ('zmq', False) + # TODO: Test enable generation of certs to support https + # , ('zmq', True) + # , ('zmq', False) + # , ('rmq', True) +]) +def test_can_create_web_enabled(messagebus: str, https_enabled: bool): + p = PlatformWrapper(messagebus=messagebus) + try: + assert not p.is_running() + assert p.volttron_home.startswith("/tmp/tmp") + http_address = get_rand_http_address(https=https_enabled) + p.startup_platform(vip_address=get_rand_tcp_address(), bind_web_address=http_address) + assert p.is_running() + response = requests.get(http_address, verify=False) + assert response.ok + finally: + if p: + p.shutdown_platform() + assert not p.is_running() @pytest.mark.wrapper @@ -111,7 +108,6 @@ def test_volttron_config_created(volttron_instance): @pytest.mark.wrapper def test_can_restart_platform_without_addresses_changing(get_volttron_instances): - inst_forward, inst_target = get_volttron_instances(2) original_vip = inst_forward.vip_address @@ -126,7 +122,6 @@ def test_can_restart_platform_without_addresses_changing(get_volttron_instances) @pytest.mark.wrapper def test_can_restart_platform(volttron_instance): - orig_vip = volttron_instance.vip_address orig_vhome = volttron_instance.volttron_home orig_bus = volttron_instance.messagebus @@ -170,9 +165,9 @@ def test_instance_writes_to_instances_file(volttron_instance): assert the_instance_entry['volttron-home'] == vi.volttron_home +@pytest.mark.skip(reason="To test actions on github") @pytest.mark.wrapper def test_can_install_listener(volttron_instance): - vi = volttron_instance assert vi is not None assert vi.is_running() @@ -181,6 +176,7 @@ def test_can_install_listener(volttron_instance): auuid = vi.install_agent(agent_dir=get_examples("ListenerAgent"), start=False) assert auuid is not None + time.sleep(1) started = vi.start_agent(auuid) assert started @@ -229,16 +225,23 @@ def test_reinstall_agent(volttron_instance): } } } - for i in range(0, 5): - print("Counter: {}".format(i)) + auuid = volttron_instance.install_agent( + agent_dir=get_services_core("SQLHistorian"), + config_file=sqlite_config, + start=True, + vip_identity='test_historian') + assert volttron_instance.is_agent_running(auuid) + + newuuid = volttron_instance.install_agent( + agent_dir=get_services_core("SQLHistorian"), + config_file=sqlite_config, + start=True, + force=True, + vip_identity='test_historian') - auuid = volttron_instance.install_agent( - agent_dir=get_services_core("SQLHistorian"), - config_file=sqlite_config, - start=True, - vip_identity='test_historian') - assert volttron_instance.is_agent_running(auuid) - volttron_instance.remove_agent(auuid) + assert volttron_instance.is_agent_running(newuuid) + assert auuid != newuuid and auuid is not None + volttron_instance.remove_agent(newuuid) @pytest.mark.wrapper @@ -345,55 +348,57 @@ def test_can_publish(volttron_instance): assert messages['test/world']['message'] == 'got data' +@pytest.mark.skip(reason="To test actions on github") @pytest.mark.wrapper -def test_fixture_returns_single_if_one_requested(get_volttron_instances): - wrapper = get_volttron_instances(1, False) - assert isinstance(wrapper, PlatformWrapper) +def test_can_install_multiple_listeners(volttron_instance): + assert volttron_instance.is_running() + volttron_instance.remove_all_agents() + uuids = [] + num_listeners = 3 + try: + for x in range(num_listeners): + identity = "listener_" + str(x) + auuid = volttron_instance.install_agent( + agent_dir=get_examples("ListenerAgent"), config_file={ + "agentid": identity, + "message": "So Happpy"}) + assert auuid + uuids.append(auuid) + time.sleep(4) + + for u in uuids: + assert volttron_instance.is_agent_running(u) + + agent_list = volttron_instance.dynamic_agent.vip.rpc('control', 'list_agents').get(timeout=5) + print('Agent List: {}'.format(agent_list)) + assert len(agent_list) == num_listeners + finally: + for x in uuids: + try: + volttron_instance.remove_agent(x) + except: + print('COULDN"T REMOVE AGENT') -@pytest.mark.wrapper -def test_can_install_listener_on_two_platforms(get_volttron_instances): - wrapper1, wrapper2 = get_volttron_instances(2) +def test_will_update_throws_typeerror(): + # Note dictionary for os.environ must be string=string for key=value - global messages - clear_messages() - auuid = wrapper1.install_agent( - agent_dir=get_examples("ListenerAgent"), - start=False) - assert auuid is not None - started = wrapper1.start_agent(auuid) - print('STARTED: ', started) - listening = wrapper1.build_agent() - listening.vip.pubsub.subscribe(peer='pubsub', - prefix='heartbeat/listeneragent', - callback=onmessage) + to_update = dict(shanty=dict(holy="cow")) + #with pytest.raises(TypeError): + with with_os_environ(to_update): + print("Should not reach here") - # sleep for 10 seconds and at least one heartbeat should have been - # published - # because it's set to 5 seconds. - time_start = time.time() + to_update = dict(bogus=35) +# with pytest.raises(TypeError): + with with_os_environ(to_update): + print("Should not reach here") - clear_messages() - auuid2 = wrapper2.install_agent( - agent_dir=get_examples("ListenerAgent"), - start=True) - assert auuid2 is not None - started2 = wrapper2.start_agent(auuid2) - print('STARTED: ', started2) - listening = wrapper2.build_agent() - listening.vip.pubsub.subscribe(peer='pubsub', - prefix='heartbeat/listeneragent', - callback=onmessage) - # sleep for 10 seconds and at least one heartbeat should have been - # published - # because it's set to 5 seconds. - time_start = time.time() +def test_will_update_environ(): + to_update = dict(farthing="50") + with with_os_environ(to_update): + assert os.environ.get("farthing") == "50" - print('Awaiting heartbeat response.') - while not messages_contains_prefix( - 'heartbeat/listeneragent') and time.time() < time_start + 10: - gevent.sleep(0.2) + assert "farthing" not in os.environ - assert messages_contains_prefix('heartbeat/listeneragent') diff --git a/volttrontesting/testutils/test_platformwrapper2.py b/volttrontesting/testutils/test_platformwrapper2.py deleted file mode 100644 index 825699b4e6..0000000000 --- a/volttrontesting/testutils/test_platformwrapper2.py +++ /dev/null @@ -1,100 +0,0 @@ -import gevent -import pytest -import warnings -import os - -from gevent import subprocess - -from volttron.platform import get_examples -from volttrontesting.utils.utils import get_rand_port -from volttrontesting.utils.platformwrapper import PlatformWrapper -from gevent.subprocess import Popen - - -@pytest.mark.wrapper -def test_can_cleanup_installed_listener(): - try: - import psutil - except: - warnings.warn('No psutil module present for this test') - return - wrapper = PlatformWrapper() - - address="tcp://127.0.0.1:{}".format(get_rand_port()) - wrapper.startup_platform(address) - - assert wrapper is not None - assert wrapper.is_running() - - auuid = wrapper.install_agent(agent_dir=get_examples("ListenerAgent"), - vip_identity="listener", - start=False) - assert auuid is not None - started = wrapper.start_agent(auuid) - assert isinstance(started, int) - assert psutil.pid_exists(started) - - wrapper.shutdown_platform() - # give operating system enough time to update pids. - gevent.sleep(0.1) - assert not psutil.pid_exists(started) - - -@pytest.mark.wrapper -def test_pid_file(): - try: - import psutil - except: - warnings.warn('No psutil module present for this test') - return - wrapper = PlatformWrapper() - - address="tcp://127.0.0.1:{}".format(get_rand_port()) - wrapper.startup_platform(address) - - assert wrapper is not None - assert wrapper.is_running() - pid_file = os.path.join(wrapper.volttron_home, "VOLTTRON_PID") - assert os.path.exists(pid_file) - with open(pid_file, 'r') as pf: - assert psutil.pid_exists(int(pf.read().strip())) - wrapper.skip_cleanup = True - wrapper.shutdown_platform() - # give operating system enough time to update pids. - gevent.sleep(0.1) - assert not os.path.exists(pid_file) - - # Check overwrite of pid file. In case last shutdown was not clean - with open(pid_file, 'w') as pf: - pf.write('abcd') - - wrapper = PlatformWrapper() - - address = "tcp://127.0.0.1:{}".format(get_rand_port()) - wrapper.startup_platform(address) - - assert wrapper is not None - assert wrapper.is_running() - pid_file = os.path.join(wrapper.volttron_home, "VOLTTRON_PID") - assert os.path.exists(pid_file) - with open(pid_file, 'r') as pf: - pid_str = pf.read().strip() - assert psutil.pid_exists(int(pid_str)) - - # test start-volttron script we don't start a second volttron process if one - # is already running - env = os.environ.copy() - env["VOLTTRON_HOME"] = wrapper.volttron_home - vsource_home = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) - process = Popen(["./start-volttron"], cwd=vsource_home, env=env, stderr=subprocess.PIPE, - stdout=subprocess.PIPE) - (output, error) = process.communicate() - assert process.returncode == 1 - assert "VOLTTRON with process id " + pid_str + " is already running" in \ - output.decode("utf-8") - - - - - - diff --git a/volttrontesting/testutils/test_single_instance.py b/volttrontesting/testutils/test_single_instance.py deleted file mode 100644 index e0847ff473..0000000000 --- a/volttrontesting/testutils/test_single_instance.py +++ /dev/null @@ -1,67 +0,0 @@ -import pytest -import gevent - -from volttron.platform import get_examples - - -@pytest.mark.wrapper -def test_can_install_listeners(volttron_instance): - assert volttron_instance.is_running() - uuids = [] - num_listeners = 5 - - try: - for x in range(num_listeners): - identity = "listener_" + str(x) - auuid = volttron_instance.install_agent( - agent_dir=get_examples("ListenerAgent"), config_file={ - "agentid": identity, - "message": "So Happpy"}) - assert auuid - uuids.append(auuid) - gevent.sleep(0.5) - - for u in uuids: - assert volttron_instance.is_agent_running(u) - - agent = volttron_instance.build_agent() - agent_list = agent.vip.rpc('control', 'list_agents').get(timeout=5) - print('Agent List: {}'.format(agent_list)) - assert len(agent_list) == num_listeners - finally: - for x in uuids: - try: - volttron_instance.remove_agent(x) - except: - print('COULDN"T REMOVE AGENT') - - -@pytest.mark.wrapper -def test_can_install_listeners_vi(volttron_instance): - assert volttron_instance.is_running() - uuids = [] - num_listeners = 5 - - try: - for x in range(num_listeners): - identity = "listener_" + str(x) - auuid = volttron_instance.install_agent( - agent_dir=get_examples("ListenerAgent"), - start=True, - config_file={ - "agentid": identity, - "message": "So Happpy"}) - assert auuid - uuids.append(auuid) - gevent.sleep(0.5) - - agent = volttron_instance.build_agent() - agent_list = agent.vip.rpc('control', 'list_agents').get(timeout=5) - print('Agent List: {}'.format(agent_list)) - assert len(agent_list) == num_listeners - finally: - for x in uuids: - try: - volttron_instance.remove_agent(x) - except: - print('COULDN"T REMOVE AGENT') diff --git a/volttrontesting/utils/__init__.py b/volttrontesting/utils/__init__.py index a36a34777b..16e0b81948 100644 --- a/volttrontesting/utils/__init__.py +++ b/volttrontesting/utils/__init__.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/utils/platformwrapper.py b/volttrontesting/utils/platformwrapper.py index 885dab3e8f..bce25595a9 100644 --- a/volttrontesting/utils/platformwrapper.py +++ b/volttrontesting/utils/platformwrapper.py @@ -1,8 +1,10 @@ import configparser as configparser -from datetime import datetime import logging import os +from pathlib import Path +from typing import Optional, Union import uuid +from urllib.parse import urlencode import psutil import shutil @@ -10,19 +12,22 @@ import tempfile import time import re -from contextlib import closing +from contextlib import closing, contextmanager from os.path import dirname from subprocess import CalledProcessError import gevent import gevent.subprocess as subprocess import requests + +from volttron.platform.vip.socket import encode_key, decode_key +from volttrontesting.fixtures.cert_fixtures import certs_profile_2 from .agent_additions import (add_volttron_central, add_volttron_central_platform) from gevent.fileobject import FileObject from gevent.subprocess import Popen from volttron.platform import packaging, jsonapi -from volttron.platform.agent.known_identities import MASTER_WEB, CONTROL +from volttron.platform.agent.known_identities import PLATFORM_WEB, CONTROL, CONTROL_CONNECTION, PROCESS_IDENTITIES from volttron.platform.certs import Certs from volttron.platform.agent import utils from volttron.platform.agent.utils import (strip_comments, @@ -34,12 +39,12 @@ from volttron.platform.keystore import KeyStore, KnownHostsStore from volttron.platform.vip.agent import Agent from volttron.platform.vip.agent.connection import Connection -from volttrontesting.utils.utils import get_rand_http_address +from volttrontesting.utils.utils import get_rand_http_address, get_rand_vip, get_hostname_and_random_port, \ + get_rand_ip_and_port from volttrontesting.utils.utils import get_rand_tcp_address from volttrontesting.fixtures.rmq_test_setup import create_rmq_volttron_setup from volttron.utils.rmq_setup import start_rabbit, stop_rabbit - utils.setup_logging() _log = logging.getLogger(__name__) @@ -144,15 +149,22 @@ def start_wrapper_platform(wrapper, with_http=False, with_tcp=True, # Please note, if 'with_http'==True, then instance name needs to be provided assert not wrapper.is_running() - # Will returen https if messagebus rmq - bind_address = get_rand_http_address(wrapper.messagebus == 'rmq') if with_http else None + address = get_rand_vip() + if wrapper.ssl_auth: + hostname, port = get_hostname_and_random_port() + bind_address = 'https://{hostname}:{port}'.format(hostname=hostname, port=port) + else: + bind_address = "http://{}".format(get_rand_ip_and_port()) + + # Will return https if messagebus rmq + # bind_address = get_rand_http_address(wrapper.messagebus == 'rmq') if with_http else None vc_http = bind_address vc_tcp = get_rand_tcp_address() if with_tcp else None if add_local_vc_address: ks = KeyStore(os.path.join(wrapper.volttron_home, 'keystore')) ks.generate() - if wrapper.messagebus == 'rmq': + if wrapper.ssl_auth is True: volttron_central_address = vc_http else: volttron_central_address = vc_tcp @@ -182,9 +194,37 @@ def create_volttron_home() -> str: # This is needed to run tests with volttron's secure mode. Without this # default permissions for folders under /tmp directory doesn't not have read or execute for group or others os.chmod(volttron_home, 0o755) + # Move volttron_home to be one level below the mkdir so that + # the volttron.log file is not part of the same folder for + # observer. + volttron_home = os.path.join(volttron_home, "volttron_home") + os.makedirs(volttron_home) return volttron_home +@contextmanager +def with_os_environ(update_env: dict): + """ + Wrapper function for updating os environment and returning it to the previous state. This function + should be used whenever a modification to os.environ is necessary. The restoration of the environment + after the call will happen automatically + + Exaample:: + + with with_os_environ(self.env): + print('within self.env context now') + + :param update_env: + :return: + """ + copy_env = os.environ.copy() + os.environ.update(update_env) + try: + yield + finally: + os.environ = copy_env + + class PlatformWrapper: def __init__(self, messagebus=None, ssl_auth=False, instance_name=None, secure_agent_users=False, remote_platform_ca=None): @@ -203,10 +243,18 @@ def __init__(self, messagebus=None, ssl_auth=False, instance_name=None, self._instance_shutdown = False self.volttron_home = create_volttron_home() + # log file is one level above volttron_home now + self.log_path = os.path.join(os.path.dirname(self.volttron_home), "volttron.log") self.packaged_dir = os.path.join(self.volttron_home, "packaged") os.makedirs(self.packaged_dir) + bin_dir = str(Path(sys.executable).parent) + path = os.environ['PATH'] + if bin_dir not in path: + path = bin_dir + ":" + path + if VOLTTRON_ROOT not in path: + path = VOLTTRON_ROOT + ":" + path # in the context of this platform it is very important not to # use the main os.environ for anything. self.env = { @@ -215,18 +263,22 @@ def __init__(self, messagebus=None, ssl_auth=False, instance_name=None, 'DEBUG_MODE': os.environ.get('DEBUG_MODE', ''), 'DEBUG': os.environ.get('DEBUG', ''), 'SKIP_CLEANUP': os.environ.get('SKIP_CLEANUP', ''), - 'PATH': VOLTTRON_ROOT + ':' + os.environ['PATH'], + 'PATH': path, # RABBITMQ requires HOME env set 'HOME': os.environ.get('HOME'), # Elixir (rmq pre-req) requires locale to be utf-8 'LANG': "en_US.UTF-8", 'LC_ALL': "en_US.UTF-8", - 'PYTHONDONTWRITEBYTECODE': '1' + 'PYTHONDONTWRITEBYTECODE': '1', + 'VOLTTRON_ROOT': VOLTTRON_ROOT } self.volttron_root = VOLTTRON_ROOT - volttron_exe = os.path.dirname(sys.executable) + '/volttron' - assert os.path.exists(volttron_exe) + self.vctl_exe = str(Path(sys.executable).parent.joinpath('volttron-ctl')) + self.volttron_exe = str(Path(sys.executable).parent.joinpath('volttron')) + + assert Path(self.vctl_exe).exists() + assert Path(self.volttron_exe).exists() self.python = sys.executable assert os.path.exists(self.python) @@ -268,58 +320,66 @@ def __init__(self, messagebus=None, ssl_auth=False, instance_name=None, self.ssl_auth = ssl_auth self.instance_name = instance_name if not self.instance_name: - self.instance_name = os.path.basename(self.volttron_home) + self.instance_name = os.path.basename(os.path.dirname(self.volttron_home)) - # Set the VOLTTRON_HOME for this process...note this - # seems tricky but this platform should start up before - # the rest so it should work out ok. - os.environ['VOLTTRON_HOME'] = self.volttron_home + with with_os_environ(self.env): - # Writes the main volttron config file for this instance. - store_message_bus_config(self.messagebus, self.instance_name) + # Writes the main volttron config file for this instance. + store_message_bus_config(self.messagebus, self.instance_name) - self.remote_platform_ca = remote_platform_ca - self.requests_ca_bundle = None - self.dynamic_agent = None + self.remote_platform_ca = remote_platform_ca + self.requests_ca_bundle = None + self.dynamic_agent = None - if self.messagebus == 'rmq': - self.rabbitmq_config_obj = create_rmq_volttron_setup(vhome=self.volttron_home, - ssl_auth=self.ssl_auth, - env=self.env, - instance_name=self.instance_name, - secure_agent_users=secure_agent_users) + if self.messagebus == 'rmq': + self.rabbitmq_config_obj = create_rmq_volttron_setup(vhome=self.volttron_home, + ssl_auth=self.ssl_auth, + env=self.env, + instance_name=self.instance_name, + secure_agent_users=secure_agent_users) self.certsobj = Certs(os.path.join(self.volttron_home, "certificates")) - self.debug_mode = self.env.get('DEBUG_MODE', False) - if not self.debug_mode: - self.debug_mode = self.env.get('DEBUG', False) - self.skip_cleanup = self.env.get('SKIP_CLEANUP', False) + self.debug_mode = self.env.get('DEBUG_MODE', False) + if not self.debug_mode: + self.debug_mode = self.env.get('DEBUG', False) + self.skip_cleanup = self.env.get('SKIP_CLEANUP', False) - self._web_admin_api = None + self._web_admin_api = None @property def web_admin_api(self): return self._web_admin_api + def get_identity_keys(self, identity: str): + with with_os_environ(self.env): + if not Path(KeyStore.get_agent_keystore_path(identity)).exists(): + raise PlatformWrapperError(f"Invalid identity keystore {identity}") + + with open(KeyStore.get_agent_keystore_path(identity)) as ks: + return jsonapi.loads(ks.read()) + def logit(self, message): print('{}: {}'.format(self.volttron_home, message)) def allow_all_connections(self): """ Add a /.*/ entry to the auth.json file. """ - entry = AuthEntry(credentials="/.*/", comments="Added by platformwrapper") - authfile = AuthFile(self.volttron_home + "/auth.json") - try: - authfile.add(entry) - except AuthFileEntryAlreadyExists: - pass + with with_os_environ(self.env): + entry = AuthEntry(credentials="/.*/", comments="Added by platformwrapper") + authfile = AuthFile(self.volttron_home + "/auth.json") + try: + authfile.add(entry) + except AuthFileEntryAlreadyExists: + pass - if self.messagebus == 'rmq' and self.bind_web_address is not None: - self.enable_auto_csr() - self.web_admin_api.create_web_admin('admin', 'admin') + if self.messagebus == 'rmq' and self.bind_web_address is not None: + self.enable_auto_csr() + # if self.bind_web_address is not None: + # self.web_admin_api.create_web_admin('admin', 'admin', self.messagebus) def get_agent_identity(self, agent_uuid): + identity = None path = os.path.join(self.volttron_home, 'agents/{}/IDENTITY'.format(agent_uuid)) with open(path) as f: identity = f.read().strip() @@ -332,7 +392,7 @@ def get_agent_by_identity(self, identity): def build_connection(self, peer=None, address=None, identity=None, publickey=None, secretkey=None, serverkey=None, - capabilities=[], **kwargs): + capabilities: Optional[dict] = None, **kwargs): self.logit('Building connection to {}'.format(peer)) os.environ.update(self.env) self.allow_all_connections() @@ -376,7 +436,7 @@ def build_connection(self, peer=None, address=None, identity=None, def build_agent(self, address=None, should_spawn=True, identity=None, publickey=None, secretkey=None, serverkey=None, - agent_class=Agent, **kwargs): + agent_class=Agent, capabilities: Optional[dict] = None, **kwargs): """ Build an agent connnected to the passed bus. By default the current instance that this class wraps will be the @@ -404,12 +464,11 @@ def build_agent(self, address=None, should_spawn=True, identity=None, if serverkey is None: serverkey = self.serverkey if publickey is None: - self.logit('generating new public secret key pair') - keyfile = tempfile.mktemp(".keys", "agent", self.volttron_home) - keys = KeyStore(keyfile) - keys.generate() - publickey = keys.public - secretkey = keys.secret + self.logit(f'generating new public secret key pair {KeyStore.get_agent_keystore_path(identity=identity)}') + ks = KeyStore(KeyStore.get_agent_keystore_path(identity=identity)) + # ks.generate() + publickey = ks.public + secretkey = ks.secret if address is None: self.logit('Using vip-address {address}'.format( @@ -425,6 +484,18 @@ def build_agent(self, address=None, should_spawn=True, identity=None, if 'enable_store' not in kwargs: kwargs['enable_store'] = False + + if capabilities is None: + capabilities = dict(edit_config_store=dict(identity=identity)) + entry = AuthEntry(user_id=identity, credentials=publickey, + capabilities=capabilities, + comments="Added by platform wrapper") + authfile = AuthFile() + authfile.add(entry, overwrite=False, no_error=True) + # allow 2 seconds here for the auth to be updated in auth service + # before connecting to the platform with the agent. + # + gevent.sleep(2) agent = agent_class(address=address, identity=identity, publickey=publickey, secretkey=secretkey, serverkey=serverkey, @@ -434,19 +505,13 @@ def build_agent(self, address=None, should_spawn=True, identity=None, **kwargs) self.logit('platformwrapper.build_agent.address: {}'.format(address)) - # Automatically add agent's credentials to auth.json file - if publickey: - self.logit(f'Adding publickey to auth.json {publickey} {identity}') - self._append_allow_curve_key(publickey, agent.core.identity) - if should_spawn: - self.logit('platformwrapper.build_agent spawning') + self.logit(f'platformwrapper.build_agent spawning for identity {identity}') event = gevent.event.Event() - gevent.spawn(agent.core.run, event) # .join(0) + gevent.spawn(agent.core.run, event) event.wait(timeout=2) - gevent.sleep(2) - hello = agent.vip.hello().get(timeout=15) - assert len(hello) > 0 + router_ping = agent.vip.ping("").get(timeout=30) + assert len(router_ping) > 0 agent.publickey = publickey return agent @@ -475,10 +540,7 @@ def _append_allow_curve_key(self, publickey, identity): else: entry = AuthEntry(credentials=publickey, comments="Added by platform wrapper. No identity passed") authfile = AuthFile(self.volttron_home + "/auth.json") - try: - authfile.add(entry, overwrite=True) - except AuthFileEntryAlreadyExists: - pass + authfile.add(entry, no_error=True) def add_vc(self): os.environ.update(self.env) @@ -492,41 +554,41 @@ def add_vcp(self): def is_auto_csr_enabled(self): assert self.messagebus == 'rmq', 'Only available for rmq messagebus' assert self.bind_web_address, 'Must have a web based instance' - return self.dynamic_agent.vip.rpc(MASTER_WEB, 'is_auto_allow_csr').get() + return self.dynamic_agent.vip.rpc(PLATFORM_WEB, 'is_auto_allow_csr').get() def enable_auto_csr(self): assert self.messagebus == 'rmq', 'Only available for rmq messagebus' assert self.bind_web_address, 'Must have a web based instance' - self.dynamic_agent.vip.rpc(MASTER_WEB, 'auto_allow_csr', True).get() + self.dynamic_agent.vip.rpc(PLATFORM_WEB, 'auto_allow_csr', True).get() assert self.is_auto_csr_enabled() def disable_auto_csr(self): assert self.messagebus == 'rmq', 'Only available for rmq messagebus' assert self.bind_web_address, 'Must have a web based instance' - self.dynamic_agent.vip.rpc(MASTER_WEB, 'auto_allow_csr', False).get() + self.dynamic_agent.vip.rpc(PLATFORM_WEB, 'auto_allow_csr', False).get() assert not self.is_auto_csr_enabled() def add_capabilities(self, publickey, capabilities): - if isinstance(capabilities, str) or isinstance(capabilities, dict): - capabilities = [capabilities] - auth_path = self.volttron_home + "/auth.json" - auth = AuthFile(auth_path) - entry = auth.find_by_credentials(publickey)[0] - caps = entry.capabilities - - if isinstance(capabilities, list): - for c in capabilities: - self.add_capability(c, caps) - else: - self.add_capability(capabilities, caps) - auth.add(entry, overwrite=True) - _log.debug("Updated entry is {}".format(entry)) - # Minimum sleep of 2 seconds seem to be needed in order for auth updates to get propagated to peers. - # This slow down is not an issue with file watcher but rather vip.peerlist(). peerlist times out - # when invoked in quick succession. add_capabilities updates auth.json, gets the peerlist and calls all peers' - # auth.update rpc call. So sleeping here instead expecting individual test cases to sleep for long - gevent.sleep(2) - + with with_os_environ(self.env): + if isinstance(capabilities, str) or isinstance(capabilities, dict): + capabilities = [capabilities] + auth_path = self.volttron_home + "/auth.json" + auth = AuthFile(auth_path) + entry = auth.find_by_credentials(publickey)[0] + caps = entry.capabilities + + if isinstance(capabilities, list): + for c in capabilities: + self.add_capability(c, caps) + else: + self.add_capability(capabilities, caps) + auth.add(entry, overwrite=True) + _log.debug("Updated entry is {}".format(entry)) + # Minimum sleep of 2 seconds seem to be needed in order for auth updates to get propagated to peers. + # This slow down is not an issue with file watcher but rather vip.peerlist(). peerlist times out + # when invoked in quick succession. add_capabilities updates auth.json, gets the peerlist and calls all peers' + # auth.update rpc call. So sleeping here instead expecting individual test cases to sleep for long + gevent.sleep(2) @staticmethod def add_capability(entry, capabilites): @@ -551,249 +613,320 @@ def startup_platform(self, vip_address, auth_dict=None, msgdebug=False, setupmode=False, agent_monitor_frequency=600, - timeout=60): - # Update OS env to current platform's env so get_home() call will result - # in correct home director. Without this when more than one test instance are created, get_home() - # will return home dir of last started platform wrapper instance - os.environ.update(self.env) + timeout=60, + # Allow the AuthFile to be preauthenticated with keys for service agents. + perform_preauth_service_agents=True): + + with with_os_environ(self.env): + # Update OS env to current platform's env so get_home() call will result + # in correct home director. Without this when more than one test instance are created, get_home() + # will return home dir of last started platform wrapper instance + os.environ.update(self.env) + + # Add check and raise error if the platform is already running for this instance. + if self.is_running(): + raise PlatformWrapperError("Already running platform") + + self.vip_address = vip_address + self.mode = mode + self.volttron_central_address = volttron_central_address + self.volttron_central_serverkey = volttron_central_serverkey + self.bind_web_address = bind_web_address + + if perform_preauth_service_agents: + authfile = AuthFile() + if not authfile.read_allow_entries(): + # if this is a brand new auth.json + # pre-seed all of the volttron process identities before starting the platform + for identity in PROCESS_IDENTITIES: + if identity == PLATFORM_WEB: + capabilities = dict(allow_auth_modifications=None) + else: + capabilities = dict(edit_config_store=dict(identity="/.*/")) + + ks = KeyStore(KeyStore.get_agent_keystore_path(identity)) + entry = AuthEntry(credentials=encode_key(decode_key(ks.public)), + user_id=identity, + capabilities=capabilities, + comments='Added by pre-seeding.') + authfile.add(entry) + + # Control connection needs to be added so that vctl can connect easily + identity = CONTROL_CONNECTION + capabilities = dict(edit_config_store=dict(identity="/.*/")) + ks = KeyStore(KeyStore.get_agent_keystore_path(identity)) + entry = AuthEntry(credentials=encode_key(decode_key(ks.public)), + user_id=identity, + capabilities=capabilities, + comments='Added by pre-seeding.') + authfile.add(entry) + + identity = "dynamic_agent" + capabilities = dict(edit_config_store=dict(identity="/.*/"), allow_auth_modifications=None) + # Lets cheat a little because this is a wrapper and add the dynamic agent in here as well + ks = KeyStore(KeyStore.get_agent_keystore_path(identity)) + entry = AuthEntry(credentials=encode_key(decode_key(ks.public)), + user_id=identity, + capabilities=capabilities, + comments='Added by pre-seeding.') + authfile.add(entry) + + msgdebug = self.env.get('MSG_DEBUG', False) + enable_logging = self.env.get('ENABLE_LOGGING', False) + + if self.debug_mode: + self.skip_cleanup = True + enable_logging = True + msgdebug = True + + self.logit("Starting Platform: {}".format(self.volttron_home)) + assert self.mode in MODES, 'Invalid platform mode set: ' + str(mode) + opts = None + + # see main.py for how we handle pub sub addresses. + ipc = 'ipc://{}{}/run/'.format( + '@' if sys.platform.startswith('linux') else '', + self.volttron_home) + self.local_vip_address = ipc + 'vip.socket' + self.set_auth_dict(auth_dict) + + web_ssl_cert = None + web_ssl_key = None + if self.messagebus == 'rmq' and bind_web_address: + self.env['REQUESTS_CA_BUNDLE'] = self.certsobj.cert_file(self.certsobj.root_ca_name) + + # Enable SSL for ZMQ + elif self.messagebus == 'zmq' and self.ssl_auth and bind_web_address: + web_certs = certs_profile_2(os.path.join(self.volttron_home, "certificates")) + web_ssl_cert = web_certs['server_certs'][0]['cert_file'] + web_ssl_key = web_certs['server_certs'][0]['key_file'] + # # Add platform key to known-hosts file: + # known_hosts = KnownHostsStore() + # known_hosts.add(opts.vip_local_address, encode_key(publickey)) + # for addr in opts.vip_address: + # known_hosts.add(addr, encode_key(publickey)) + + if self.bind_web_address: + # Create web users for platform web authentication + # from volttron.platform.web.admin_endpoints import AdminEndpoints + # from volttrontesting.utils.web_utils import get_test_web_env + # adminep = AdminEndpoints() + # params = urlencode(dict(username='admin', password1='admin', password2='admin')) + # env = get_test_web_env("/admin/setpassword", method='POST') # , input_data=input) + # response = adminep.admin(env, params) + # print(f"RESPONSE 1: {response}") + self.discovery_address = "{}/discovery/".format( + self.bind_web_address) + + # Only available if vc is installed! + self.jsonrpc_endpoint = "{}/vc/jsonrpc".format( + self.bind_web_address) + + if self.remote_platform_ca: + ca_bundle_file = os.path.join(self.volttron_home, "cat_ca_certs") + with open(ca_bundle_file, 'w') as cf: + if self.ssl_auth: + with open(self.certsobj.cert_file(self.certsobj.root_ca_name)) as f: + cf.write(f.read()) + with open(self.remote_platform_ca) as f: + cf.write(f.read()) + os.chmod(ca_bundle_file, 0o744) + self.env['REQUESTS_CA_BUNDLE'] = ca_bundle_file + os.environ['REQUESTS_CA_BUNDLE'] = self.env['REQUESTS_CA_BUNDLE'] + # This file will be passed off to the main.py and available when + # the platform starts up. + self.requests_ca_bundle = self.env.get('REQUESTS_CA_BUNDLE') + + self.opts = {'verify_agents': False, + 'volttron_home': self.volttron_home, + 'vip_address': vip_address, + 'vip_local_address': ipc + 'vip.socket', + 'publish_address': ipc + 'publish', + 'subscribe_address': ipc + 'subscribe', + 'bind_web_address': bind_web_address, + 'volttron_central_address': volttron_central_address, + 'volttron_central_serverkey': volttron_central_serverkey, + 'secure_agent_users': self.secure_agent_users, + 'platform_name': None, + 'log': self.log_path, + 'log_config': None, + 'monitor': True, + 'autostart': True, + 'log_level': logging.DEBUG, + 'verboseness': logging.DEBUG, + 'web_ca_cert': self.requests_ca_bundle} + + pconfig = os.path.join(self.volttron_home, 'config') + config = {} + + # Add platform's public key to known hosts file + publickey = self.keystore.public + known_hosts_file = os.path.join(self.volttron_home, 'known_hosts') + known_hosts = KnownHostsStore(known_hosts_file) + known_hosts.add(self.opts['vip_local_address'], publickey) + known_hosts.add(self.opts['vip_address'], publickey) + + # Set up the configuration file based upon the passed parameters. + parser = configparser.ConfigParser() + parser.add_section('volttron') + parser.set('volttron', 'vip-address', vip_address) + if bind_web_address: + parser.set('volttron', 'bind-web-address', bind_web_address) + if web_ssl_cert: + parser.set('volttron', 'web-ssl-cert', web_ssl_cert) + if web_ssl_key: + parser.set('volttron', 'web-ssl-key', web_ssl_key) + if volttron_central_address: + parser.set('volttron', 'volttron-central-address', + volttron_central_address) + if volttron_central_serverkey: + parser.set('volttron', 'volttron-central-serverkey', + volttron_central_serverkey) + if self.instance_name: + parser.set('volttron', 'instance-name', + self.instance_name) + if self.messagebus: + parser.set('volttron', 'message-bus', + self.messagebus) + if self.secure_agent_users: + parser.set('volttron', 'secure-agent-users', + str(self.secure_agent_users)) + # In python3 option values must be strings. + parser.set('volttron', 'agent-monitor-frequency', + str(agent_monitor_frequency)) - self.vip_address = vip_address - self.mode = mode - self.volttron_central_address = volttron_central_address - self.volttron_central_serverkey =volttron_central_serverkey - self.bind_web_address = bind_web_address - if self.bind_web_address: - self.discovery_address = "{}/discovery/".format( - self.bind_web_address) + self.logit( + "Platform will run on message bus type {} ".format(self.messagebus)) + self.logit("writing config to: {}".format(pconfig)) - # Only available if vc is installed! - self.jsonrpc_endpoint = "{}/vc/jsonrpc".format( - self.bind_web_address) + if self.ssl_auth: + certsdir = os.path.join(self.volttron_home, 'certificates') - msgdebug = self.env.get('MSG_DEBUG', False) - enable_logging = self.env.get('ENABLE_LOGGING', False) + self.certsobj = Certs(certsdir) - if self.debug_mode: - self.skip_cleanup = True - enable_logging = True - msgdebug = True - - self.logit("Starting Platform: {}".format(self.volttron_home)) - assert self.mode in MODES, 'Invalid platform mode set: ' + str(mode) - opts = None - - # see main.py for how we handle pub sub addresses. - ipc = 'ipc://{}{}/run/'.format( - '@' if sys.platform.startswith('linux') else '', - self.volttron_home) - self.local_vip_address = ipc + 'vip.socket' - self.set_auth_dict(auth_dict) - - if self.messagebus == 'rmq' and bind_web_address: - self.env['REQUESTS_CA_BUNDLE'] = self.certsobj.cert_file(self.certsobj.root_ca_name) - - if self.remote_platform_ca: - ca_bundle_file = os.path.join(self.volttron_home, "cat_ca_certs") - with open(ca_bundle_file, 'w') as cf: - if self.messagebus == 'rmq': - with open(self.certsobj.cert_file(self.certsobj.root_ca_name)) as f: - cf.write(f.read()) - with open(self.remote_platform_ca) as f: - cf.write(f.read()) - os.chmod(ca_bundle_file, 0o744) - self.env['REQUESTS_CA_BUNDLE'] = ca_bundle_file - os.environ['REQUESTS_CA_BUNDLE'] = self.env['REQUESTS_CA_BUNDLE'] - # This file will be passed off to the main.py and available when - # the platform starts up. - self.requests_ca_bundle = self.env.get('REQUESTS_CA_BUNDLE') - - self.opts = {'verify_agents': False, - 'volttron_home': self.volttron_home, - 'vip_address': vip_address, - 'vip_local_address': ipc + 'vip.socket', - 'publish_address': ipc + 'publish', - 'subscribe_address': ipc + 'subscribe', - 'bind_web_address': bind_web_address, - 'volttron_central_address': volttron_central_address, - 'volttron_central_serverkey': volttron_central_serverkey, - 'secure_agent_users': self.secure_agent_users, - 'platform_name': None, - 'log': os.path.join(self.volttron_home, 'volttron.log'), - 'log_config': None, - 'monitor': True, - 'autostart': True, - 'log_level': logging.DEBUG, - 'verboseness': logging.DEBUG, - 'web_ca_cert': self.requests_ca_bundle} - - pconfig = os.path.join(self.volttron_home, 'config') - config = {} - - # Add platform's public key to known hosts file - publickey = self.keystore.public - known_hosts_file = os.path.join(self.volttron_home, 'known_hosts') - known_hosts = KnownHostsStore(known_hosts_file) - known_hosts.add(self.opts['vip_local_address'], publickey) - known_hosts.add(self.opts['vip_address'], publickey) - - # Set up the configuration file based upon the passed parameters. - parser = configparser.ConfigParser() - parser.add_section('volttron') - parser.set('volttron', 'vip-address', vip_address) - if bind_web_address: - parser.set('volttron', 'bind-web-address', bind_web_address) - if volttron_central_address: - parser.set('volttron', 'volttron-central-address', - volttron_central_address) - if volttron_central_serverkey: - parser.set('volttron', 'volttron-central-serverkey', - volttron_central_serverkey) - if self.instance_name: - parser.set('volttron', 'instance-name', - self.instance_name) - if self.messagebus: - parser.set('volttron', 'message-bus', - self.messagebus) - if self.secure_agent_users: - parser.set('volttron', 'secure-agent-users', - str(self.secure_agent_users)) - # In python3 option values must be strings. - parser.set('volttron', 'agent-monitor-frequency', - str(agent_monitor_frequency)) - - self.logit( - "Platform will run on message bus type {} ".format(self.messagebus)) - self.logit("writing config to: {}".format(pconfig)) - - if self.ssl_auth: - certsdir = os.path.join(self.volttron_home, 'certificates') - - self.certsobj = Certs(certsdir) - - if self.mode == UNRESTRICTED: - with open(pconfig, 'w') as cfg: - parser.write(cfg) - - elif self.mode == RESTRICTED: - if not RESTRICTED_AVAILABLE: - raise ValueError("restricted is not available.") - - certsdir = os.path.join(self.volttron_home, 'certificates') - - print ("certsdir", certsdir) - self.certsobj = Certs(certsdir) - - with closing(open(pconfig, 'w')) as cfg: - cfg.write(PLATFORM_CONFIG_RESTRICTED.format(**config)) - else: - raise PlatformWrapperError( - "Invalid platform mode specified: {}".format(mode)) - - log = os.path.join(self.volttron_home, 'volttron.log') - - cmd = ['volttron'] - # if msgdebug: - # cmd.append('--msgdebug') - if enable_logging: - cmd.append('-vv') - cmd.append('-l{}'.format(log)) - if setupmode: - cmd.append('--setup-mode') - - from pprint import pprint - print('process environment: ') - pprint(self.env) - print('popen params: {}'.format(cmd)) - self.p_process = Popen(cmd, env=self.env, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, universal_newlines=True) - - assert self.p_process is not None - # A None value means that the process is still running. - # A negative means that the process exited with an error. - assert self.p_process.poll() is None - - utils.wait_for_volttron_startup(self.volttron_home, timeout) - - self.serverkey = self.keystore.public - assert self.serverkey - - # Use dynamic_agent so we can look and see the agent with peerlist. - if not setupmode: - gevent.sleep(2) - self.dynamic_agent = self.build_agent(identity="dynamic_agent") - assert self.dynamic_agent is not None - assert isinstance(self.dynamic_agent, Agent) - has_control = False - times = 0 - while not has_control and times < 10: - times += 1 - try: - has_control = CONTROL in self.dynamic_agent.vip.peerlist().get(timeout=.2) - self.logit("Has control? {}".format(has_control)) - except gevent.Timeout: - pass - - if not has_control: - self.shutdown_platform() - raise Exception("Couldn't connect to core platform!") - - def subscribe_to_all(peer, sender, bus, topic, headers, messages): - logged = "{} --------------------Pubsub Message--------------------\n".format( - utils.format_timestamp(datetime.now())) - logged += "PEER: {}\n".format(peer) - logged += "SENDER: {}\n".format(sender) - logged += "Topic: {}\n".format(topic) - logged += "headers: {}\n".format([str(k) + '=' + str(v) for k, v in headers.items()]) - logged += "message: {}\n".format(messages) - logged += "-------------------------------------------------------\n" - self.logit(logged) - - self.dynamic_agent.vip.pubsub.subscribe('pubsub', '', subscribe_to_all).get() - - if bind_web_address: - times = 0 - has_discovery = False - error_was = None - - while times < 10: - times += 1 - try: - if self.ssl_auth: - resp = requests.get(self.discovery_address, - verify=self.certsobj.cert_file(self.certsobj.root_ca_name)) - else: - resp = requests.get(self.discovery_address) - if resp.ok: - self.logit("Has discovery address for {}".format(self.discovery_address)) - if self.requests_ca_bundle: - self.logit("Using REQUESTS_CA_BUNDLE: {}".format(self.requests_ca_bundle)) - else: - self.logit("Not using requests_ca_bundle for message bus: {}".format(self.messagebus)) - has_discovery = True - break - except Exception as e: - gevent.sleep(0.5) - error_was = e - self.logit("Connection error found {}".format(e)) - if not has_discovery: - if error_was: - raise error_was - raise Exception("Couldn't connect to discovery platform.") - - # Now that we know we have web and we are using ssl then we - # can enable the WebAdminApi. - if self.ssl_auth: - self._web_admin_api = WebAdminApi(self) - - gevent.sleep(10) + if self.mode == UNRESTRICTED: + with open(pconfig, 'w') as cfg: + parser.write(cfg) + + elif self.mode == RESTRICTED: + if not RESTRICTED_AVAILABLE: + raise ValueError("restricted is not available.") + certsdir = os.path.join(self.volttron_home, 'certificates') + + print("certsdir", certsdir) + self.certsobj = Certs(certsdir) + + with closing(open(pconfig, 'w')) as cfg: + cfg.write(PLATFORM_CONFIG_RESTRICTED.format(**config)) + else: + raise PlatformWrapperError( + "Invalid platform mode specified: {}".format(mode)) + + cmd = [self.volttron_exe] + # if msgdebug: + # cmd.append('--msgdebug') + if enable_logging: + cmd.append('-vv') + cmd.append('-l{}'.format(self.log_path)) + if setupmode: + cmd.append('--setup-mode') + + from pprint import pprint + print('process environment: ') + pprint(self.env) + print('popen params: {}'.format(cmd)) + self.p_process = Popen(cmd, env=self.env, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=True) + + # A None value means that the process is still running. + # A negative means that the process exited with an error. + assert self.p_process.poll() is None + + utils.wait_for_volttron_startup(self.volttron_home, timeout) + + self.serverkey = self.keystore.public + assert self.serverkey + + # Use dynamic_agent so we can look and see the agent with peerlist. + if not setupmode: + gevent.sleep(2) + self.dynamic_agent = self.build_agent(identity="dynamic_agent") + assert self.dynamic_agent is not None + assert isinstance(self.dynamic_agent, Agent) + has_control = False + times = 0 + while not has_control and times < 10: + times += 1 + try: + has_control = CONTROL in self.dynamic_agent.vip.peerlist().get(timeout=.2) + self.logit("Has control? {}".format(has_control)) + except gevent.Timeout: + pass + + if not has_control: + self.shutdown_platform() + raise Exception("Couldn't connect to core platform!") + + # def subscribe_to_all(peer, sender, bus, topic, headers, messages): + # logged = "{} --------------------Pubsub Message--------------------\n".format( + # utils.format_timestamp(datetime.now())) + # logged += "PEER: {}\n".format(peer) + # logged += "SENDER: {}\n".format(sender) + # logged += "Topic: {}\n".format(topic) + # logged += "headers: {}\n".format([str(k) + '=' + str(v) for k, v in headers.items()]) + # logged += "message: {}\n".format(messages) + # logged += "-------------------------------------------------------\n" + # self.logit(logged) + # + # self.dynamic_agent.vip.pubsub.subscribe('pubsub', '', subscribe_to_all).get() + + if bind_web_address: + # Now that we know we have web and we are using ssl then we + # can enable the WebAdminApi. + # if self.ssl_auth: + self._web_admin_api = WebAdminApi(self) + self._web_admin_api.create_web_admin("admin", "admin") + times = 0 + has_discovery = False + error_was = None + + while times < 10: + times += 1 + try: + if self.ssl_auth: + resp = requests.get(self.discovery_address, + verify=self.certsobj.cert_file(self.certsobj.root_ca_name)) + else: + resp = requests.get(self.discovery_address) + if resp.ok: + self.logit("Has discovery address for {}".format(self.discovery_address)) + if self.requests_ca_bundle: + self.logit("Using REQUESTS_CA_BUNDLE: {}".format(self.requests_ca_bundle)) + else: + self.logit("Not using requests_ca_bundle for message bus: {}".format(self.messagebus)) + has_discovery = True + break + except Exception as e: + gevent.sleep(0.5) + error_was = e + self.logit("Connection error found {}".format(e)) + if not has_discovery: + if error_was: + raise error_was + raise Exception("Couldn't connect to discovery platform.") + + + if self.is_running(): + self._instance_shutdown = False def is_running(self): - return utils.is_volttron_running(self.volttron_home) + with with_os_environ(self.env): + return utils.is_volttron_running(self.volttron_home) def direct_sign_agentpackage_creator(self, package): assert RESTRICTED, "Auth not available" - print ("wrapper.certsobj", self.certsobj.cert_dir) + print("wrapper.certsobj", self.certsobj.cert_dir) assert ( auth.sign_as_creator(package, 'creator', certsobj=self.certsobj)), "Signing as {} failed.".format( @@ -819,26 +952,25 @@ def _aip(self): aip.setup() return aip - def _install_agent(self, wheel_file, start, vip_identity): - self.logit('Creating channel for sending the agent.') - gevent.sleep(0.3) - self.logit('calling control install agent.') - self.logit("VOLTTRON_HOME SETTING: {}".format( - self.env['VOLTTRON_HOME'])) - env = self.env.copy() - cmd = ['volttron-ctl', '-vv', 'install', wheel_file] - if vip_identity: - cmd.extend(['--vip-identity', vip_identity]) + def __install_agent_wheel__(self, wheel_file, start, vip_identity): + with with_os_environ(self.env): + self.__wait_for_control_connection_to_exit__() - res = execute_command(cmd, env=env, logger=_log) - assert res, "failed to install wheel:{}".format(wheel_file) - agent_uuid = res.split(' ')[-2] - self.logit(agent_uuid) + self.logit("VOLTTRON_HOME SETTING: {}".format( + self.env['VOLTTRON_HOME'])) + env = self.env.copy() + cmd = ['volttron-ctl', '--json', 'install', wheel_file] + if vip_identity: + cmd.extend(['--vip-identity', vip_identity]) - if start: - self.start_agent(agent_uuid) - return agent_uuid + res = execute_command(cmd, env=env, logger=_log) + assert res, "failed to install wheel:{}".format(wheel_file) + agent_uuid = res.split(' ')[-2] + self.logit(agent_uuid) + if start: + self.start_agent(agent_uuid) + return agent_uuid def install_multiple_agents(self, agent_configs): """ @@ -859,18 +991,25 @@ def install_multiple_agents(self, agent_configs): In order for this method to be called the platform must be currently running. """ - if not self.is_running(): - raise PlatformWrapperError("Instance isn't running!") results = [] + with with_os_environ(self.env): + if not self.is_running(): + raise PlatformWrapperError("Instance isn't running!") + - for path, config, start in agent_configs: - results = self.install_agent(agent_dir=path, config_file=config, - start=start) + for path, config, start in agent_configs: + results = self.install_agent(agent_dir=path, config_file=config, + start=start) return results - def install_agent(self, agent_wheel=None, agent_dir=None, config_file=None, - start=True, vip_identity=None, startup_time=2, force=False): + def install_agent(self, agent_wheel: Optional[str] = None, + agent_dir: Optional[str] = None, + config_file: Optional[Union[dict, str]] = None, + start: bool = True, + vip_identity: Optional[str] = None, + startup_time: int = 5, + force: bool = False): """ Install and optionally start an agent on the instance. @@ -895,169 +1034,214 @@ def install_agent(self, agent_wheel=None, agent_dir=None, config_file=None, Should this overwrite the current or not. :return: """ - os.environ.update(self.env) - assert self.is_running(), "Instance must be running to install agent." - assert agent_wheel or agent_dir, "Invalid agent_wheel or agent_dir." - assert isinstance(startup_time, int), "Startup time should be an integer." - - if agent_wheel: - assert not agent_dir - assert not config_file - assert os.path.exists(agent_wheel) - wheel_file = agent_wheel - agent_uuid = self._install_agent(wheel_file, start, vip_identity) - - # Now if the agent_dir is specified. - temp_config = None - if agent_dir: - assert not agent_wheel - temp_config = os.path.join(self.volttron_home, - os.path.basename(agent_dir) + "_config_file") - if isinstance(config_file, dict): - from os.path import join, basename - temp_config = join(self.volttron_home, - basename(agent_dir) + "_config_file") - with open(temp_config, "w") as fp: - fp.write(jsonapi.dumps(config_file)) - config_file = temp_config - elif not config_file: - if os.path.exists(os.path.join(agent_dir, "config")): - config_file = os.path.join(agent_dir, "config") - else: + with with_os_environ(self.env): + self.__wait_for_control_connection_to_exit__() + assert self.is_running(), "Instance must be running to install agent." + assert agent_wheel or agent_dir, "Invalid agent_wheel or agent_dir." + assert isinstance(startup_time, int), "Startup time should be an integer." + + if agent_wheel: + assert not agent_dir + assert not config_file + assert os.path.exists(agent_wheel) + wheel_file = agent_wheel + agent_uuid = self.__install_agent_wheel__(wheel_file, False, vip_identity) + + # Now if the agent_dir is specified. + temp_config = None + if agent_dir: + assert not agent_wheel + temp_config = os.path.join(self.volttron_home, + os.path.basename(agent_dir) + "_config_file") + if isinstance(config_file, dict): from os.path import join, basename temp_config = join(self.volttron_home, basename(agent_dir) + "_config_file") with open(temp_config, "w") as fp: - fp.write(jsonapi.dumps({})) + fp.write(jsonapi.dumps(config_file)) config_file = temp_config - elif os.path.exists(config_file): - pass # config_file already set! - else: - raise ValueError("Can't determine correct config file.") - - script = os.path.join(self.volttron_root, - "scripts/install-agent.py") - cmd = [self.python, script, - "--volttron-home", self.volttron_home, - "--volttron-root", self.volttron_root, - "--agent-source", agent_dir, - "--config", config_file, - "--json", - "--agent-start-time", str(startup_time)] - - if force: - cmd.extend(["--force"]) - if vip_identity: - cmd.extend(["--vip-identity", vip_identity]) - if start: - cmd.extend(["--start"]) + elif not config_file: + if os.path.exists(os.path.join(agent_dir, "config")): + config_file = os.path.join(agent_dir, "config") + else: + from os.path import join, basename + temp_config = join(self.volttron_home, + basename(agent_dir) + "_config_file") + with open(temp_config, "w") as fp: + fp.write(jsonapi.dumps({})) + config_file = temp_config + elif os.path.exists(config_file): + pass # config_file already set! + else: + raise ValueError("Can't determine correct config file.") - stdout = execute_command(cmd, logger=_log, - err_prefix="Error installing agent") + cmd = [self.vctl_exe, "--json", "install", agent_dir, "--agent-config", config_file] - self.logit(stdout) - # Because we are no longer silencing output from the install, the - # the results object is now much more verbose. Our assumption is - # that the result we are looking for is the only JSON block in - # the output + if force: + cmd.extend(["--force"]) + if vip_identity: + cmd.extend(["--vip-identity", vip_identity]) + # vctl install with start seem to have a auth issue. For now start after install + # if start: + # cmd.extend(["--start"]) - match = re.search(r'^({.*})', stdout, flags=re.M | re.S) - if match: - results = match.group(0) - else: - raise ValueError( - "The results were not found in the command output") - self.logit("here are the results: {}".format(results)) + stdout = execute_command(cmd, logger=_log, env=self.env, + err_prefix="Error installing agent") - # - # Response from results is expected as follows depending on - # parameters, note this is a json string so parse to get dictionary - # { - # "started": true, - # "agent_pid": 26241, - # "starting": true, - # "agent_uuid": "ec1fd94e-922a-491f-9878-c392b24dbe50" - # } - assert results - - resultobj = jsonapi.loads(str(results)) + # Because we are no longer silencing output from the install, the + # the results object is now much more verbose. Our assumption is + # that the result we are looking for is the only JSON block in + # the output + match = re.search(r'^({.*})', stdout, flags=re.M | re.S) + if match: + results = match.group(0) + else: + raise ValueError( + "The results were not found in the command output") + self.logit("here are the results: {}".format(results)) + + # + # Response from results is expected as follows depending on + # parameters, note this is a json string so parse to get dictionary + # { + # "started": true, + # "agent_pid": 26241, + # "starting": true, + # "agent_uuid": "ec1fd94e-922a-491f-9878-c392b24dbe50" + # } + assert results + + resultobj = jsonapi.loads(str(results)) + + # if start: + # assert resultobj['started'] + agent_uuid = resultobj['agent_uuid'] + + assert agent_uuid is not None + time.sleep(5) if start: - assert resultobj['started'] - agent_uuid = resultobj['agent_uuid'] - - assert agent_uuid is not None + # call start after install for now. vctl install with start seem to have auth issues. + self.start_agent(agent_uuid) + assert self.is_agent_running(agent_uuid) - if start: - assert self.is_agent_running(agent_uuid) + # remove temp config_file + if temp_config and os.path.isfile(temp_config): + os.remove(temp_config) - # remove temp config_file - if temp_config and os.path.isfile(temp_config): - os.remove(temp_config) + return agent_uuid - return agent_uuid + def __wait_for_control_connection_to_exit__(self, timeout: int = 20): + """ + Call the dynamic agent's peerlist method until the control connection is no longer connected or + timeout is reached + :param timeout: + :return: + """ + with with_os_environ(self.env): + self.logit("Waiting for control_connection to exit") + disconnected = False + timer_start = time.time() + while not disconnected: + peers = self.dynamic_agent.vip.peerlist().get(timeout=20) + disconnected = CONTROL_CONNECTION not in peers + if disconnected: + break + self.logit(f"Waiting for control connection to disconnect: {peers} time: {timer_start - time.time()} timeout is {timeout}") + if time.time() - timer_start > timeout: + raise PlatformWrapperError(f"Failed for {CONTROL_CONNECTION} to exit in a timely manner.") + time.sleep(0.5) + + if not disconnected: + raise PlatformWrapperError("Control connection did not stop properly") def start_agent(self, agent_uuid): - self.logit('Starting agent {}'.format(agent_uuid)) - self.logit("VOLTTRON_HOME SETTING: {}".format( - self.env['VOLTTRON_HOME'])) - cmd = ['volttron-ctl'] - cmd.extend(['start', agent_uuid]) - p = Popen(cmd, env=self.env, - stdout=sys.stdout, stderr=sys.stderr, universal_newlines=True) - p.wait() + with with_os_environ(self.env): + self.logit('Starting agent {}'.format(agent_uuid)) + self.logit("VOLTTRON_HOME SETTING: {}".format( + self.env['VOLTTRON_HOME'])) + if not self.is_running(): + raise PlatformWrapperError("Instance must be running before starting agent") - # Confirm agent running - cmd = ['volttron-ctl'] - cmd.extend(['status', agent_uuid]) - res = execute_command(cmd, env=self.env) - # 776 TODO: Timing issue where check fails - time.sleep(.1) - self.logit("Subprocess res is {}".format(res)) - assert 'running' in res - pidpos = res.index('[') + 1 - pidend = res.index(']') - pid = int(res[pidpos: pidend]) - - assert psutil.pid_exists(pid), \ - "The pid associated with agent {} does not exist".format(pid) - - self.started_agent_pids.append(pid) - return pid + self.__wait_for_control_connection_to_exit__() + + cmd = [self.vctl_exe, '--json'] + cmd.extend(['start', agent_uuid]) + result = execute_command(cmd, self.env) + + self.__wait_for_control_connection_to_exit__() + + # Confirm agent running + cmd = [self.vctl_exe, '--json'] + cmd.extend(['status', agent_uuid]) + res = execute_command(cmd, env=self.env) + + result = jsonapi.loads(res) + # 776 TODO: Timing issue where check fails + time.sleep(3) + self.logit("Subprocess res is {}".format(res)) + assert 'running' in res + pidpos = res.index('[') + 1 + pidend = res.index(']') + pid = int(res[pidpos: pidend]) + + assert psutil.pid_exists(pid), \ + "The pid associated with agent {} does not exist".format(pid) + + self.started_agent_pids.append(pid) + + self.__wait_for_control_connection_to_exit__() + + return pid def stop_agent(self, agent_uuid): - # Confirm agent running - _log.debug("STOPPING AGENT: {}".format(agent_uuid)) + with with_os_environ(self.env): + # Confirm agent running + self.__wait_for_control_connection_to_exit__() - cmd = ['volttron-ctl'] - cmd.extend(['stop', agent_uuid]) - res = execute_command(cmd, env=self.env, logger=_log, - err_prefix="Error stopping agent") - return self.agent_pid(agent_uuid) + _log.debug("STOPPING AGENT: {}".format(agent_uuid)) + + cmd = [self.vctl_exe] + cmd.extend(['stop', agent_uuid]) + res = execute_command(cmd, env=self.env, logger=_log, + err_prefix="Error stopping agent") + return self.agent_pid(agent_uuid) def list_agents(self): - agent_list = self.dynamic_agent.vip.rpc('control', 'list_agents').get(timeout=10) - return agent_list + with with_os_environ(self.env): + agent_list = self.dynamic_agent.vip.rpc('control', 'list_agents').get(timeout=10) + return agent_list def remove_agent(self, agent_uuid): """Remove the agent specified by agent_uuid""" - _log.debug("REMOVING AGENT: {}".format(agent_uuid)) - - cmd = ['volttron-ctl'] - cmd.extend(['remove', agent_uuid]) - res = execute_command(cmd, env=self.env, logger=_log, - err_prefix="Error removing agent") - return self.agent_pid(agent_uuid) + with with_os_environ(self.env): + _log.debug("REMOVING AGENT: {}".format(agent_uuid)) + self.__wait_for_control_connection_to_exit__() + cmd = [self.vctl_exe] + cmd.extend(['remove', agent_uuid]) + res = execute_command(cmd, env=self.env, logger=_log, + err_prefix="Error removing agent") + pid = None + try: + pid = self.agent_pid(agent_uuid) + except RuntimeError: + self.logit("Runtime error occured successfully as it was expected") + finally: + if pid is not None: + raise RuntimeError(f"Expected runtime error for looking at removed agent. {agent_uuid}") def remove_all_agents(self): - if self._instance_shutdown: - return - agent_list = self.dynamic_agent.vip.rpc('control', 'list_agents').get(timeout=10) - for agent_props in agent_list: - self.dynamic_agent.vip.rpc('control', 'remove_agent', agent_props['uuid']).get(timeout=10) + with with_os_environ(self.env): + if self._instance_shutdown: + return + agent_list = self.dynamic_agent.vip.rpc('control', 'list_agents').get(timeout=10) + for agent_props in agent_list: + self.dynamic_agent.vip.rpc('control', 'remove_agent', agent_props['uuid']).get(timeout=10) + time.sleep(0.2) def is_agent_running(self, agent_uuid): - return self.agent_pid(agent_uuid) is not None + with with_os_environ(self.env): + return self.agent_pid(agent_uuid) is not None def agent_pid(self, agent_uuid): """ @@ -1066,8 +1250,9 @@ def agent_pid(self, agent_uuid): :param agent_uuid: :return: """ + self.__wait_for_control_connection_to_exit__() # Confirm agent running - cmd = ['volttron-ctl'] + cmd = [self.vctl_exe] cmd.extend(['status', agent_uuid]) pid = None try: @@ -1125,7 +1310,7 @@ def confirm_agent_running(self, agent_name, max_retries=5, retries = 0 while not running and retries < max_retries: status = self.test_aip.status_agents() - print ("Status", status) + print("Status", status) if len(status) > 0: status_name = status[0][1] assert status_name == agent_name @@ -1142,21 +1327,30 @@ def setup_federation(self, config_path): Set up federation using the given config path :param config_path: path to federation config yml file. """ - _log.debug("Setting up federation using config : {}".format(config_path)) + with with_os_environ(self.env): + _log.debug("Setting up federation using config : {}".format(config_path)) - cmd = ['vcfg'] - cmd.extend(['--vhome', self.volttron_home, '--instance-name', self.instance_name,'--rabbitmq', - "federation", config_path]) - execute_command(cmd, env=self.env, logger=_log, - err_prefix="Error setting up federation") + cmd = ['vcfg'] + cmd.extend(['--vhome', self.volttron_home, '--instance-name', self.instance_name, '--rabbitmq', + "federation", config_path]) + execute_command(cmd, env=self.env, logger=_log, + err_prefix="Error setting up federation") def restart_platform(self): - self.shutdown_platform() - self.startup_platform(vip_address=self.vip_address, - bind_web_address=self.bind_web_address, - volttron_central_address=self.volttron_central_address, - volttron_central_serverkey=self.volttron_central_serverkey) - gevent.sleep(1) + with with_os_environ(self.env): + original_skip_cleanup = self.skip_cleanup + self.skip_cleanup = True + self.shutdown_platform() + self.skip_cleanup = original_skip_cleanup + # since this is a restart, we don't want to do an update/overwrite of services. + self.startup_platform(vip_address=self.vip_address, + bind_web_address=self.bind_web_address, + volttron_central_address=self.volttron_central_address, + volttron_central_serverkey=self.volttron_central_serverkey, + perform_preauth_service_agents=False) + # we would need to reset shutdown flag so that platform is properly cleaned up on the next shutdown call + self._instance_shutdown = False + gevent.sleep(1) def stop_platform(self): """ @@ -1165,43 +1359,42 @@ def stop_platform(self): maintain the context of the platform. :return: """ + with with_os_environ(self.env): + if not self.is_running(): + return - if not self.is_running(): - return - - # Update OS env to current platform's env so get_home() call will result - # in correct home director. Without this when more than one test instance are created, get_home() - # will return home dir of last started platform wrapper instance - os.environ.update(self.env) - - self.dynamic_agent.vip.rpc(CONTROL, "shutdown").get() - self.dynamic_agent.core.stop() - if self.p_process is not None: - try: - gevent.sleep(0.2) - self.p_process.terminate() - gevent.sleep(0.2) - except OSError: - self.logit('Platform process was terminated.') - else: - self.logit("platform process was null") - # - # cmd = ['volttron-ctl'] - # cmd.extend(['shutdown', '--platform']) - # try: - # execute_command(cmd, env=self.env, logger=_log, - # err_prefix="Error shutting down platform") - # except RuntimeError: - # if self.p_process is not None: - # try: - # gevent.sleep(0.2) - # self.p_process.terminate() - # gevent.sleep(0.2) - # except OSError: - # self.logit('Platform process was terminated.') - # else: - # self.logit("platform process was null") - # gevent.sleep(1) + self.dynamic_agent.vip.rpc(CONTROL, "shutdown").get() + self.dynamic_agent.core.stop() + if self.p_process is not None: + try: + gevent.sleep(0.2) + self.p_process.terminate() + gevent.sleep(0.2) + except OSError: + self.logit('Platform process was terminated.') + else: + self.logit("platform process was null") + # + # cmd = [self.vctl_exe] + # cmd.extend(['shutdown', '--platform']) + # try: + # execute_command(cmd, env=self.env, logger=_log, + # err_prefix="Error shutting down platform") + # except RuntimeError: + # if self.p_process is not None: + # try: + # gevent.sleep(0.2) + # self.p_process.terminate() + # gevent.sleep(0.2) + # except OSError: + # self.logit('Platform process was terminated.') + # else: + # self.logit("platform process was null") + # gevent.sleep(1) + + def __remove_home_directory__(self): + self.logit('Removing {}'.format(self.volttron_home)) + shutil.rmtree(Path(self.volttron_home).parent, ignore_errors=True) def shutdown_platform(self): """ @@ -1210,60 +1403,61 @@ def shutdown_platform(self): pids are still running then kill them. """ - # Update OS env to current platform's env so get_home() call will result - # in correct home director. Without this when more than one test instance are created, get_home() - # will return home dir of last started platform wrapper instance - os.environ.update(self.env) - - # Handle cascading calls from multiple levels of fixtures. - if self._instance_shutdown: - return - - if not self.is_running(): - return - - running_pids = [] - if self.dynamic_agent: # because we are not creating dynamic agent in setupmode - for agnt in self.list_agents(): - pid = self.agent_pid(agnt['uuid']) - if pid is not None and int(pid) > 0: - running_pids.append(int(pid)) + with with_os_environ(self.env): + # Handle cascading calls from multiple levels of fixtures. + if self._instance_shutdown: + self.logit(f"Instance already shutdown {self._instance_shutdown}") + return + + if not self.is_running(): + self.logit(f"Instance not running {self.is_running()} and skip cleanup: {self.skip_cleanup}") + if not self.skip_cleanup: + self.__remove_home_directory__() + return + + running_pids = [] + if self.dynamic_agent: # because we are not creating dynamic agent in setupmode + for agnt in self.list_agents(): + pid = self.agent_pid(agnt['uuid']) + if pid is not None and int(pid) > 0: + running_pids.append(int(pid)) + if not self.skip_cleanup: + self.remove_all_agents() + # don't wait indefinetly as shutdown will not throw an error if RMQ is down/has cert errors + self.dynamic_agent.vip.rpc(CONTROL, 'shutdown').get(timeout=10) + self.dynamic_agent.core.stop() + self.dynamic_agent = None + + if self.p_process is not None: + try: + gevent.sleep(0.2) + self.p_process.terminate() + gevent.sleep(0.2) + except OSError: + self.logit('Platform process was terminated.') + pid_file = "{vhome}/VOLTTRON_PID".format(vhome=self.volttron_home) + try: + self.logit(f"Remove PID file: {pid_file}") + os.remove(pid_file) + except OSError: + self.logit('Error while removing VOLTTRON PID file {}'.format(pid_file)) + else: + self.logit("platform process was null") + + for pid in running_pids: + if psutil.pid_exists(pid): + self.logit("TERMINATING: {}".format(pid)) + proc = psutil.Process(pid) + proc.terminate() + + self.logit(f"VHOME: {self.volttron_home}, Skip clean up flag is {self.skip_cleanup}") + if self.messagebus == 'rmq': + self.logit("Calling rabbit shutdown") + stop_rabbit(rmq_home=self.rabbitmq_config_obj.rmq_home, env=self.env, quite=True) if not self.skip_cleanup: - self.remove_all_agents() - # don't wait indefinetly as shutdown will not throw an error if RMQ is down/has cert errors - self.dynamic_agent.vip.rpc(CONTROL, 'shutdown').get(timeout=10) - self.dynamic_agent.core.stop() - - if self.p_process is not None: - try: - gevent.sleep(0.2) - self.p_process.terminate() - gevent.sleep(0.2) - except OSError: - self.logit('Platform process was terminated.') - pid_file = "{vhome}/VOLTTRON_PID".format(vhome=self.volttron_home) - try: - os.remove(pid_file) - except OSError: - self.logit('Error while removing VOLTTRON PID file {}'.format(pid_file)) - else: - self.logit("platform process was null") - - for pid in running_pids: - if psutil.pid_exists(pid): - self.logit("TERMINATING: {}".format(pid)) - proc = psutil.Process(pid) - proc.terminate() - - print(" Skip clean up flag is {}".format(self.skip_cleanup)) - if self.messagebus == 'rmq': - print("Calling rabbit shutdown") - stop_rabbit(rmq_home=self.rabbitmq_config_obj.rmq_home, env=self.env, quite=True) - if not self.skip_cleanup: - self.logit('Removing {}'.format(self.volttron_home)) - shutil.rmtree(self.volttron_home, ignore_errors=True) + self.__remove_home_directory__() - self._instance_shutdown = True + self._instance_shutdown = True def __repr__(self): return str(self) @@ -1322,35 +1516,50 @@ def __init__(self, platform_wrapper: PlatformWrapper = None): platform_wrapper = PlatformWrapper() assert platform_wrapper.is_running(), "Platform must be running" assert platform_wrapper.bind_web_address, "Platform must have web address" - assert platform_wrapper.ssl_auth, "Platform must be ssl enabled" + # assert platform_wrapper.ssl_auth, "Platform must be ssl enabled" self._wrapper = platform_wrapper self.bind_web_address = self._wrapper.bind_web_address self.certsobj = self._wrapper.certsobj - def create_web_admin(self, username, password): - """ Creates a global master user for the platform https interface. + def create_web_admin(self, username, password, messagebus='rmq'): + """ Creates a global administrator user for the platform https interface. :param username: :param password: :return: """ + from volttron.platform.web.admin_endpoints import AdminEndpoints + from volttrontesting.utils.web_utils import get_test_web_env + + # params = urlencode(dict(username='admin', password1='admin', password2='admin')) + # env = get_test_web_env("/admin/setpassword", method='POST') # , input_data=input) + # adminep = AdminEndpoints() + # resp = adminep.admin(env, params) + # # else: data = dict(username=username, password1=password, password2=password) - url = self.bind_web_address +"/admin/setpassword" - #resp = requests.post(url, data=data, + url = self.bind_web_address + "/admin/setpassword" + # resp = requests.post(url, data=data, # verify=self.certsobj.remote_cert_bundle_file()) - resp = requests.post(url, data=data, - verify=self.certsobj.cert_file( - name=self.certsobj.root_ca_name)) + + if self._wrapper.ssl_auth: + resp = requests.post(url, data=data, + verify=self.certsobj.cert_file(self.certsobj.root_ca_name)) + else: + resp = requests.post(url, data=data, verify=False) + print(f"RESPONSE: {resp}") return resp def authenticate(self, username, password): data = dict(username=username, password=password) - url = self.bind_web_address+"/authenticate" + url = self.bind_web_address + "/authenticate" # Passing dictionary to the data argument will automatically pass as # application/x-www-form-urlencoded to the request - #resp = requests.post(url, data=data, + # resp = requests.post(url, data=data, # verify=self.certsobj.remote_cert_bundle_file()) - resp = requests.post(url, data=data, verify=self.certsobj.cert_file( - self.certsobj.root_ca_name)) + if self._wrapper.ssl_auth: + resp = requests.post(url, data=data, + verify=self.certsobj.cert_file(self.certsobj.root_ca_name)) + else: + resp = requests.post(url, data=data, verify=False) return resp diff --git a/volttrontesting/utils/web_utils.py b/volttrontesting/utils/web_utils.py index df91324405..e6537963bb 100644 --- a/volttrontesting/utils/web_utils.py +++ b/volttrontesting/utils/web_utils.py @@ -20,7 +20,7 @@ def get_test_web_env(path, input_data: bytes = None, query_string='', url_scheme :param query_string: form or other data to be used as input to the environment. :param url_scheme: the scheme used to set the environment (http, https, ws, wss) :param method: REQUEST_METHOD used for this request (GET, POST, PUT etc) - :return: A dictionary to be passed to the app_routing function in the masterwebservice + :return: A dictionary to be passed to the app_routing function in the platformwebservice """ if path is None: raise ValueError("Invalid path specified. Cannot be None.") diff --git a/volttrontesting/zmq/test_zmq.py b/volttrontesting/zmq/test_zmq.py index 60b334a1b6..789c3fe707 100644 --- a/volttrontesting/zmq/test_zmq.py +++ b/volttrontesting/zmq/test_zmq.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/volttrontesting/zmq/test_zmqsub.py b/volttrontesting/zmq/test_zmqsub.py index b535e687d9..abd49cb3a4 100644 --- a/volttrontesting/zmq/test_zmqsub.py +++ b/volttrontesting/zmq/test_zmqsub.py @@ -1,7 +1,7 @@ # -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # -# Copyright 2019, Battelle Memorial Institute. +# Copyright 2020, Battelle Memorial Institute. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.