diff --git a/.gitignore b/.gitignore index 87049d3c03..15c361832c 100644 --- a/.gitignore +++ b/.gitignore @@ -28,3 +28,5 @@ __pycache__ start-rabbitmq stop-rabbitmq rabbitmq.log +.coverage +htmlcov/ diff --git a/README.md b/README.md index 05a034f8ab..4bb55c13c9 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ ![image](docs/source/images/VOLLTRON_Logo_Black_Horizontal_with_Tagline.png) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/fcf58045b4804edf8f4d3ecde3016f76)](https://app.codacy.com/gh/VOLTTRON/volttron?utm_source=github.com&utm_medium=referral&utm_content=VOLTTRON/volttron&utm_campaign=Badge_Grade_Settings) VOLTTRON™ is an open source platform for distributed sensing and control. The platform provides services for collecting and storing data from buildings and @@ -273,10 +274,10 @@ tail volttron.log Listener agent heartbeat publishes appear in the logs as: ```sh -2016-10-17 18:17:52,245 (listeneragent-3.2 11367) listener.agent INFO: Peer: 'pubsub', Sender: 'listeneragent-3.2_1' -:, Bus: u'', Topic: 'heartbeat/listeneragent-3.2_1', Headers: -{'Date': '2016-10-18T01:17:52.239724+00:00', 'max_compatible_version': u'', 'min_compatible_version': '3.0'}, -Message: {'status': 'GOOD', 'last_updated': '2016-10-18T01:17:47.232972+00:00', 'context': 'hello'} +2020-04-20 18:49:31,395 (listeneragent-3.3 13458) __main__ INFO: Peer: pubsub, Sender: listeneragent-3.2_1:, Bus: , Topic: heartbeat/listeneragent-3.2_1, Headers: {'TimeStamp': '2020-04-20T18:49:31.393651+00:00', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: +'GOOD' +2020-04-20 18:49:36,394 (listeneragent-3.3 13458) __main__ INFO: Peer: pubsub, Sender: listeneragent-3.2_1:, Bus: , Topic: heartbeat/listeneragent-3.2_1, Headers: {'TimeStamp': '2020-04-20T18:49:36.392294+00:00', 'min_compatible_version': '3.0', 'max_compatible_version': ''}, Message: +'GOOD' ``` To top the platform run the following command: diff --git 
a/ci-integration/virtualization/requirements_test.txt b/ci-integration/virtualization/requirements_test.txt index 6f11e8a3c9..7d67750050 100644 --- a/ci-integration/virtualization/requirements_test.txt +++ b/ci-integration/virtualization/requirements_test.txt @@ -4,4 +4,5 @@ pytest-timeout mock websocket-client numpy>1.13<2 -pandas \ No newline at end of file +pandas +mysql-connector-python-rf diff --git a/docs/source/community_resources/index.rst b/docs/source/community_resources/index.rst index 83a562ddfc..1c174ac23f 100644 --- a/docs/source/community_resources/index.rst +++ b/docs/source/community_resources/index.rst @@ -12,19 +12,19 @@ Slack Channel ^^^^^^^^^^^^^ volttron-community.slack.com is where the |VOLTTRON| community at large can ask questions and meet with others -using |VOLTTRON| Signup via https://volttron-community.signup.team/ +using |VOLTTRON|. To be added to Slack please email the VOLTTRON team at +`volttron@pnnl.gov `__. Mailing List ^^^^^^^^^^^^ -Join the mailing list by emailing -`volttron@pnnl.gov `__. +Join the mailing list by emailing `volttron@pnnl.gov `__. Stack Overflow ^^^^^^^^^^^^^^ The VOLTTRON community supports questions being asked and answered through Stack Overflow. The questions tagged with -the volttron tag can be found at http://stackoverflow.com/questions/tagged/volttron. +the `volttron` tag can be found at http://stackoverflow.com/questions/tagged/volttron. Office Hours ^^^^^^^^^^^^ diff --git a/docs/source/core_services/control/AgentStatus.rst b/docs/source/core_services/control/AgentStatus.rst index f9869cafcc..f744741f5a 100644 --- a/docs/source/core_services/control/AgentStatus.rst +++ b/docs/source/core_services/control/AgentStatus.rst @@ -21,9 +21,8 @@ platform along with their uuid, associated `tag `__, and which was installed. Agents can be controlled with this using "--name ". Note, if multiple instances of a wheel are installed they will all have the same name and can be controlled as a group. 
-- `TAG `__ is a user provided tag which makes it simpler to - track and refer to agents. Using "--tag " agents can be controlled - using this +- `TAG `__ is a user-provided tag which makes it simpler to + track and refer to agents. Agents can be controlled by using "--tag". - PRI is the priority for agents which have been "enabled" using the ``vctl enable`` command. When enabled, agents will be automatically started in priority order along with the platform. diff --git a/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst b/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst index aa3fbed71f..7cd84fb189 100644 --- a/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst +++ b/docs/source/core_services/drivers/driver_configuration/ecobee_web_driver.rst @@ -1,273 +1,293 @@ +.. _ecobee-web-driver: + +************* Ecobee Driver -============= +************* -The Ecobee driver is an implementation of a VOLTTRON driver frame work Interface. -An instance of a VOLTTRON Interface serves as an interface between the VOLTTRON -Master Driver agent and some device. In the case of the Ecobee driver, the -interface is responsible for providing a way for the Master Driver to retrieve -data from and set values for thermostats configured for a user using the Ecobee -remote API (https://www.ecobee.com/home/developer/api/introduction/index.shtml) +The Ecobee driver is an implementation of a :ref:`VOLTTRON driver framework ` Interface. +In this case, the Master Driver issues commands to the Ecobee driver to collect data from and send control signals to +`Ecobee's remote web API `_ -Configuration File ------------------- +.. note:: -The Ecobee driver uses two configuration files, similar to many other VOLTTRON -agents. + Reading the driver framework and driver configuration documentation prior to following this guide will help the user + to understand drivers, driver communication, and driver configuration files. 
-This is an example driver configuration: +This guide covers: -:: +* Creating an Ecobee application via the web interface +* Creating an Ecobee driver configuration file, including finding the user's Ecobee API key and Ecobee thermostat serial + number +* Creating an Ecobee registry configuration file +* Installing the Master Driver and loading Ecobee driver and registry configurations +* Starting the driver and viewing Ecobee data publishes - { - "driver_config": {"ACCESS_TOKEN": "", - "API_KEY":"", - "REFRESH_TOKEN": "", - "AUTHORIZATION_CODE": "", - "PIN": "", - "DEVICE_ID": , - "GROUP_ID": "", - "CACHE_IDENTITY": "platform.drivercache", - "config_name": "devices/ecobee"}, - "driver_type": "ecobee", - "registry_config":"config://ecobee.csv", - "interval": 180, - "timezone": "UTC" - } - -The driver configuration works as follows: - driver_config: this section specifies values used by the driver agent during - operation. +.. _Ecobee-Application: - ACCESS_TOKEN - This is the access token provided by Ecobee. If the user - does not initially have this value, it can be left as an empty string and - fetched by the driver later. +Ecobee Application +################## - API_KEY - This is the User's API key. This must be obtained by the user from - the Ecobee web UI and provided in this part of the configuration. Notes - on how to do this will be provided below. +Connecting the Ecobee driver to the Ecobee API requires configuring your account with an Ecobee application. - REFRESH_TOKEN - This is the access token provided by Ecobee. If the user - does not initially have this value, it can be left as an empty string and - fetched by the driver later. +#. Log into the `Ecobee site `_ - AUTHORIZATION_CODE - This is the access token provided by Ecobee. If the user - does not initially have this value, it can be left as an empty string and - fetched by the driver later. +#. 
Click on the "hamburger" icon on the right to open the account menu, then click "Developer" - PIN - This pin is provided by the Ecobee API when requesting a new - authorization code. The driver will obtain a new authorization code and pin - for the driver's user, however the user is responsible for validating the - authorization code using the pin. Notes on how to do this will be provided - below. + .. image:: files/ecobee_developer_menu.png - DEVICE_ID - This is the device number of the Ecobee thermostat the driver - is responsible for operating. This must be obtained by the user from the - Ecobee web UI. Notes on how to do this will be provided below. +#. On the bottom-left corner of the screen that appears, click "Create New" - GROUP_ID - This is an arbitrary string used to specify groups of thermostats. - For the purposes of the Ecobee driver, it is recommended that a group correspond - to the list of thermostats operated under a single user account (as Ecobee - provides data foor all thermostats on a user's account with a data request). - If only one user is running Ecobee drivers on a platform, this value can be - left at the default, but it should contain some string in any case. + .. image:: files/ecobee_create_app.png - CACHE_IDENTITY - This should match the string provided as the identity when - installing the Driver HTTP Cache agent. failure to provide a matching identity - will result in the platform being unable to send requests to the Driver HTTP Cache - agent, which is required to be running for the Ecobee driver's operations. +#. Fill out the name, summary, and description forms as desired. Click "Authorization Method" and from the drop-down + that appears, select "ecobee PIN" (this will enable an extra layer of authentication to protect your account) - config_name - This should directly match the device topic used in the - installation of the Ecobee driver (see Installation below). +#. 
Record the API key for the Application from the Developer menu - driver_type: This value should match the name of the python file which contains - the interface class implementation for the ecobee driver. This should not change - if the user has not changed the name of that Python file. + .. figure:: files/ecobee_api_key.png - registry_config: This should match the path to the registry configuration file - specified during installation (see Installation below). + From Ecobee `authenication docs `_ - interval: This should specify the time in seconds between publishes to the - message bus by the Master Driver for the Ecobee driver (Note: the user can - specify an interval for the Ecobee driver which is shorter than 180 seconds, - however Ecobee API data is only updated at 180 second intervals, so old data - will be published if a scrape occurs between updates.) - timezone: Timezone to use for timestamps. +Configuration File +################## -Configuring Ecobee for First Time Use -------------------------------------- +The Ecobee driver uses two configuration files, a driver configuration which sets the parameters of the behavior of the +driver, and registry configuration which instructs the driver on how to interact with each point. -The following configuration is the basic starting point configuration file: +This is an example driver configuration: -:: +.. code-block:: JSON { - "driver_config": {"ACCESS_TOKEN": "", - "API_KEY":"", - "REFRESH_TOKEN": "", - "AUTHORIZATION_CODE": "", - "PIN": "", - "DEVICE_ID": , - "GROUP_ID": "", - "CACHE_IDENTITY": "platform.drivercache", - "config_name": "devices/ecobee"}, + "driver_config": { + "API_KEY": "abc123", + "DEVICE_ID": 8675309 + }, "driver_type": "ecobee", - "registry_config":"config://ecobee.csv", + "registry_config":"config://campus/building/ecobee.csv", "interval": 180, "timezone": "UTC" } -Notice: - - ACCESS_TOKEN, REFRESH_TOKEN, AUTHORIZATION_CODE and PIN values are all left as empty strings. 
These - values will be obtained by the driver as it starts. After starting, the user will be required to validate the - Authorization code by inputting the pin in the Web UI by going to the UI "hamburger" > "MyApps" > "Add Application" - then select "Validate" and finally "Add Application". - - Values for API_KEY and DEVICE_ID must be obtained by the user. Additional instructions for obtaining these values - can be found at the bottom of this documentation. - - DEVICE_ID should be added as an integer representation of the thermostat's serial number. - - The CACHE_IDENTITY value may be specified however the user specifies the Driver HTTP Cache agent's identity during - installation. +The driver configuration works as follows: - GROUP_ID is an arbitrarily chosen identifier which should correspond to one name given to the thermostats for one - Ecobee user account (all thermostats for that user account will be represented by this group id; group id does not - affect the data in any way, it is used by the Driver HTTP Cache agent as a way of mapping drivers to their - corresponding cached data). ++-----------------+----------------------------------------------------------------------------------------------------+ +| config field | description | ++=================+====================================================================================================+ +| driver_config | this section specifies values used by the driver agent during operation | ++-----------------+----------------------------------------------------------------------------------------------------+ +| API_KEY | This is the User's API key. This must be obtained by the user from the Ecobee web UI and provided | +| | in this part of the configuration. Notes on how to do this will be provided below. 
| ++-----------------+----------------------------------------------------------------------------------------------------+ +| DEVICE_ID | This is the device number of the Ecobee thermostat the driver is responsible for operating. This | +| | must be obtained by the user from the Ecobee web UI. Notes on how to do this will be provided | +| | below. | ++-----------------+----------------------------------------------------------------------------------------------------+ +| driver_type | This value should match the name of the python file which contains the interface class | +| | implementation for the Ecobee driver and should not change. | ++-----------------+----------------------------------------------------------------------------------------------------+ +| registry_config | This should a user specified path of the form "config://. It is recommended to use the | +| | device topic string following "devices" with the file extension | +| | ("config:///`_ | ++-----------------+----------------------------------------------------------------------------------------------------+ + +.. note:: + + Values for API_KEY and DEVICE_ID must be obtained by the user. DEVICE_ID should be added as an integer + representation of the thermostat's serial number. + + **Getting API Key** + + Ecobee API keys require configuring an application using the Ecobee web UI. For more information on configuring an + application and obtaining the API key, please refer to the `Ecobee Application `_ heading in + this documentation. + + **Finding Device Identifier** + + To find your Ecobee thermostat's device identifier: + + 1. Log into the `Ecobee customer portal `_ + 2. From the Home screen click "About My Ecobee" + 3. The thermostat identifier is the serial number listed on the About screen - config_name should match exactly the path used to store the driver configuration file in the config store. 
Registry Configuration ---------------------- -This file specifies the behavior of "registers" in Ecobee API data. While -the API does not have registers in the sense that a PLC may, this way of doing -things allows the user to hone in on specific values, and makes the driver -highly configurable (and therefore resilient to changes made by Ecobee). +This file specifies how data is read from Ecobee API response data as well as how points are set via the Master Driver +and actuator. -It is likely that more points may be added to obtain additional data, but -barring implementation changes by Ecobee it is unlikely that the values in this -configuration will need to change substantially, as most thermostats provide the +It is likely that more points may be added to obtain additional data, but barring implementation changes by Ecobee it is +unlikely that the values in this configuration will need to change substantially, as most thermostats provide the same range of data in a similar format. This is an example registry configuration: - Point Name,Volttron Point Name,Units,Type,Writable,Readable,Default Value,Notes - fanMinOnTime,fanMinOnTime,seconds,setting,True,True,, - hvacMode,hvacMode,seconds,setting,True,True,, - humidity,humidity,%,setting,False,True,, - coolHoldTemp,coolHoldTemp,degF,hold,True,False,, - heatHoldTemp,heatHoldTemp,degF,hold,True,False,, - desiredCool,desiredCool,degF,hold,False,True,, - desiredHeat,desiredHeat,degF,hold,False,True,, - actualTemperature,actualTemperature,degF,hold,False,True,, ++-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ +| Point Name | Volttron Point Name | Units | Type | Writable | Readable | Default Value | Notes | ++===================+=====================+=========+=========+==========+==========+===============+=======+ +| fanMinOnTime | fanMinOnTime | seconds | setting | True | True | | | 
++-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ +| hvacMode | hvacMode | seconds | setting | True | True | | | ++-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ +| humidity | humidity | % | setting | False | True | | | ++-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ +| coolHoldTemp | coolHoldTemp | degF | hold | True | False | | | ++-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ +| heatHoldTemp | heatHoldTemp | degF | hold | True | False | | | ++-------------------+---------------------+---------+---------+----------+----------+---------------+-------+ +| actualTemperature | actualTemperature | degF | hold | False | True | | | ++-------------------+---------------------+-------------------+----------+----------+---------------+-------+ + +.. note:: + + An example registry configuration containing all points from the development device is available in the + `examples/configurations/drivers/ecobee.csv` file in the VOLTTRON repository. 
+ This configuration works as follows: - Point Name - Name of a point as it appears in Ecobee response data (example - below) ++---------------------+------------------------------------------------------------------------------------------------+ +| config field | description | ++=====================+================================================================================================+ +| Point Name | Name of a point as it appears in Ecobee response data (example below) | ++---------------------+------------------------------------------------------------------------------------------------+ +| Volttron Point Name | Name of a point as a user would like it to be displayed in data publishes to the message bus | ++---------------------+------------------------------------------------------------------------------------------------+ +| Units | Unit of measurement specified by remote API | ++---------------------+------------------------------------------------------------------------------------------------+ +| Type | The Ecobee driver registry configuration supports "setting" and "hold" register types, based | +| | on how the data is represented in Ecobee response data (example below) | ++---------------------+------------------------------------------------------------------------------------------------+ +| Writable | Whether or not the point is able to be written to. This may be determined by what Ecobee | +| | allows, and by the operation of Ecobee's API (to set an Ecobee cool/heat hold, cool/HoldTemp | +| | is used, but to read other data points are used and therefore are not writable; this is a | +| | quirk of Ecobee's API) | ++---------------------+------------------------------------------------------------------------------------------------+ +| Readable | Whether or not the point is able to be read as specified. 
This may be determined by what | +| | Ecobee allows, and by the operation of Ecobee's API (to set an Ecobee cool/heat hold, | +| | cool/HoldTemp is used, however the requested hold values are represented as desiredCool/Heat | +| | in Ecobee's response data; this is a quirk of Ecobee's API) | ++---------------------+------------------------------------------------------------------------------------------------+ +| Default Value | Used to send device defaults to the Ecobee API, this is optional. | ++---------------------+------------------------------------------------------------------------------------------------+ +| Notes | Any user specified notes, this is optional | ++---------------------+------------------------------------------------------------------------------------------------+ + +For additional explanation on the quirks of Ecobee's readable/writable points, visit: +https://www.ecobee.com/home/developer/api/documentation/v1/functions/SetHold.shtml - Volttron Point Name - Name of a point as a user would like it to be displayed - in Volttron - Units - Unit of measurement specified by remote API +Installation +############ - Type - The Ecobee driver registry configuration supports "setting" and "hold" - register types, based on how the data is represented in Ecobee response data ( - example below) +The following instructions make up the minimal steps required to set up an instance of the Ecobee driver on the VOLTTRON +platform and connect it to the Ecobee remote API: - Writable - Whether or not the point is able to be written to. This may be - determined by what Ecobee allows, and by the operation of Ecobee's API (to set - an Ecobee cool/heat hold, cool/HoldTemp is used, but to read other data points - are used and therefore are not writable; this is a quirk of Ecobee's API) +#. Create a directory using the path $VOLTTRON_ROOT/configs and create two files, `ecobee.csv` and `ecobee.config`. 
Copy the registry config to the `ecobee.csv` file and the driver config to the `ecobee.config` file. Modify the + `API_KEY` and `DEVICE_ID` fields from the driver config with your own API key and device serial number. - Readable - Whether or not the point is able to be read as specified. This may be - determined by what Ecobee allows, and by the operation of Ecobee's API - (to set an Ecobee cool/heat hold, cool/HoldTemp is used, however the requested - hold values are represented as desiredCool/Heat in Ecobee's response data; this - is a quirk of Ecobee's API) +#. If the platform has not been started: - Default Value - Used to send device defaults to the Ecobee API, this is optional. + .. code-block:: Bash - Notes - Any user specified notes, this is optional + ./start-volttron ---- -Explanation on the quirks of Ecobee's readable/writable points, visit: -https://www.ecobee.com/home/developer/api/documentation/v1/functions/SetHold.shtml ---- +#. Be sure that the environment has been activated - you should see (volttron) next to @ in your terminal + window. To activate an environment, use the following command. -Installation ------------- + .. code-block:: Bash -These are the most basic installation steps for the Ecobee driver. This guide -assumes the user is in the VOLTTRON_ROOT directory, the VOLTTRON platform has -been installed and bootstrapped per the instructions in the VOLTTRON README, -and that the Driver HTTP Cache agent has been installed using the installation -instructions above. + source env/bin/activate -Below are the seup instructions. +#. Install a Master Driver if one is not yet installed - 1. If the platform has not been started: + .. code-block:: Bash - ./start-volttron + python scripts/install-agent.py --agent-source services/core/MasterDriverAgent --config \ + examples/configurations/drivers/master-driver.agent --tag platform.driver - 2. If the environment has not been activated - you should see (volttron) next to @ in your terminal window +#. 
Load the driver configuration into the configuration store ("vctl config list platform.driver" can be used to show + installed configurations) - . env/bin/activate + .. code-block:: Bash - 3. If the Driver Cache has not yet been installed and started: + vctl config store platform.driver devices/campus/building/ecobee $VOLTTRON_ROOT/configs/ecobee.config - python scripts/install-agent.py -s services/core/DriverHTTPCache -i +#. Load the driver's registry configuration into the configuration store - vctl start + .. code-block:: Bash - 4. Install a Master Driver if one is not yet installed + vctl config store platform.driver campus/building/ecobee.csv $VOLTTRON_ROOT/configs/ecobee.csv --csv - python scripts/install-agent.py -s services/core/MasterDriverAgent -c +#. Start the master driver - 5. Load the driver configuration into the configuration store ("vctl config list platform.driver" can be used to show installed configurations) + .. code-block:: Bash - vctl config store platform.driver + vctl start platform.driver - 6. Load the driver's registry configuration into the configuration store +At this point, the master driver will start, configure the driver agent, and data should start to publish on the publish +interval. - vctl config store platform.driver --csv +.. note:: - 7. Start the master driver + If starting the driver for the first time, or if the authorization which is managed by the driver is out of date, + the driver will perform some additional setup internally to authenticate the driver with the Ecobee API. This stage + will require the user enter a pin provided in the `volttron.log` file to the Ecobee web UI. The Ecobee driver has + a wait period of 60 seconds to allow users to enter the pin code into the Ecobee UI. Instructions for pin + verification follow. - vctl start platform.driver -At this point, the master driver will start, configure the driver agent, and -data should start to publish on the publish interval. 
If the authentication code -provided in the configuration file (as above) is out of date, a new -authentication code will be obtained by the driver. This will require the user -enter the pin (found in the volttron logs) into the MyApps section of the Ecobee -web UI. Failure to do so within 60 seconds will result in the driver being unable -to get Ecobee data. Instructions on how to enter the pin will be included below. +PIN Verification steps: +----------------------- + +#. Obtain the pin from the VOLTTRON logs. The pin is a 4 character long string in the logs flanked by 2 rows of + asterisks + + .. image:: files/ecobee_pin.png +#. Log into the `Ecobee UI `_ . After logging in, the + customer dashboard will be brought up, which features a series of panels (where the serial number was found for + device configuration) and a "hamburger" menu. .. image:: files/ecobee_console.png +#. Add the application: Click the "hamburger" icon which will display a list of items in a panel that becomes + visible on the right. Click "My Apps", then "Add application". A text form will appear, enter the pin provided in + VOLTTRON logs here, then click "validate" and "add application". .. image:: files/ecobee_verify_pin.png +This will complete the pin verification step. 
Ecobee Driver Usage -------------------- +################### At the configured interval, the master driver will publish a JSON object with data obtained from Ecobee based on the provided configuration files. +To view the publishes in the `volttron.log` file, install and start a ListenerAgent: + +.. code-block:: Bash + + python scripts/install-agent.py -s examples/ListenerAgent + The following is an example publish: -:: +.. code-block:: Bash 'Status': [''], 'Vacations': [{'coolHoldTemp': 780, @@ -319,61 +339,11 @@ The following is an example publish: Individual points can be obtained via JSON RPC on the VOLTTRON Platform. In an agent: - self.vip.rpc.call("platform.driver", "get_point", , ) - -Set_point ---------- - -To set points using the Ecobee driver, it is recommended to use the actuator -agent. Explanations of the actuation can be found in the VOLTTRON readthedocs -and example agent code can be found in the CsvDriverAgent ( -examples/CSVDriver/CsvDriverAgent/agent.py) - -Setting values for Vacations and Programs requires understanding Vacation and -Program object structure for Ecobee. - -Documentation for Vacation structure can be found here: -https://www.ecobee.com/home/developer/api/documentation/v1/functions/CreateVacation.shtml +.. code-block:: Python -Documentation for Program structure can be found here: -https://www.ecobee.com/home/developer/api/examples/ex11.shtml - -When using set_point for vacation, the user may specify True for the delete -keyword to remove an existing vacation. If deleting a vacation, the value -parameter should specify the name of a vacation to delete. - -When using set_point for program, specifying a program structure will create a -new program. Otherwise, if the user has not specified resume_all, Ecobee will -resume the next program on the program stack. If resume_all, Ecobee will resume -all programs on the program stack. - -For all other points, the corresponding integer, string, boolean, etc. value may -be sent. 
- -Additional Instructions -======================= - -Getting API Key ---------------- - -Instructions for finding your API key can be found here: -https://www.ecobee.com/home/developer/api/examples/ex1.shtml Under the Example -1 header. - -Authenicating the Ecobee Driver using the PIN can be found at the same link -under Example 1 step 1 subheader. - -Finding Device Identifier -------------------------- - - -To find your Ecobee thermostat's device identifier: - - 1. Log into the Ecobee customer portal (https://www.ecobee.com/consumerportal/index.html). - 2. From the Home screen click "About My Ecobee" - 3. The thermostat identifier is the serial number listed on the About screen + self.vip.rpc.call("platform.driver", "get_point", , ) Versioning -~~~~~~~~~~ +---------- The Ecobee driver has been tested using the May 2019 API release as well as device firmware version 4.5.73.24 diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_add_app.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_add_app.png new file mode 100644 index 0000000000..ff5cc2650d Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_add_app.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_api_key.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_api_key.png new file mode 100644 index 0000000000..a7c6b7e00c Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_api_key.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_apps.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_apps.png new file mode 100644 index 0000000000..58177fbf34 Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_apps.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_console.png 
b/docs/source/core_services/drivers/driver_configuration/files/ecobee_console.png new file mode 100644 index 0000000000..7ae22fee78 Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_console.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_create_app.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_create_app.png new file mode 100644 index 0000000000..6e587dad7d Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_create_app.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_developer_menu.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_developer_menu.png new file mode 100644 index 0000000000..1080383e8d Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_developer_menu.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_pin.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_pin.png new file mode 100644 index 0000000000..38fbd91622 Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_pin.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/files/ecobee_verify_pin.png b/docs/source/core_services/drivers/driver_configuration/files/ecobee_verify_pin.png new file mode 100644 index 0000000000..670db44d73 Binary files /dev/null and b/docs/source/core_services/drivers/driver_configuration/files/ecobee_verify_pin.png differ diff --git a/docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst b/docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst index d23f50c67b..d6b134bb0c 100644 --- a/docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst +++ 
b/docs/source/core_services/drivers/driver_configuration/modbus-tk-driver.rst @@ -145,8 +145,9 @@ Each row configures a register definition on the device. Default is FALSE. - **Default Value** (Optional) - The point's default value. If it is reverted by an agent, it changes back to this value. If this value is missing, it will revert to the last known value not set by an agent. - - **Transform** (Optional) - Scaling algorithm: scale(multiplier), scale_int(multiplier), mod10k(reverse), - or none. Default is an empty string. + - **Transform** (Optional) - Scaling algorithm: scale(multiplier), scale_int(multiplier), scale_reg(register_name), + scale_reg_power10(register_name), scale_decimal_int_signed(multiplier), mod10k(reverse), + mod10k64(reverse), mod10k48(reverse) or none. Default is an empty string. - **Table** (Optional) - Standard modbus table name defining how information is stored in slave device. There are 4 different tables: diff --git a/docs/source/core_services/drivers/driver_configuration/the-energy-detective-driver.rst b/docs/source/core_services/drivers/driver_configuration/the-energy-detective-driver.rst new file mode 100644 index 0000000000..82c4e116e5 --- /dev/null +++ b/docs/source/core_services/drivers/driver_configuration/the-energy-detective-driver.rst @@ -0,0 +1,140 @@ +.. _The-Energy-Detective-Driver: + +The Energy Detective Meter Driver +------------------------------------ + + +Introduction +------------ + +The TED-Pro is an energy monitoring system that can measure energy consumption +of multiple mains and supports submetering of individual circuits. +This driver connects to a TED Pro Energy Control Center (ECC) and can collect +information from multiple Measuring Transmitting Units (MTUs) and Spyder submetering +devices connected to the ECC. + +Configuration +------------- + +The TED Pro device interface is configured as follows. 
You'll need the ip address +or hostname of the ECC on a network segment accessible from the Volttron instance, +if configured to use a port other than 80, you can provide it as shown below, +following a colon after the host address. + +.. code-block:: json + + { + "driver_type": "ted_meter", + "driver_config": { + "device_address": "192.168.1.100:8080", + "username": "username", + "password": "password", + "scrape_spyder": true, + "track_totalizers": true + } + } + +Parameters +********** + + - **username** - Username if the TED Pro is configured with Basic Authentication + - **password** - Password if the TED Pro is configured with Basic Authentication + - **device_address** - Hostname or IP address of the TED Pro ECC, a non-standard port can be included if needed + - **scrape_spyder** - Default true, enables or disables collection of the submetering data from spyder devices + connected to the TED Pro + - **track_totalizers** - Default true, enables or disables tracking of lifetime totals in the VOLTTRON Driver + +.. note:: + + The TED Pro does not expose its internal lifetime totalized metering, instead offering month to date (MTD) + and daily totals (TDY). Using the "track_totalizers" setting, the ted-meter driver will attempt to maintain + monotonically increasing lifetime totalizers. To do so, it must retain state regarding the running total and + the last read value. The driver makes use of the VOLTTRON Config subsystem to store this state. + To reset these totals, delete the state/ted_meter/ config from the master driver config store and restart the + master driver. + +.. note:: + + This driver does not make use of the registry config. Because it is able to determine the configuration + of the TED Pro Device via the API, it simply creates registers for each data source on the TED Pro + + +.. 
note:: + + This driver is internally aware of the appropriate HayStack Tags for its registers, however, the + MasterDriver Framework makes no provision for publishing those tags during a scrape. Therefore, + integration of the tagging data is left to the end user. + +Examples +******** + +|TED Pro showing spyder outputs| + +The above configuration in the TED will result in the following scrape from the ted-meter driver on the message bus: + +.. code-block:: text + + [ + { + 'mtu-1/load_kva': 0.271, + 'mtu-1/load_kw': 0.203, + 'mtu-1/phase_angle': 195, + 'mtu-1/phase_current-a': '0', + 'mtu-1/phase_current-b': '0', + 'mtu-1/phase_current-c': '0', + 'mtu-1/phase_voltage-a': '0', + 'mtu-1/phase_voltage-b': '0', + 'mtu-1/phase_voltage-c': '0', + 'mtu-1/power_factor': 0.749, + 'mtu-1/voltage': 121.30000000000001, + 'spyder-1/AHU/load': 0.0, + 'spyder-1/AHU/mtd': 0.0, + 'spyder-1/AHU/mtd_totalized': 0.0, + 'spyder-1/C/U/load': 0.0, + 'spyder-1/C/U/mtd': 0.0, + 'spyder-1/C/U/mtd_totalized': 0.0, + 'spyder-1/Fridge/load': 0.0, + 'spyder-1/Fridge/mtd': 0.056, + 'spyder-1/Fridge/mtd_totalized': 0.056, + 'spyder-1/HW/load': 0.0, + 'spyder-1/HW/mtd': 0.14400000000000002, + 'spyder-1/HW/mtd_totalized': 0.14400000000000002, + 'spyder-1/Toaster/load': 0.0, + 'spyder-1/Toaster/mtd': 0.24, + 'spyder-1/Toaster/mtd_totalized': 0.24, + 'system/mtd': 0.652, + 'system/mtd_totalized': 0.652 + }, + { + 'mtu-1/load_kva': {'type': 'integer', 'tz': u'', 'units': 'kVA'}, + 'mtu-1/load_kw': {'type': 'integer', 'tz': u'', 'units': 'kW'}, + 'mtu-1/phase_angle': {'type': 'integer', 'tz': u'', 'units': 'degrees'}, + 'mtu-1/phase_current-a': {'type': 'integer', 'tz': u'', 'units': 'Amps'}, + 'mtu-1/phase_current-b': {'type': 'integer', 'tz': u'', 'units': 'Amps'}, + 'mtu-1/phase_current-c': {'type': 'integer', 'tz': u'', 'units': 'Amps'}, + 'mtu-1/phase_voltage-a': {'type': 'integer', 'tz': u'', 'units': 'Volts'}, + 'mtu-1/phase_voltage-b': {'type': 'integer', 'tz': u'', 'units': 'Volts'}, + 
'mtu-1/phase_voltage-c': {'type': 'integer', 'tz': u'', 'units': 'Volts'}, + 'mtu-1/power_factor': {'type': 'integer', 'tz': u'', 'units': 'ratio'}, + 'mtu-1/voltage': {'type': 'integer', 'tz': u'', 'units': 'Volts'}, + 'spyder-1/AHU/load': {'type': 'integer', 'tz': u'', 'units': 'kW'}, + 'spyder-1/AHU/mtd': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/AHU/mtd_totalized': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/C/U/load': {'type': 'integer', 'tz': u'', 'units': 'kW'}, + 'spyder-1/C/U/mtd': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/C/U/mtd_totalized': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/Fridge/load': {'type': 'integer', 'tz': u'', 'units': 'kW'}, + 'spyder-1/Fridge/mtd': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/Fridge/mtd_totalized': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/HW/load': {'type': 'integer', 'tz': u'', 'units': 'kW'}, + 'spyder-1/HW/mtd': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/HW/mtd_totalized': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/Toaster/load': {'type': 'integer', 'tz': u'', 'units': 'kW'}, + 'spyder-1/Toaster/mtd': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'spyder-1/Toaster/mtd_totalized': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'system/mtd': {'type': 'integer', 'tz': u'', 'units': 'kWh'}, + 'system/mtd_totalized': {'type': 'integer', 'tz': u'', 'units': 'kWh'} + } + ] + +.. 
|TED Pro showing spyder outputs| image:: ../files/ted-spyders.png \ No newline at end of file diff --git a/docs/source/core_services/drivers/files/ted-spyders.png b/docs/source/core_services/drivers/files/ted-spyders.png new file mode 100644 index 0000000000..059c4204ef Binary files /dev/null and b/docs/source/core_services/drivers/files/ted-spyders.png differ diff --git a/docs/source/devguides/agent_development/Agent-Development.rst b/docs/source/devguides/agent_development/Agent-Development.rst index 69214979d5..e3bc8cf52d 100644 --- a/docs/source/devguides/agent_development/Agent-Development.rst +++ b/docs/source/devguides/agent_development/Agent-Development.rst @@ -167,16 +167,16 @@ store values and setting up a configuration handler. Values in the default config can be built into the agent or come from the packaged configuration file. The subscribe method tells our agent which function to call whenever there is a new or updated config file. For more information -on using the configuration store see :doc:`Agent Configuration Store ` +on using the configuration store see :doc:`Agent Configuration Store `. -`_create_subscriptions` (covered in the next section) will use the value in self.setting2 +`_create_subscriptions` (covered in the next section) will use the value in `self.setting2` to create a new subscription. Setting up a Subscription ^^^^^^^^^^^^^^^^^^^^^^^^^ -The Agent creates a subscription using the value of self.setting2 in the method -`_create_subscription`. The messages for this subscription hare handeled with +The Agent creates a subscription using the value of `self.setting2` in the method +`_create_subscription`. The messages for this subscription are handled with the `_handle_publish` method: :: @@ -191,20 +191,21 @@ the `_handle_publish` method: def _handle_publish(self, peer, sender, bus, topic, headers, message): + #By default no action is taken. 
pass Agent Lifecycle Events ^^^^^^^^^^^^^^^^^^^^^^ -Methods may be setup to be called at agent startup and shudown: +Methods may be setup to be called at agent startup and shutdown: :: @Core.receiver("onstart") def onstart(self, sender, **kwargs): """ - This is method is called once the Agent has successfully connected to the platform. - This is a good place to setup subscriptions if they are not dynamic or + This method is called once the Agent has successfully connected to the platform. + This is a good place to setup subscriptions if they are not dynamic or to do any other startup activities that require a connection to the message bus. Called after any configurations methods that are called at startup. @@ -224,15 +225,15 @@ Methods may be setup to be called at agent startup and shudown: """ pass -As the comment mentions. With the new configuration store feature `onstart` methods +As the comment mentions, with the new configuration store feature `onstart` methods are mostly unneeded. However this code does include an example of how to do a Remote -Proceedure Call to another agent. +Procedure Call to another agent. -Agent Remote Proceedure Calls +Agent Remote Procedure Calls ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -An agent may receive commands from other agents via a Remote Proceedure Call or RPC for short. -This is done with the @RPC.export decorattor: +An agent may receive commands from other agents via a Remote Procedure Call (RPC). +This is done with the `@RPC.export` decorator: :: @@ -248,7 +249,7 @@ This is done with the @RPC.export decorattor: Packaging Configuration ~~~~~~~~~~~~~~~~~~~~~~~ -The wizard will automatically create a setup.py file. This file sets up the +The wizard will automatically create a `setup.py` file. This file sets up the name, version, required packages, method to execute, etc. for the agent based on your answers to the wizard. The packaging process will also use this information to name the resulting file. 
@@ -288,20 +289,20 @@ information to name the resulting file. Launch Configuration ~~~~~~~~~~~~~~~~~~~~ -In TestAgent, the wizard will automatically create a file called "config". +In TestAgent, the wizard will automatically create a JSON file called "config". It contains configuration information for the agent. This file contains -examples every datatype supported by the configuration system: +examples of every datatype supported by the configuration system: :: { # VOLTTRON config files are JSON with support for python style comments. "setting1": 2, #Integers - "setting2": "some/random/topic2", #strings + "setting2": "some/random/topic2", #Strings "setting3": true, #Booleans: remember that in JSON true and false are not capitalized. "setting4": false, "setting5": 5.1, #Floating point numbers. - "setting6": [1,2,3,4], # Lists + "setting6": [1,2,3,4], #Lists "setting7": {"setting7a": "a", "setting7b": "b"} #Objects } @@ -316,8 +317,8 @@ To install the agent the platform must be running. Start the platform with the c ``./start-volttron`` .. note:: If you are not in an activated environment, this script will start - the platform running in the background in the correct environment, however - the environment will not be activated for you, you must activate it yourself. + the platform running in the background in the correct environment. However + the environment will not be activated for you; you must activate it yourself. Now we must install it into the platform. Use the following command to install it and add a tag for easily referring to the agent. From the project directory, run the following command: @@ -331,16 +332,16 @@ This will result in output similar to the following: .. code-block:: bash - AGENT IDENTITY TAG STATUS HEALTH - e testeragent-0.5 testeragent-0.5_1 testagent + AGENT IDENTITY TAG PRI + df testeragent-0.5 testeragent-0.5_1 testagent -Where the number or letter is the unique portion of the full uuid for the agent. 
AGENT is +The initial number or letter is a unique portion of the full UUID for the agent. AGENT is the "name" of the agent based on the contents of its class name and the version in its setup.py. IDENTITY is the agent's identity in the platform. This is automatically assigned based on class name and instance number. This agent's -ID is _1 because it is the first instance. TAG is the name we assigned in the command above. HEALTH -is the current health of the agent as reported by the agents health subsystem. +ID is _1 because it is the first instance. TAG is the name we assigned in the command above. PRI is the priority for +agents which have been "enabled" using the ``vctl enable`` command. -When using lifecycle commands on agents, they can be referred to be UUID (default) or AGENT (name) or TAG. +When using lifecycle commands on agents, they can be referred to by the UUID (default) or AGENT (name) or TAG. Testing the Agent @@ -352,28 +353,30 @@ From the Command Line To test the agent, we will start the platform (if not already running), launch the agent, and check the log file. -- With the VOLTTRON environment activated, start the platform by - running (if needed): +With the VOLTTRON environment activated, start the platform by running (if needed): ``./start-volttron`` -- Launch the agent by using the result of the list command: +You can launch the agent in three ways, all of which you can find by using the +``vctl list`` command: + +- By using the : ``vctl start `` -- Launch the agent by name with: +- By name: ``vctl start --name testeragent-0.1`` -- Launch the agent by tag with: +- By tag: ``vctl start --tag testagent`` -- Check that it is :ref:`running `: +Check that it is :ref:`running `: ``vctl status`` -- Start the ListenerAgent as in :ref:`Building VOLTTRON ` +- Start the ListenerAgent as in :ref:`Building VOLTTRON `. 
- Check the log file for messages indicating the TestAgent is receiving the ListenerAgents messages: diff --git a/docs/source/devguides/agent_development/TestAgent.rst b/docs/source/devguides/agent_development/TestAgent.rst index 7fc6099c65..f0d9fee37e 100644 --- a/docs/source/devguides/agent_development/TestAgent.rst +++ b/docs/source/devguides/agent_development/TestAgent.rst @@ -195,10 +195,10 @@ Contents of config: { # VOLTTRON config files are JSON with support for python style comments. "setting1": 2, #Integers - "setting2": "some/random/topic2", #strings + "setting2": "some/random/topic2", #Strings "setting3": true, #Booleans: remember that in JSON true and false are not capitalized. "setting4": false, "setting5": 5.1, #Floating point numbers. - "setting6": [1,2,3,4], # Lists + "setting6": [1,2,3,4], #Lists "setting7": {"setting7a": "a", "setting7b": "b"} #Objects } diff --git a/docs/source/devguides/supporting/examples/FakeDriver.rst b/docs/source/devguides/supporting/examples/FakeDriver.rst index 4e42635baf..da0d0b7b95 100644 --- a/docs/source/devguides/supporting/examples/FakeDriver.rst +++ b/docs/source/devguides/supporting/examples/FakeDriver.rst @@ -1,24 +1,27 @@ .. _FakeDriver: +.. role:: bash(code) + :language: bash + Fake Driver =========== -The FakeDriver is included as a way to quickly see data published to the message bus in a format -that mimics what a true Driver would produce. This is an extremely simple implementation of the -:ref:`VOLTTRON driver framework` +The FakeDriver is included as a way to quickly see data published to the message bus in a format +that mimics what a true Driver would produce. This is an extremely simple implementation of the +:ref:`VOLTTRON driver framework`. + +Here, we make a script to build and deploy the fake Driver. -Make a script to build and deploy the fake driver. -- Create a config directory (if one doesn't already exist). All local config files will be - worked on here. 
-- cp examples/configurations/drivers/fake.config config/ -- Edit registry_config for the paths on your system +- Create a config directory (if one doesn't already exist) inside your Volttron repository: :code:`mkdir config`. All local config files will be worked on here. +- Copy over the example file: :bash:`cp examples/configurations/drivers/fake.config config/` +- Edit :code:`registry_config` for the paths on your system: fake.config:: { "driver_config": {}, - "registry_config":"config://fake.csv", + "registry_config": "config://fake.csv", "interval": 5, "timezone": "US/Pacific", "heart_beat_point": "Heartbeat", @@ -28,8 +31,8 @@ fake.config:: "publish_breadth_first": false } -- cp examples/configurations/drivers/master-driver.agent config/fake-master-driver.config -- Add fake.csv and fake.config to the :ref:`configuration store`. +- :bash:`cp examples/configurations/drivers/master-driver.agent config/fake-master-driver.config` +- Add fake.csv and fake.config to the :ref:`configuration store`. ## TODO Which fake.csv? How do I add them to my configuration store? - Edit fake-master-driver.config to reflect paths on your system fake-master-driver.config:: @@ -40,7 +43,7 @@ fake-master-driver.config:: - Use the scripts/install-agent.py script to install the Master Driver agent: -:: +.. code-block:: bash python scripts/install-agent.py -s services/core/MasterDriverAgent -c config/fake-master-driver.config diff --git a/docs/source/devguides/supporting/examples/ListenerAgent.rst b/docs/source/devguides/supporting/examples/ListenerAgent.rst index 78290bd859..0f921f6008 100644 --- a/docs/source/devguides/supporting/examples/ListenerAgent.rst +++ b/docs/source/devguides/supporting/examples/ListenerAgent.rst @@ -11,9 +11,9 @@ platform agent. Explanation of ListenerAgent ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Use utils to setup logging which we’ll use later. +Use :code:`utils` to setup logging, which we’ll use later. -:: +.. 
code-block:: python utils.setup_logging() _log = logging.getLogger(__name__) @@ -22,7 +22,7 @@ Use utils to setup logging which we’ll use later. The Listener agent extends (inherits from) the Agent class for its default functionality such as responding to platform commands: -:: +.. code-block:: python class ListenerAgent(Agent): '''Listens to everything and publishes a heartbeat according to the @@ -31,10 +31,10 @@ default functionality such as responding to platform commands: After the class definition, the Listener agent reads the configuration file, extracts the configuration parameters, and initializes any -Listener agent instance variable. This is done the agents **init** +Listener agent instance variable. This is done through the agent's :code:`__init__` method: -:: +.. code-block:: python def __init__(self, config_path, **kwargs): super(ListenerAgent, self).__init__(**kwargs) @@ -55,7 +55,7 @@ tagged to run after the agent is initialized by the decorator ``@Core.receiver('onsetup')``. This method accesses the configuration parameters, logs a message to the platform log, and sets the agent ID. -:: +.. code-block:: python @Core.receiver('onsetup') def onsetup(self, sender, **kwargs): @@ -64,7 +64,7 @@ parameters, logs a message to the platform log, and sets the agent ID. self._agent_id = self.config.get('agentid') The Listener agent subscribes to all topics published on the message -bus. Subscribe/publish interactions with the message bus are handled by +bus. Publish and subscribe interactions with the message bus are handled by the PubSub module located at: ``~/volttron/volttron/platform/vip/agent/subsystems/pubsub.py`` @@ -77,7 +77,7 @@ for simplifying subscriptions. It also checks for the sender being ``pubsub.compat`` in case there are any VOLTTRON 2.0 agents running on the platform. -:: +.. 
code-block:: python @PubSub.subscribe('pubsub', '') def on_match(self, peer, sender, bus, topic, headers, message): diff --git a/docs/source/docutils.conf b/docs/source/docutils.conf new file mode 100644 index 0000000000..a4555de688 --- /dev/null +++ b/docs/source/docutils.conf @@ -0,0 +1,2 @@ +[restructuredtext parser] +syntax_highlight = short \ No newline at end of file diff --git a/examples/DataPublisher/datapublisher/agent.py b/examples/DataPublisher/datapublisher/agent.py index 0efdb81e2d..db29624691 100644 --- a/examples/DataPublisher/datapublisher/agent.py +++ b/examples/DataPublisher/datapublisher/agent.py @@ -215,7 +215,7 @@ def build_metadata(name_map, unittype_map): results = defaultdict(dict) for topic, point in name_map.values(): unit_type = Publisher._get_unit(point, unittype_map) - results[topic][point] = unit_type + results[topic][point] = {"unit": unit_type} return results def build_maps(self, fieldnames, base_path): @@ -247,7 +247,7 @@ def _get_unit(point, unittype_map): for k, v in unittype_map.items(): if re.match(k, point): return v - return 'percent' + return {'type': 'float'} def _publish_point_all(self, topic, data, meta_data, headers): # makesure topic+point gives a true value. @@ -350,7 +350,7 @@ def publish_loop(self): # Reset data frequency counter. self._next_allowed_publish = None if not isinstance(self._input_data, list): - handle = open(self._input_data, 'rb') + handle = open(self._input_data, 'r') self._data = csv.DictReader(handle) @RPC.export diff --git a/examples/GridAPPS-DAgent/config b/examples/GridAPPS-DAgent/config index c892cf8a63..0b8de793fe 100644 --- a/examples/GridAPPS-DAgent/config +++ b/examples/GridAPPS-DAgent/config @@ -2,10 +2,10 @@ # VOLTTRON config files are JSON with support for python style comments. "setting1": 2, #Integers - "setting2": "some/random/topic2", #strings + "setting2": "some/random/topic2", #Strings "setting3": true, #Booleans: remember that in JSON true and false are not capitalized. 
"setting4": false, "setting5": 5.1, #Floating point numbers. - "setting6": [1,2,3,4], # Lists + "setting6": [1,2,3,4], #Lists "setting7": {"setting7a": "a", "setting7b": "b"} #Objects } diff --git a/examples/ListenerAgent/config b/examples/ListenerAgent/config index 2619210fad..b0be919f76 100644 --- a/examples/ListenerAgent/config +++ b/examples/ListenerAgent/config @@ -1,8 +1,8 @@ { - - "agentid": "listener1", "message": "hello", + # stop time in seconds + "runtime_limit":30, # log-level can be DEBUG, INFO, WARN or ERROR # verbosity is decreased from left to right above # default: INFO diff --git a/examples/ListenerAgent/listener/agent.py b/examples/ListenerAgent/listener/agent.py index 1ba1443801..8ef516f6ce 100644 --- a/examples/ListenerAgent/listener/agent.py +++ b/examples/ListenerAgent/listener/agent.py @@ -40,6 +40,7 @@ import logging import sys from pprint import pformat +import datetime from volttron.platform.agent import utils from volttron.platform.messaging.health import STATUS_GOOD @@ -66,6 +67,15 @@ def __init__(self, config_path, **kwargs): self._message = self.config.get('message', DEFAULT_MESSAGE) self._heartbeat_period = self.config.get('heartbeat_period', DEFAULT_HEARTBEAT_PERIOD) + + runtime_limit = int(self.config.get('runtime_limit', 0)) + if runtime_limit and runtime_limit > 0: + stop_time = datetime.datetime.now() + datetime.timedelta(seconds=runtime_limit) + _log.info('Listener agent will stop at {}'.format(stop_time)) + self.core.schedule(stop_time, self.core.stop) + else: + _log.info('No valid runtime_limit configured; listener agent will run until manually stopped') + try: self._heartbeat_period = int(self._heartbeat_period) except: @@ -98,7 +108,7 @@ def onstart(self, sender, **kwargs): _log.info('query: %r', query.query('serverkey').get()) @PubSub.subscribe('pubsub', '') - def on_match(self, peer, sender, bus, topic, headers, message): + def on_match(self, peer, sender, bus, topic, headers, message): """Use match_all to receive all 
messages and print them out.""" self._logfn( "Peer: {0}, Sender: {1}:, Bus: {2}, Topic: {3}, Headers: {4}, " diff --git a/examples/SCPAgent/README.rst b/examples/SCPAgent/README.rst new file mode 100644 index 0000000000..66e7f976bf --- /dev/null +++ b/examples/SCPAgent/README.rst @@ -0,0 +1,69 @@ +SCP Agent +========= + +The purpose of this example agent is to demonstrate secure copy of files from/to +external resources. SCP uses the ssh protocol for creating an encrypted connection +between the agent and the resources. + +Configuration +------------- + +The SCP Agent requires a few configuration elements in order for the agent to run. + +.. csv-table:: Configuration Table + :header: "Parameter", "Example", "Description" + :widths: 15, 15, 30 + + "ssh_id", "~/.ssh/id_rsa", "Path to the identity file to allow connectivity from the host to remote communication" + "remote_user", "user@remote.com", "The user and resolvable host for connecting to" + +Interfaces +---------- + +The SCP Agent has both pubsub and rpc based interfaces. + +RPC Interface +~~~~~~~~~~~~~ + +There are two methods available for the rpc interface; the difference between the two +is the direction of the file exchange. + +.. code-block:: python + + result = agent.vip.rpc.call("scp.agent", "trigger_download", + remote_path="/home/osboxes/Downloads/f2.txt", + local_path="/home/osboxes/Desktop/f6.txt").get(timeout=10) + + result = agent.vip.rpc.call("scp.agent", "trigger_upload", + remote_path="/home/osboxes/Downloads/f6.txt", + local_path="/home/osboxes/Desktop/f6.txt").get(timeout=10) + +PubSub Interface +~~~~~~~~~~~~~~~~ + +The pubsub interface requires sending the paths through the pubsub subsystem. Either a +json string or a dictionary sent across the message bus to the agent on the transfer topic will start +the scp transfer. + +.. 
code-block:: python + + agent.vip.pubsub.publish(peer='pubsub', topic="transfer", message=dict(remote_path=remote_path, + local_path=local_path, + direction="SENDING")).get(timeout=5) + + agent.vip.pubsub.publish(peer='pubsub', topic="transfer", message=dict(remote_path=remote_path, + local_path=local_path, + direction="RECEIVING")).get(timeout=5) + + +Testing +------- + +Within the agent directory there is a trigger_scp.py script. By default the trigger will run through 4 different +tests. The tests will exercise the sending and receiving for both the rpc and pubsub interfaces. The trigger will +require user interaction so run it with a shell that can receive input. + +.. code-block:: shell + + (volttron) (base) osboxes@osboxes:~/repos/volttron$ python examples/SCPAgent/trigger_scp.py + diff --git a/examples/SCPAgent/config.yml b/examples/SCPAgent/config.yml new file mode 100644 index 0000000000..f959262e95 --- /dev/null +++ b/examples/SCPAgent/config.yml @@ -0,0 +1,3 @@ +--- +ssh_id: ~/.ssh/id_rsa +remote_user: osboxes@localhost diff --git a/services/core/DriverHTTPCache/driver_http_cache/__init__.py b/examples/SCPAgent/scp/__init__.py similarity index 100% rename from services/core/DriverHTTPCache/driver_http_cache/__init__.py rename to examples/SCPAgent/scp/__init__.py diff --git a/examples/SCPAgent/scp/agent.py b/examples/SCPAgent/scp/agent.py new file mode 100644 index 0000000000..a9fbf46beb --- /dev/null +++ b/examples/SCPAgent/scp/agent.py @@ -0,0 +1,199 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2019, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. 
from enum import Enum, auto
import inspect
from json import JSONDecodeError
import logging
from pathlib import Path

from gevent import subprocess

from volttron.platform import jsonapi
from volttron.platform.vip.agent import Agent, Core, RPC
from volttron.platform.agent.utils import vip_main, load_config


__version__ = "0.1"
logging.basicConfig(level=logging.DEBUG)
_log = logging.getLogger(inspect.getmodulename(__file__))


class WhichWayEnum(Enum):
    """Direction of an scp transfer relative to the local host."""
    SENDING = auto()    # local path -> remote path
    RECEIVING = auto()  # remote path -> local path


class ScpAgent(Agent):
    """
    Agent that copies files to/from a remote host using ``scp``.

    Transfers are triggered either through the exported RPC methods
    (:meth:`trigger_download` / :meth:`trigger_upload`) or by publishing to
    the ``transfer`` pubsub topic.  The ssh identity file (``ssh_id``) and
    remote user (``remote_user``) are supplied through the agent's
    configuration store; until both are configured every trigger is refused.
    """

    def __init__(self, config_path, **kwargs):
        super(ScpAgent, self).__init__(**kwargs)
        config = load_config(config_path)
        self._remote_user = None
        self._ssh_id = None
        self.default_config = dict(
            ssh_id=config.get("ssh_id"),            # e.g. "~/.ssh/id_rsa"
            remote_user=config.get("remote_user")   # e.g. "osboxes@localhost"
        )
        self.vip.config.set_default("config", self.default_config)
        self.vip.config.subscribe(self.configure,
                                  actions=["NEW", "UPDATE"], pattern="config")
        # configure() can run on every config-store update; this guard makes
        # sure the pubsub subscription is only created once.
        self._subscribed = False

    def configure(self, config_name, action, contents):
        """Config-store callback: capture ssh settings and subscribe once."""
        conf = {}
        conf.update(contents)
        self._ssh_id = conf.get("ssh_id")
        self._remote_user = conf.get('remote_user')

        if not self._subscribed:
            self.vip.pubsub.subscribe(peer="pubsub", prefix="transfer", callback=self.transfer_file)
            self._subscribed = True

    def transfer_file(self, peer, sender, bus, topic, headers, message):
        """
        Pubsub interface for transferring files.

        The interface requires message to be a dictionary like object
        or a json serializable string with the following required structure:

            {
                "direction": "SENDING",
                "remote_path": "/remote/path/file.txt",
                "local_path": "/local/path/file.txt"
            }

        The above direction must be either "SENDING" or "RECEIVING".  The path
        must be available on the host that is providing the content and will
        overwrite the data on the receiving side of the connection.
        """
        enabled = self.__check_configuration__()

        if not enabled:
            return False

        if isinstance(message, str):
            try:
                message = jsonapi.loads(message)
            except JSONDecodeError:
                _log.error("Invalid json passed through string interface")
                return

        direction = message.get("direction")
        remote_path = message.get("remote_path")
        local_path = message.get("local_path")

        # Validate all fields before bailing out so every problem is logged.
        enabled = True
        if not remote_path:
            enabled = False
            _log.error("remote_path not specified in message to pub sub")

        if not local_path:
            enabled = False
            _log.error("local_path not specified in message to pub sub")

        if direction not in WhichWayEnum.__members__:
            _log.error("which_way must be either SENDING or RECEIVING.")
            enabled = False

        if not enabled:
            return

        if direction == WhichWayEnum.SENDING.name:
            success = self.__handle_scp__(WhichWayEnum.SENDING, local_path, remote_path)
        else:
            success = self.__handle_scp__(WhichWayEnum.RECEIVING, remote_path, local_path)

        if not success:
            _log.error("Unable to send to/receive scp files.")

    @RPC.export
    def trigger_download(self, remote_path, local_path):
        """RPC: copy ``remote_path`` from the remote host to ``local_path``.

        :return: True when scp exited with status 0, False otherwise.
        """
        _log.debug('Triggering download')
        enabled = self.__check_configuration__()

        if not enabled:
            return False

        return self.__handle_scp__(WhichWayEnum.RECEIVING, remote_path, local_path)

    @RPC.export
    def trigger_upload(self, local_path, remote_path):
        """RPC: copy ``local_path`` to ``remote_path`` on the remote host.

        :return: True when scp exited with status 0, False otherwise.
        """
        _log.debug('Trigger upload')
        enabled = self.__check_configuration__()

        if not enabled:
            return False

        return self.__handle_scp__(WhichWayEnum.SENDING, local_path, remote_path)

    def __check_configuration__(self):
        """Return True when both ssh_id and remote_user have been configured."""
        enabled = True
        if self._ssh_id is None:
            _log.error("Configuration error, ssh_id is not set")
            enabled = False
        if self._remote_user is None:
            _log.error("Configuration error, invalid remote user configured")
            enabled = False
        return enabled

    def __handle_scp__(self, which_way: WhichWayEnum, from_arg, to_arg):
        """
        Run scp as a subprocess for a single transfer.

        :param which_way: SENDING (local->remote) or RECEIVING (remote->local)
        :param from_arg: source path (local or remote depending on direction)
        :param to_arg: destination path
        :return: True when scp exited with status 0, False otherwise.
        """
        cmd = ["scp", "-o", "LogLevel=VERBOSE",
               "-o", "PasswordAuthentication=no",
               "-o", "IdentitiesOnly=yes",
               "-o", "Compression=yes",
               "-i", self._ssh_id]

        if which_way == WhichWayEnum.SENDING:
            cmd.extend([f"{from_arg}", f"{self._remote_user}:{to_arg}"])
        else:
            cmd.extend([f"{self._remote_user}:{from_arg}", f"{to_arg}"])

        p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        # communicate() drains both pipes while waiting for the process; the
        # previous wait()-then-read() pattern can deadlock when verbose scp
        # output fills a pipe buffer before the process exits.
        stdout, stderr = p.communicate()
        _log.debug(stderr.decode('utf-8'))
        _log.debug(stdout.decode('utf-8'))
        _log.debug(f"Complete {which_way.name}")
        _log.debug(f"Return code: {p.returncode}")
        return p.returncode == 0


if __name__ == '__main__':
    vip_main(ScpAgent, version=__version__)
from pathlib import Path
import os
import shutil

import argparse
import gevent
from volttron.platform.vip.agent.utils import build_agent


def run_tests(local_root="~/local_files", remote_root="~/remote_files"):
    """
    Interactive smoke test for the SCP agent.

    Exercises all four transfer paths (RPC trigger_download/trigger_upload
    and pubsub SENDING/RECEIVING) against an agent installed with the
    identity ``scp.agent``, pausing for operator confirmation before each
    step.  Both directories are wiped at the start of a run.

    :param local_root: directory acting as the "local" side of the transfers
    :param remote_root: directory acting as the "remote" side of the transfers
    """
    agent = build_agent(identity='trigger')

    local_root = Path(local_root).expanduser()
    remote_root = Path(remote_root).expanduser()

    def build_remote_filename(filename):
        # Ensure the directory exists before handing back a path inside it.
        os.makedirs(remote_root, exist_ok=True)
        return str(Path(remote_root).joinpath(filename))

    def build_local_filename(filename):
        os.makedirs(local_root, exist_ok=True)
        return str(Path(local_root).joinpath(filename))

    def create_remote_file(filename, content):
        full_path = build_remote_filename(filename)
        with open(full_path, 'w') as fp:
            fp.write(content)
        return full_path

    def create_local_file(filename, content):
        full_path = build_local_filename(filename)
        with open(full_path, 'w') as fp:
            fp.write(content)
        return full_path

    def remove_files():
        # Start from a clean slate so leftovers can't mask transfer failures.
        shutil.rmtree(remote_root, ignore_errors=True)
        shutil.rmtree(local_root, ignore_errors=True)

    remove_files()

    # Stop the agent even if a test step raises, so the process can exit.
    try:
        remote_path = create_remote_file("t1.txt", "this is f1")
        local_path = build_local_filename("t1.after.transfer.txt")

        input(f"Test 1: rpc: trigger_download\n\tdownload remote_path: {remote_path}\n\tto local_path: {local_path} ")
        result = agent.vip.rpc.call("scp.agent", "trigger_download",
                                    remote_path=remote_path,
                                    local_path=local_path).get()
        print(f"The result was {result}\n")

        print(f"Creating test2 file")
        remote_path = build_remote_filename("t2.remote.transfer.txt")
        local_path = create_local_file("t2.txt", "This is test 2")
        input(f"Test 2: rpc: trigger_upload\n\tupload local_path: {local_path}\n\tto remote_path: {remote_path} ")
        result = agent.vip.rpc.call("scp.agent", "trigger_upload",
                                    remote_path=remote_path,
                                    local_path=local_path).get()
        print(f"The result was {result}\n")

        print(f"Creating test3 file")
        remote_path = build_remote_filename("t3.sent.pubsub.txt")
        local_path = create_local_file("t3.txt", "This is test 3")

        input(f"Test 3: pubsub: SENDING\n\tlocal_path: {local_path}\n\tto remote_path: {remote_path} ")

        agent.vip.pubsub.publish(peer='pubsub', topic="transfer", message=dict(remote_path=remote_path,
                                                                               local_path=local_path,
                                                                               direction="SENDING")).get()
        # Give the agent a moment to perform the transfer before checking.
        gevent.sleep(1)
        print(f"The result is {Path(remote_path).exists()}\n")
        print(f"Creating test4 file")
        remote_path = create_remote_file("t4.receive.pubsub.txt", "This is test 4")
        local_path = build_local_filename("t4.receive.txt")

        input(f"Test 4: pubsub: RECEIVING\n\tlocal_path: {local_path}\n\tfrom remote_path: {remote_path} ")
        agent.vip.pubsub.publish(peer='pubsub', topic="transfer", message=dict(remote_path=remote_path,
                                                                               local_path=local_path,
                                                                               direction="RECEIVING")).get()
        gevent.sleep(1)
        print(f"The result is {Path(local_path).exists()}\n")
    finally:
        agent.core.stop()
    print("Complete")


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # NOTE(review): these defaults ("~/local_root"/"~/remote_root") differ from
    # run_tests()'s own defaults ("~/local_files"/"~/remote_files") — confirm
    # which pair is intended.
    parser.add_argument("-l", "--local_root", help="Local path", default="~/local_root")
    parser.add_argument("-r", "--remote_root", help="Remote path", default="~/remote_root")

    args = parser.parse_args()
    run_tests(args.local_root, args.remote_root)
+useCelsius,useCelsius,bool,setting,False,True,, +useTimeFormat12,useTimeFormat12,bool,setting,False,True,, +compressorProtectionMinTime,compressorProtectionMinTime,seconds,setting,True,True,, +compressorProtectionMinTemp,compressorProtectionMinTemp,degF,setting,True,True,, +stage1HeatingDifferentialTemp,stage1HeatingDifferentialTemp,degF,setting,True,True,, +stage1CoolingDifferentialTemp,stage1CoolingDifferentialTemp,degF,setting,True,True,, +stage1HeatingDissipationTime,stage1HeatingDissipationTime,seconds,setting,True,True,, +stage1CoolingDissipationTime,stage1CoolingDissipationTime,seconds,setting,True,True,, +heatPumpReversalOnCool,heatPumpReversalOnCool,bool,setting,True,True,, +heatCoolMinDelta,heatCoolMinDelta,degF,setting,True,True,, +tempCorrection,tempCorrection,degF,setting,True,True,, +holdAction,holdAction,string,setting,True,True,, +heatPumpGroundWater,heatPumpGroundWater,bool,setting,True,True,, +dehumidifierMode,dehumidifierMode,seconds,setting,True,True,, +dehumidifierLevel,dehumidifierLevel,%,setting,True,True,, +dehumidifyWithAC,dehumidifyWithAC,bool,setting,True,True,, +dehumidifyOvercoolOffset,dehumidifyOvercoolOffset,degF,setting,True,True,, +autoHeatCoolFeatureEnabled,autoHeatCoolFeatureEnabled,bool,setting,True,True,, +disablePreHeating,disablePreHeating,bool,setting,True,True,, +disablePreCooling,disablePreCooling,bool,setting,True,True,, +ventilatorMinOnTimeHome,ventilatorMinOnTimeHome,string,setting,True,True,, +ventilatorMinOnTimeAway,ventilatorMinOnTimeAway,string,setting,True,True,, +isVentilatorTimerOn,isVentilatorTimerOn,bool,setting,False,True,, +ventilatorOffDateTime,ventilatorOffDateTime,string,setting,True,True,, +coolingLockout,coolingLockout,bool,setting,True,True,, +ventilatorFreeCooling,ventilatorFreeCooling,bool,setting,True,True,, +dehumidifyWhenHeating,dehumidifyWhenHeating,bool,setting,True,True,, +ventilatorDehumidify,ventilatorDehumidify,bool,setting,True,True,, +actualHumidity,actualHumidity,%,hold,False,True,, 
+rawTemperature,rawTemperature,degF,hold,False,True,, +desiredHumidity,desiredHumidity,%,hold,False,True,, +desiredDehumidity,desiredDehumidity,%,hold,False,True,, +desiredFanMode,desiredFanMode,string,hold,False,True,, +desiredHeatRange,desiredHeatRange,range,hold,False,True,, +desiredCoolRange,desiredCoolRange,range,hold,False,True,, +vent,vent,On/Off,setting,True,True,, +ventilatorMinOnTime,ventMinOnTime,seconds,setting,True,True,, +eiLocation,location,string,setting,False,True,, +coolStages,coolStages,quantity,setting,False,True,, +heatStages,heatStages,quantity,setting,False,True,, +maxSetBack,maxSetBack,%,setting,True,True,, +maxSetForward,maxSetForward,%,setting,True,True,, +quickSaveSetBack,quickSaveSetBack,%,setting,True,True,, +quickSaveSetForward,quickSaveSetForward,%,setting,True,True,, +condensationAvoid,condensationAvoid,bool,setting,True,True,, diff --git a/pytest.ini b/pytest.ini index 8adc48ee4c..77a47127da 100644 --- a/pytest.ini +++ b/pytest.ini @@ -10,6 +10,7 @@ norecursedirs = markers = actuator: Tests for actuator agent actuator_pubsub: Test for actuator agent. + actuator_unit: Unit tests for actuator agent agent: Testing for core agent operations. alert: Testing alerts from the health subsystem. auth: Testing for auth based code. 
@@ -52,3 +53,5 @@ markers = rmq_shutdown: rabbitmq shutdown tests secure: Test platform and agents with secure platform options rpc: Tests for RPC + mysqlfuncts: level one integration tests for mysqlfuncts + \ No newline at end of file diff --git a/scripts/rabbit_dependencies.sh b/scripts/rabbit_dependencies.sh index 338cdcd818..0c08b9389b 100755 --- a/scripts/rabbit_dependencies.sh +++ b/scripts/rabbit_dependencies.sh @@ -73,10 +73,10 @@ function install_on_debian { ${prefix} apt-get install apt-transport-https libwxbase3.0-0v5 libwxgtk3.0-0v5 libsctp1 build-essential python-dev openssl libssl-dev libevent-dev git ${prefix} apt-get purge -yf erlang* # Add the signing key - wget -O- https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc | sudo apt-key add - + wget -O- https://packages.erlang-solutions.com/ubuntu/erlang_solutions.asc | ${prefix} apt-key add - if [[ ! -f "/etc/apt/sources.list.d/erlang.solutions.list" ]]; then - echo "deb https://packages.erlang-solutions.com/ubuntu $DIST contrib" | sudo tee /etc/apt/sources.list.d/erlang.solutions.list + echo "deb https://packages.erlang-solutions.com/ubuntu $DIST contrib" | ${prefix} tee /etc/apt/sources.list.d/erlang.solutions.list fi version=${erlang_package_version} diff --git a/services/core/ActuatorAgent/tests/actuator_fixtures.py b/services/core/ActuatorAgent/tests/actuator_fixtures.py new file mode 100644 index 0000000000..36f121f8a8 --- /dev/null +++ b/services/core/ActuatorAgent/tests/actuator_fixtures.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- {{{ +# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: +# +# Copyright 2019, Battelle Memorial Institute. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. 
import contextlib

from mock import create_autospec

from services.core.ActuatorAgent.actuator.agent import ActuatorAgent, ScheduleManager
from services.core.ActuatorAgent.actuator.scheduler import RequestResult


class MockedAsyncResult:
    """
    This class is used to help mock Responses from the vip subsystem:
    ``get()`` simply hands back the canned result it was constructed with.
    """
    def __init__(self, result):
        self.result = result

    def get(self):
        return self.result


@contextlib.contextmanager
def get_actuator_agent(vip_identity: str = "fake_vip_identity",
                       vip_rpc_call_res: MockedAsyncResult = MockedAsyncResult("fake_result"),
                       vip_message_peer: str = None,
                       device_state: dict = None,
                       slot_requests_res: RequestResult = RequestResult(True, {("agent-id-1", "task-id-1")}, ""),
                       cancel_schedule_result: RequestResult = None):
    """
    Creates an Actuator agent and mocks all required dependencies for unit testing

    :param vip_identity: the identity of the Agent's Subsystem
    :param vip_rpc_call_res: the response returned when calling a method of the Agent's Subsystem
    :param vip_message_peer: the identity of the Agent's VIP, which is used internally
    :param device_state: a mapping between a path and a DeviceState; this is a protected field of the Agent
        (defaults to a fresh empty dict per call)
    :param slot_requests_res: the response returned when calling request_slots method of Agent's Schedule Manager
    :param cancel_schedule_result: the response returned when calling cancel_task method of Agent's Schedule Manager
    :return: context manager yielding the mocked ActuatorAgent
    """
    # A literal ``{}`` default would be shared by every call (mutable default
    # argument), letting device state leak between tests; use a None sentinel
    # and build a fresh dict instead.
    if device_state is None:
        device_state = {}

    ActuatorAgent.core.identity = "fake_core_identity"
    actuator_agent = ActuatorAgent()
    if vip_identity is not None:
        actuator_agent.driver_vip_identity = vip_identity
    actuator_agent.vip.rpc.call.return_value = vip_rpc_call_res
    actuator_agent.vip.rpc.context.vip_message.peer = vip_message_peer
    actuator_agent._device_states = device_state
    actuator_agent._schedule_manager = create_autospec(ScheduleManager)
    actuator_agent._schedule_manager.request_slots.return_value = slot_requests_res
    actuator_agent._schedule_manager.get_next_event_time.return_value = None
    actuator_agent._schedule_manager.cancel_task.return_value = cancel_schedule_result
    actuator_agent._schedule_manager.get_schedule_state.return_value = {}
    actuator_agent.core.schedule.return_value = None

    try:
        yield actuator_agent
    finally:
        # Clear recorded mock calls so assertions in one test can't be
        # satisfied by calls made in another.
        actuator_agent.vip.reset_mock()
import logging
from datetime import datetime, timedelta
from dateutil.tz import tzutc

import pytest

from services.core.ActuatorAgent.actuator import agent
from services.core.ActuatorAgent.actuator.agent import ActuatorAgent
from services.core.ActuatorAgent.actuator.scheduler import RequestResult, DeviceState
from services.core.ActuatorAgent.tests.actuator_fixtures import MockedAsyncResult, get_actuator_agent
from volttrontesting.utils.utils import AgentMock
from volttron.platform.vip.agent import Agent


# Canned pubsub message parts shared by every handler test below.
PEER = "peer-1"
SENDER = "sender-1"
HEADERS = {"requesterID": "id-12345"}
MESSAGE = "message-1"
BUS = "bus-1"
GET_TOPIC = "devices/actuators/get/somepath/actuationpoint"
SET_TOPIC = "devices/actuators/set/somepath/actuationpoint"
REQUEST_TOPIC = "devices/actuators/schedule/request"
REVERT_DEVICE_TOPIC = "devices/actuators/revert/device/somedevicepath"
# NOTE(review): unlike the other topics this one lacks the "devices/" prefix;
# the device_state paths in the revert-point tests rely on it exactly as
# written — confirm the prefix omission is intentional.
REVERT_POINT_TOPIC = "actuators/revert/point/somedevicepath/someactuationpoint"

# Give the module under test a logger (caplog assertions read from it) and
# replace ActuatorAgent's base class with a mocked Agent so handlers can run
# without a live message bus.
agent._log = logging.getLogger("test_logger")
ActuatorAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),)


@pytest.mark.actuator_unit
def test_handle_get_should_succeed():
    with get_actuator_agent() as actuator_agent:
        actuator_agent.handle_get(PEER, SENDER, BUS, GET_TOPIC, HEADERS, MESSAGE)

        actuator_agent.vip.rpc.call.assert_called_once()
        actuator_agent.vip.pubsub.publish.assert_called_once()


@pytest.mark.actuator_unit
def test_handle_get_should_handle_standard_error(caplog):
    # vip_identity=None leaves driver_vip_identity unset, forcing the
    # handler down its error-publishing path.
    with get_actuator_agent(vip_identity=None) as actuator_agent:
        actuator_agent.handle_get(PEER, SENDER, BUS, GET_TOPIC, HEADERS, MESSAGE)

        actuator_agent.vip.rpc.call.assert_not_called()
        actuator_agent.vip.pubsub.publish.assert_called_once()
        assert (
            caplog.records[-1].message
            == "Actuator Agent Error: {'type': 'AttributeError', "
            "'value': \"'ActuatorAgent' object has no attribute 'driver_vip_identity'\"}"
        )


@pytest.mark.actuator_unit
@pytest.mark.parametrize(
    "sender, device_state",
    [
        (
            SENDER,
            {"somepath": DeviceState("sender-1", "task-id-1", "anytime")},
        ),
        (
            # legacy compat sender is accepted as the lock holder too
            "pubsub.compat",
            {"somepath": DeviceState("pubsub.compat", "task-id-1", "anytime")},
        ),
    ],
)
def test_handle_set_should_succeed(sender, device_state):
    with get_actuator_agent(vip_rpc_call_res=MockedAsyncResult({"foo": "bar"}), device_state=device_state) as actuator_agent:

        actuator_agent.handle_set(PEER, sender, BUS, SET_TOPIC, HEADERS, MESSAGE)

        actuator_agent.vip.rpc.call.assert_called_once()
        actuator_agent.vip.pubsub.publish.assert_called()


@pytest.mark.actuator_unit
def test_handle_set_should_return_none_on_none_message(caplog):
    with get_actuator_agent(vip_identity=None) as actuator_agent:
        result = actuator_agent.handle_set(PEER, SENDER, BUS, SET_TOPIC, HEADERS, None)

        assert result is None
        actuator_agent.vip.pubsub.publish.assert_called_once()
        actuator_agent.vip.rpc.call.assert_not_called()
        assert (
            caplog.records[-1].message
            == "ValueError: {'type': 'ValueError', 'value': 'missing argument'}"
        )


@pytest.mark.actuator_unit
def test_handle_set_should_handle_type_error_on_invalid_sender(caplog):
    with get_actuator_agent(vip_identity=None) as actuator_agent:
        # sender=None is rejected before any RPC is attempted
        actuator_agent.handle_set(PEER, None, BUS, SET_TOPIC, HEADERS, MESSAGE)

        actuator_agent.vip.rpc.call.assert_not_called()
        actuator_agent.vip.pubsub.publish.assert_called_once()
        assert (
            caplog.records[-1].message == "Actuator Agent Error: {'type': 'TypeError', "
            "'value': 'Agent id must be a nonempty string'}"
        )


@pytest.mark.actuator_unit
def test_handle_set_should_handle_lock_error(caplog):
    # empty device_state means the sender holds no lock on the device
    with get_actuator_agent(vip_identity=None) as actuator_agent:
        actuator_agent.handle_set(PEER, SENDER, BUS, SET_TOPIC, HEADERS, MESSAGE)

        actuator_agent.vip.rpc.call.assert_not_called()
        actuator_agent.vip.pubsub.publish.assert_called_once()
        assert (
            caplog.records[-1].message == "Actuator Agent Error: {'type': 'LockError', "
            "'value': 'caller (sender-1) does not have this lock'}"
        )


@pytest.mark.actuator_unit
def test_handle_revert_point_should_succeed():
    device_state = {
        "actuators/revert/point/somedevicepath": DeviceState(
            "sender-1", "task-id-1", "anytime"
        )
    }

    with get_actuator_agent(device_state=device_state, vip_rpc_call_res=MockedAsyncResult({"foo": "bar"})) as actuator_agent:
        actuator_agent.handle_revert_point(
            PEER, SENDER, BUS, REVERT_POINT_TOPIC, HEADERS, MESSAGE
        )

        actuator_agent.vip.rpc.call.assert_called_once()
        actuator_agent.vip.pubsub.publish.assert_called_once()


@pytest.mark.actuator_unit
def test_handle_revert_point_should_handle_lock_error(caplog):
    with get_actuator_agent(vip_identity=None) as actuator_agent:
        actuator_agent.handle_revert_point(
            PEER, SENDER, BUS, REVERT_POINT_TOPIC, HEADERS, MESSAGE
        )

        actuator_agent.vip.rpc.call.assert_not_called()
        actuator_agent.vip.pubsub.publish.assert_called_once()
        assert (
            caplog.records[-1].message == "Actuator Agent Error: {'type': 'LockError', "
            "'value': 'caller does not have this lock'}"
        )


@pytest.mark.actuator_unit
def test_handle_revert_device_should_succeed():
    device_state = {
        "somedevicepath": DeviceState("sender-1", "task-id-1", "anytime")
    }

    with get_actuator_agent(device_state=device_state,
                            vip_rpc_call_res=MockedAsyncResult({"foo": "bar"})) as actuator_agent:
        actuator_agent.handle_revert_device(
            PEER, SENDER, BUS, REVERT_DEVICE_TOPIC, HEADERS, MESSAGE
        )

        actuator_agent.vip.rpc.call.assert_called_once()
        actuator_agent.vip.pubsub.publish.assert_called_once()


@pytest.mark.actuator_unit
def test_handle_revert_device_should_handle_lock_error(caplog):
    with get_actuator_agent(vip_identity=None) as actuator_agent:
        actuator_agent.handle_revert_device(
            PEER, SENDER, BUS, REVERT_DEVICE_TOPIC, HEADERS, MESSAGE
        )

        actuator_agent.vip.rpc.call.assert_not_called()
        actuator_agent.vip.pubsub.publish.assert_called_once()
        assert (
            caplog.records[-1].message == "Actuator Agent Error: {'type': 'LockError', "
            "'value': 'caller does not have this lock'}"
        )


@pytest.mark.actuator_unit
@pytest.mark.parametrize(
    "priority, success",
    [
        ("HIGH", True),
        ("LOW", True),
        ("LOW_PREEMPT", True),
        ("HIGH", False),
        ("LOW", False),
        ("LOW_PREEMPT", False),
    ],
)
def test_handle_schedule_request_should_succeed_on_new_schedule_request_type(
    priority, success
):
    headers = {
        "type": "NEW_SCHEDULE",
        "requesterID": "id-123",
        "taskID": "12345",
        "priority": priority,
    }

    # The handler publishes a result either way; success only changes payload.
    with get_actuator_agent(slot_requests_res=RequestResult(success, {}, "")) as actuator_agent:
        actuator_agent.handle_schedule_request(
            PEER, SENDER, BUS, REQUEST_TOPIC, headers, create_message()
        )

        actuator_agent.vip.pubsub.publish.assert_called()


@pytest.mark.actuator_unit
@pytest.mark.parametrize("success", [True, False])
def test_handle_schedule_request_should_succeed_on_cancel_schedule_request_type(success):
    headers = {"type": "CANCEL_SCHEDULE", "requesterID": "id-123", "taskID": "12345"}

    with get_actuator_agent(slot_requests_res=RequestResult(success, {}, "")) as actuator_agent:
        actuator_agent.handle_schedule_request(
            PEER, SENDER, BUS, REQUEST_TOPIC, headers, create_message()
        )

        actuator_agent.vip.pubsub.publish.assert_called()


@pytest.mark.actuator_unit
@pytest.mark.parametrize("invalid_request_type", ["bad request type", None])
def test_handle_schedule_request_should_log_invalid_request_type(
    invalid_request_type, caplog
):
    headers = {
        "type": invalid_request_type,
        "requesterID": "id-123",
        "taskID": "12345",
        "priority": "HIGH",
    }

    with get_actuator_agent(vip_identity=None) as actuator_agent:
        actuator_agent.handle_schedule_request(
            PEER, SENDER, BUS, REQUEST_TOPIC, headers, create_message()
        )

        actuator_agent.vip.pubsub.publish.assert_called()
        assert caplog.records[-1].message == "handle-schedule_request, invalid request type"


def create_message():
    # Builds a schedule request covering a 10-second window starting 10s from
    # now: [device topic, start time, end time] (times are UTC-aware strings).
    start = str(datetime.now(tz=tzutc()) + timedelta(seconds=10))
    end = str(datetime.now(tz=tzutc()) + timedelta(seconds=20))
    return ["campus/building/device1", start, end]
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# This material was prepared as an account of work sponsored by an agency of +# the United States Government. Neither the United States Government nor the +# United States Department of Energy, nor Battelle, nor any of their +# employees, nor any jurisdiction or organization that has cooperated in the +# development of these materials, makes any warranty, express or +# implied, or assumes any legal liability or responsibility for the accuracy, +# completeness, or usefulness or any information, apparatus, product, +# software, or process disclosed, or represents that its use would not infringe +# privately owned rights. Reference herein to any specific commercial product, +# process, or service by trade name, trademark, manufacturer, or otherwise +# does not necessarily constitute or imply its endorsement, recommendation, or +# favoring by the United States Government or any agency thereof, or +# Battelle Memorial Institute. The views and opinions of authors expressed +# herein do not necessarily state or reflect those of the +# United States Government or any agency thereof. +# +# PACIFIC NORTHWEST NATIONAL LABORATORY operated by +# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY +# under Contract DE-AC05-76RL01830 +# }}} + +""" +Unit test cases for testing actuator agent using rpc calls. 
+""" +import logging +from datetime import datetime, timedelta + +import pytest + +from services.core.ActuatorAgent.actuator import agent +from services.core.ActuatorAgent.actuator.agent import ActuatorAgent, LockError +from services.core.ActuatorAgent.actuator.scheduler import RequestResult, DeviceState +from services.core.ActuatorAgent.tests.actuator_fixtures import MockedAsyncResult, \ + get_actuator_agent +from volttrontesting.utils.utils import AgentMock +from volttron.platform.vip.agent import Agent + + +PRIORITY_LOW = "LOW" +SUCCESS = "SUCCESS" +FAILURE = "FAILURE" +REQUESTER_ID = "foo" +TASK_ID = "task-id" +TIME_SLOT_REQUESTS = [ + ["fakedriver0", str(datetime.now()), str(datetime.now() + timedelta(seconds=1))] +] + +agent._log = logging.getLogger("test_logger") +ActuatorAgent.__bases__ = (AgentMock.imitate(Agent, Agent()),) + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize("topic, point", [("path/topic", None), ("another/path/to/topic", 42)]) +def test_get_point_should_succeed(topic, point): + with get_actuator_agent(vip_rpc_call_res=MockedAsyncResult(10.0)) as actuator_agent: + result = actuator_agent.get_point(topic, point=point) + + actuator_agent.vip.rpc.call.assert_called_once() + assert result is not None + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize( + "point, device_state", + [ + ( + 42, + {"foo/bar": DeviceState("requester-id-1", "task-id-1", "anytime")}, + ), + ( + None, + {"foo": DeviceState("requester-id-1", "task-id-1", "anytime")}), + ], +) +def test_set_point_should_succeed(point, device_state): + requester_id = "requester-id-1" + topic = "foo/bar" + value = "some value" + + with get_actuator_agent(vip_message_peer=requester_id, device_state=device_state) as \ + actuator_agent: + result = actuator_agent.set_point(requester_id, topic, value, point=point) + + assert result is not None + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize("rpc_peer", [None, 42, []]) +def test_set_point_should_raise_type_error(rpc_peer): 
+ with pytest.raises(TypeError, match="Agent id must be a nonempty string"): + requester_id = "requester-id-1" + topic = "foo/bar" + value = "some value" + point = None + + with get_actuator_agent(vip_message_peer=rpc_peer) as actuator_agent: + actuator_agent.set_point(requester_id, topic, value, point=point) + + +@pytest.mark.actuator_unit +def test_set_point_should_raise_lock_error_on_non_matching_device(): + with pytest.raises(LockError): + requester_id = "requester-id-1" + topic = "foo/bar" + value = "some value" + + with get_actuator_agent(vip_message_peer="some rpc_peer") as actuator_agent: + actuator_agent.set_point(requester_id, topic, value) + + +@pytest.mark.actuator_unit +def test_scrape_all_should_succeed(): + with get_actuator_agent(vip_rpc_call_res=MockedAsyncResult({})) as actuator_agent: + topic = "whan/that/aprille" + + result = actuator_agent.scrape_all(topic) + + assert isinstance(result, dict) + + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize( + "topics", + [ + ["foo/bar"], + ["foo/bar", "sna/foo"], + [["dev1", "point1"]], + [["dev1", "point1"], ["dev2", "point2"]], + ], +) +def test_get_multiple_points_should_succeed(topics): + mocked_rpc_call_res = MockedAsyncResult(({"result": "value"}, {})) + with get_actuator_agent(vip_rpc_call_res=mocked_rpc_call_res) as actuator_agent: + results, errors = actuator_agent.get_multiple_points(topics) + + assert isinstance(results, dict) + assert isinstance(errors, dict) + assert len(errors) == 0 + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize("invalid_topics", [[(123,)], [(None)], [[123]], [[None]]]) +def test_get_multiple_points_should_return_errors(invalid_topics): + with get_actuator_agent() as actuator_agent: + + results, errors = actuator_agent.get_multiple_points(invalid_topics) + + assert isinstance(results, dict) + assert isinstance(errors, dict) + assert len(errors) == 1 + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize( + "topic_values, device_state", + [ + ([], {}), + ( 
+ [("foo/bar", "roma_value")], + {"foo": DeviceState("requester-id-1", "task-id-1", "anytime")}, + ), + ( + [("foo/bar", "roma_value"), ("sna/fu", "amor_value")], + { + "foo": DeviceState("requester-id-1", "task-id-1", "anytime"), + "sna": DeviceState("requester-id-1", "task-id-1", "anytime"), + }, + ), + ], +) +@pytest.mark.actuator_unit +def test_set_multiple_points_should_succeed(topic_values, device_state): + requester_id = "requester-id-1" + mocked_rpc_call_res = MockedAsyncResult(({})) + with get_actuator_agent(vip_message_peer=requester_id, device_state=device_state, + vip_rpc_call_res=mocked_rpc_call_res) as actuator_agent: + result = actuator_agent.set_multiple_points("request-id-1", topic_values) + + assert result == {} + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize("invalid_topic_values", [[(None,)], [(1234,)]]) +def test_set_multiple_points_should_raise_value_error(invalid_topic_values): + with pytest.raises(ValueError): + requester_id = "requester-id-1" + + with get_actuator_agent(vip_message_peer=requester_id) as actuator_agent: + actuator_agent.set_multiple_points("request-id-1", invalid_topic_values) + + +@pytest.mark.actuator_unit +def test_set_multiple_points_should_raise_lock_error_on_empty_devices(): + with pytest.raises(LockError): + requester_id = "requester-id-1" + topic_values = [("foo/bar", "roma_value")] + + with get_actuator_agent(vip_message_peer=requester_id) as actuator_agent: + actuator_agent.set_multiple_points("request-id-1", topic_values) + + +@pytest.mark.actuator_unit +def test_set_multiple_points_should_raise_lock_error_on_non_matching_requester(): + with pytest.raises(LockError): + requester_id = "wrong-requester" + topic_values = [("foo/bar", "roma_value")] + device_state = { + "foo": DeviceState("requester-id-1", "task-id-1", "anytime") + } + + with get_actuator_agent(vip_message_peer=requester_id, device_state=device_state) \ + as actuator_agent: + actuator_agent.set_multiple_points("request-id-1", topic_values) + 
+ +@pytest.mark.actuator_unit +@pytest.mark.parametrize("point", [None, "foobarpoint"]) +def test_revert_point_should_raise_lock_error_on_empty_devices(point): + with pytest.raises(LockError): + requester_id = "request-id-1" + topic = "foo/bar" + + with get_actuator_agent(vip_message_peer="requester-id-1") as actuator_agent: + actuator_agent.revert_point(requester_id, topic, point=point) + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize("point", [None, "foobarpoint"]) +def test_revert_point_should_raise_lock_error_on_non_matching_requester(point): + with pytest.raises(LockError): + device_state = { + "foo": DeviceState("requester-id-1", "task-id-1", "anytime") + } + requester_id = "request-id-1" + topic = "foo/bar" + + with get_actuator_agent(vip_message_peer="wrong-requester", device_state=device_state) \ + as actuator_agent: + actuator_agent.revert_point(requester_id, topic, point=point) + + +@pytest.mark.actuator_unit +def test_revert_device_should_raise_lock_error_on_empty_devices(): + with pytest.raises(LockError): + requester_id = "request-id-1" + topic = "foo/bar" + + with get_actuator_agent(vip_message_peer="requester-id-1") as actuator_agent: + actuator_agent.revert_device(requester_id, topic) + + +@pytest.mark.actuator_unit +def test_revert_device_should_raise_lock_error_on_non_matching_requester(): + with pytest.raises(LockError): + device_state = { + "foo/bar": DeviceState("requester-id-1", "task-id-1", "anytime") + } + requester_id = "request-id-1" + topic = "foo/bar" + + with get_actuator_agent(vip_message_peer="wrong-requester", device_state=device_state) \ + as actuator_agent: + actuator_agent.revert_device(requester_id, topic) + + +@pytest.mark.actuator_unit +def test_request_new_schedule_should_succeed(): + with get_actuator_agent() as actuator_agent: + result = actuator_agent.request_new_schedule(REQUESTER_ID, TASK_ID, + PRIORITY_LOW, TIME_SLOT_REQUESTS) + + assert result["result"] == SUCCESS + + +@pytest.mark.actuator_unit +def 
test_request_new_schedule_should_succeed_when_stop_start_times_overlap(): + start = str(datetime.now()) + end = str(datetime.now() + timedelta(seconds=1)) + end2 = str(datetime.now() + timedelta(seconds=2)) + time_slot_requests = [["fakedriver0", start, end], ["fakedriver0", end, end2]] + + with get_actuator_agent() as actuator_agent: + result = actuator_agent.request_new_schedule(REQUESTER_ID, TASK_ID, + PRIORITY_LOW, time_slot_requests) + + assert result["result"] == SUCCESS + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize( + "task_id, expected_info", + [ + (1234, "MALFORMED_REQUEST: TypeError: taskid must be a nonempty string"), + ("", "MALFORMED_REQUEST: TypeError: taskid must be a nonempty string"), + (None, "MISSING_TASK_ID"), + ("task-id-duplicate", "TASK_ID_ALREADY_EXISTS"), + ], +) +def test_request_new_schedule_should_fail_on_invalid_taskid(task_id, expected_info): + false_request_result = RequestResult(False, {}, expected_info) + + with get_actuator_agent(slot_requests_res=false_request_result) as actuator_agent: + result = actuator_agent.request_new_schedule(REQUESTER_ID, task_id, + PRIORITY_LOW, TIME_SLOT_REQUESTS) + + assert result["result"] == FAILURE + assert result["info"] == expected_info + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize( + "invalid_priority, expected_info", + [("LOW2", "INVALID_PRIORITY"), (None, "MISSING_PRIORITY")], +) +def test_request_new_schedule_should_fail_on_invalid_priority(invalid_priority, expected_info): + false_request_result = RequestResult(False, {}, expected_info) + + with get_actuator_agent(slot_requests_res=false_request_result) as actuator_agent: + result = actuator_agent.request_new_schedule(REQUESTER_ID, TASK_ID, + invalid_priority, TIME_SLOT_REQUESTS) + + assert result["result"] == FAILURE + assert result["info"] == expected_info + + +@pytest.mark.actuator_unit +@pytest.mark.parametrize( + "time_slot_request, expected_info", + [ + ( + [], + "MALFORMED_REQUEST_EMPTY"), + ( + [["fakedriver0", 
str(datetime.now()), ""]], + "MALFORMED_REQUEST: ParserError: String does not contain a date: ", + ), + ( + [["fakedriver0", str(datetime.now())]], + "MALFORMED_REQUEST: ValueError: " + "not enough values to unpack (expected 3, got 2)", + ), + ], +) +def test_request_new_schedule_should_fail_invalid_time_slot_requests(time_slot_request, + expected_info): + false_request_result = RequestResult(False, {}, expected_info) + + with get_actuator_agent(slot_requests_res=false_request_result) as actuator_agent: + result = actuator_agent.request_new_schedule( + REQUESTER_ID, TASK_ID, PRIORITY_LOW, time_slot_request + ) + + assert result["result"] == FAILURE + assert result["info"] == expected_info + + +@pytest.mark.actuator_unit +def test_request_cancel_schedule_should_succeed_happy_path(): + true_request_result = RequestResult( + True, {}, "" + ) + + with get_actuator_agent(cancel_schedule_result=true_request_result) as actuator_agent: + result = actuator_agent.request_cancel_schedule(REQUESTER_ID, TASK_ID) + + assert result["result"] == SUCCESS + + +@pytest.mark.actuator_unit +def test_request_cancel_schedule_should_fail_on_invalid_task_id(): + false_request_result = RequestResult( + False, {}, "TASK_ID_DOES_NOT_EXIST" + ) + invalid_task_id = "invalid-task-id" + + with get_actuator_agent(cancel_schedule_result=false_request_result) as actuator_agent: + result = actuator_agent.request_cancel_schedule(REQUESTER_ID, invalid_task_id) + + assert result["result"] == FAILURE + assert result["info"] == "TASK_ID_DOES_NOT_EXIST" diff --git a/services/core/DriverHTTPCache/README.md b/services/core/DriverHTTPCache/README.md deleted file mode 100644 index 9117412f02..0000000000 --- a/services/core/DriverHTTPCache/README.md +++ /dev/null @@ -1,96 +0,0 @@ -Driver HTTP Cache Agent -======================= - -The Driver HTTP Cache agent is a simple agent designed to fetch data from remote APIs and cache it for a duration -specified by the API. It requires no configuration to operate. 
JSON RPC calls exist for drivers to use to obtain data -from remote web APIs via HTTP requests. Request results are then cached by the agent. The RPC calls include an update -frequency parameter which is used to specify the data update interval of the remote API end point. If a new RPC call -is made between the timestamp of the previous request and that timestamp plus the update interval, cached data will be -returned. If an RPC call is made after the update interval has passed, then a new request HTTP request will be sent to -the remote API. - -Example GET request RPC call: - - def driver_data_get(self, driver_type, group_id, url, headers, params=None, body=None, - update_frequency=60, refresh=False): - - """ - Get the most up to date remote API driver data based on provided update - frequency using HTTP GET request - :param group_id: arbitrary identifier to separate driver data between - collections of devices - :param driver_type: String representation of the type of driver - :param url: String url for communicating with remote API - :param update_frequency: Frequency in seconds between remote API data - updates, defaults to 60 - :param headers: HTTP request headers dictionary for remote API specified by - driver - :param params: HTTP request parameters dictionary for remote API specified - by driver - :param body: HTTP request body dictionary for remote API specified by driver - :param refresh: If true, the Driver HTTP Cache agent will skip retrieving - cached data - :return: Remote API response data dictionary to be parsed by driver` - """ - -Example POST request RPC call: - - def driver_data_post(self, driver_type, group_id, url, headers, data=None, json=None, - update_frequency=60, refresh=False): - - """ - Get the most up to date remote API driver data based on provided update - frequency using HTTP POST request - :param group_id: arbitrary identifier to separate driver data between - collections of devices - :param driver_type: String representation of the 
type of driver - :param url: String url for communicating with remote API - :param update_frequency: Frequency in seconds between remote API data - updates, defaults to 60 - :param headers: HTTP request headers dictionary for remote API specified by - driver - :param data: HTTP request parameters dictionary for remote API specified - by driver - :param json: HTTP request body dictionary for remote API specified by driver - :param refresh: If true, Driver HTTP Cache agent will skip retrieving cached - data - :return: Remote API response data dictionary to be parsed by driver` - """ - -Usage example from driver: - - data = self.vip.rpc.call(, "driver_data_get", , - , url, headers, update_frequency=180, params=params, - refresh=refresh).get() - -The headers and params values are expected to be dictionaries to use as the -request headers and request data as if making an HTTP request. The update -frequency is specified by the calling agent (in this case the Ecobee driver) and -should reflect the amount of time between remote API updates (Ecobee updates the -data available from their API every 3 minutes). - -Installation ------------- - -These are the most basic installation steps for the Driver HTTP Cache agent. This guide -assumes the user is in the VOLTTRON_ROOT directory, and the VOLTTRON platform has -been installed and bootstrapped per the instructions in the VOLTTRON README. - - 1. If the platform has not been started: - - ./start-volttron - - 2. If the environment has not been activated - you should see (volttron) next to @ in your terminal window - - . env/bin/activate - - 3. Install the agent - - python scripts/install-agent.py -s services/core/DriverHTTPCache -i - - 4. Start the agent - - vctl start - -At this point the agent should be running and ready for driver RPC calls to a -remote API. 
diff --git a/services/core/DriverHTTPCache/conftest.py b/services/core/DriverHTTPCache/conftest.py deleted file mode 100644 index 68e5e611b1..0000000000 --- a/services/core/DriverHTTPCache/conftest.py +++ /dev/null @@ -1,6 +0,0 @@ -import sys - -from volttrontesting.fixtures.volttron_platform_fixtures import * - -# Add system path of the agent's directory -sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) diff --git a/services/core/DriverHTTPCache/driver_http_cache/agent.py b/services/core/DriverHTTPCache/driver_http_cache/agent.py deleted file mode 100644 index c54b90cb70..0000000000 --- a/services/core/DriverHTTPCache/driver_http_cache/agent.py +++ /dev/null @@ -1,253 +0,0 @@ -""" -Agent documentation goes here. -""" - -__docformat__ = 'reStructuredText' - -import datetime -import grequests -import json -import logging -import os -import requests -import sys -from volttron.platform import jsonapi -from volttron.platform.agent import utils -from volttron.platform.vip.agent import Agent, RPC - -_log = logging.getLogger(__name__) -utils.setup_logging() -__version__ = "0.1" - - -def driver_http_cache(config_path, **kwargs): - """Parses the Agent configuration and returns an instance of the agent created using that configuration. - :param config_path: Path to a configuration file. - :type config_path: str - :returns: DriverHTTPCache agent instance - :rtype: DriverHTTPCache - """ - try: - config = utils.load_config(config_path) - except Exception: - config = {} - - return DriverHTTPCache(**kwargs) - - -class DriverHTTPCache(Agent): - """ - Document for retrieving remote API driver data and caching it during it's update period - """ - - def __init__(self, **kwargs): - super(DriverHTTPCache, self).__init__(**kwargs) - self.default_config = {} - # Set a default configuration to ensure that self.configure is called immediately to setup the agent. 
- self.vip.config.set_default("config", self.default_config) - # Hook self.configure up to changes to the configuration file "config". - self.vip.config.subscribe(self.configure, actions=["NEW", "UPDATE"], pattern="config") - - def configure(self, config_name, action, contents): - """ - Set agent configuration from config store - :param config_name: Unused configuration name string - :param action: Unused configuration action - :param contents: Configuration store contents dictionary - """ - config = self.default_config.copy() - config.update(contents) - - @RPC.export - def get_version(self): - """ - :return: Agent version - """ - return __version__ - - - @RPC.export - def driver_data_get(self, driver_type, group_id, url, headers, params=None, body=None, - update_frequency=60, refresh=False): - """ - Get the most up to date remote API driver data based on provided update frequency - :param group_id: arbitrary identifier to separate driver data between collections of devices - :param driver_type: String representation of the type of driver - :param url: String url for communicating with remote API - :param update_frequency: Frequency in seconds between remote API data updates, defaults to 60 - :param headers: HTTP request headers dictionary for remote API specified by driver - :param params: HTTP request parameters dictionary for remote API specified by driver - :param body: HTTP request body dictionary for remote API specified by driver - :param refresh: If true, Driver HTTP Cache agent will skip retrieving cached data - :return: Remote API response data dictionary to be parsed by driver - """ - return self.get_driver_data(driver_type, group_id, "GET", url, headers, params=params, body=body, - update_frequency=update_frequency, refresh=refresh) - - @RPC.export - def driver_data_post(self, driver_type, group_id, url, headers, params=None, body=None, - update_frequency=60, refresh=False): - """ - Post the updated data using remote API - :param group_id: arbitrary 
identifier to separate driver data between collections of devices - :param driver_type: String representation of the type of driver - :param url: String url for communicating with remote API - :param update_frequency: Frequency in seconds between remote API data updates, defaults to 60 - :param headers: HTTP request headers dictionary for remote API specified by driver - :param params: HTTP request parameters dictionary for remote API specified by driver - :param body: HTTP request body dictionary for remote API specified by driver - :param refresh: If true, Driver HTTP Cache agent will skip retrieving cached data - :return: Remote API response data dictionary to be parsed by driver - """ - return self.get_driver_data(driver_type, group_id, "POST", url, headers, params=params, body=body, - update_frequency=update_frequency, refresh=refresh) - - def get_driver_data(self, driver_type, group_id, request_type, url, headers, params=None, body=None, - update_frequency=60, refresh=False): - """ - Get the most up to date remote API driver data based on provided update frequency - :param group_id: arbitrary identifier to separate driver data between collections of devices - :param request_type: HTTP request type for communicating with remote API - :param driver_type: String representation of the type of driver - :param url: String url for communicating with remote API - :param update_frequency: Frequency in seconds between remote API data updates, defaults to 60 - :param headers: HTTP request headers dictionary for remote API specified by driver - :param params: HTTP request parameters dictionary for remote API specified by driver - :param body: HTTP request body dictionary for remote API specified by driver - :param refresh: If true, Driver HTTP Cache agent will skip retrieving cached data - :return: Remote API response data dictionary to be parsed by driver - """ - # Input validation - if not isinstance(driver_type, str): - raise ValueError("Invalid driver type: {}, 
expected unique string".format(driver_type)) - if not isinstance(group_id, str): - raise ValueError("Invalid driver group ID: {}, expected unique string".format(group_id)) - if not isinstance(update_frequency, int): - raise ValueError("Invalid update frequency: {}, expected seconds".format(update_frequency)) - if not isinstance(headers, str): - raise ValueError("Invalid request headers: {}, expected json string".format(headers)) - if headers and isinstance(headers, str): - headers = json.loads(headers) - if params and isinstance(params, str): - params = json.loads(params) - if body and isinstance(body, str): - body = json.loads(body) - # Override if "fresh" data requested by driver - if refresh: - request_data = self._get_json_request( - driver_type, group_id, request_type, url, headers, params=params, body=body) - return request_data - else: - # try to get recently cached data - will throw exception if the dat is out of date - try: - return self._get_json_cache(driver_type, group_id, request_type, update_frequency) - # if no recently cached data is available, request data from remote API based on provided parameters - except RuntimeError as re: - request_data = self._get_json_request( - driver_type, group_id, request_type, url, headers, params=params, body=body) - return request_data - - def _get_json_cache(self, driver_type, group_id, request_type, update_frequency): - """ - - :param driver_type: String representation of the type of driver - :param group_id: arbitrary identifier to separate driver data between collections of devices - :param request_type: HTTP request type for communicating with remote API - used here for input validation - :param update_frequency: Frequency in seconds between remote API data updates, defaults to 60 - :return: Remote API response data dictionary from cache to be parsed by driver - """ - if request_type.upper() not in ["POST", "GET"]: - raise ValueError("Unsupported request type for Driver HTTP Cache Agent: 
{}".format(request_type)) - data_path = "{}_{}.json".format(driver_type, group_id) - update_delta = datetime.timedelta(seconds=update_frequency) - if not os.path.isfile(data_path): - raise RuntimeError("Data file for driver {}, id {} not found".format(driver_type, group_id)) - else: - _log.debug("Checking cache at: {}".format(data_path)) - with open(data_path) as data_file: - json_data = json.load(data_file) - request_timestamp = utils.parse_timestamp_string(json_data.get("request_timestamp")) - next_update_timestamp = request_timestamp + update_delta - if next_update_timestamp < datetime.datetime.now(): - raise RuntimeError("Request timestamp out of date, send new request") - else: - return json_data - - def _get_json_request(self, driver_type, group_id, request_type, url, headers, params=None, body=None): - """ - - :param group_id: arbitrary identifier to separate driver data between collections of devices - :param request_type: String representation of the type of driver - :param driver_type: HTTP request type for communicating with remote API - :param url: String url for communicating with remote API - :param headers: HTTP request headers dictionary for remote API specified by driver - :param params: HTTP request parameters dictionary for remote API specified by driver - :param body: HTTP request body dictionary for remote API specified by driver - :return: Remote API response data dictionary from remote API to be parsed by driver - """ - _log.debug("Getting driver response from remote.") - if request_type.upper() not in ["POST", "GET"]: - raise ValueError("Unsupported request type for Driver HTTP Cache Agent: {}".format(request_type)) - response = self.grequests_wrapper(request_type, url, headers, params=params, body=body) - json_data = { - "request_timestamp": utils.format_timestamp(datetime.datetime.now()), - "request_response": response - } - with open("{}_{}.json".format(driver_type, group_id), "w") as data_file: - jsonapi.dump(json_data, data_file) - 
return json_data - - def grequests_wrapper(self, request_type, url, headers, params=None, body=None): - """ - Wrapper around GRequests GET and POST methods with response handling for use with agent Mock tests - :param request_type: HTTP request type for communicating with remote API - :param url: String url for communicating with remote API - :param headers: HTTP request headers dictionary for remote API specified by driver - :param params: HTTP request parameters dictionary for remote API specified by driver - :param body: HTTP request body dictionary for remote API specified by driver - :return: Remote API response body - """ - # Handle based on appropriate request type, PUT not supported - if request_type.upper() == "GET": - request = grequests.get(url, verify=requests.certs.where(), params=params, headers=headers, timeout=3) - elif request_type.upper() == "POST": - request = grequests.post(url, verify=requests.certs.where(), data=params, headers=headers, json=body, - timeout=3) - else: - raise ValueError("Unsupported request type {} for DriverHTTPCache agent.".format(request_type)) - # Send request and extract data from response - response = grequests.map([request])[0] - self.handle_response_code(response.status_code, response.text) - headers = response.headers - if "json" in headers.get("Content-Type"): - return response.json() - else: - return response.content - - @staticmethod - def handle_response_code(response_code, text): - """ - Make sure response code from Ecobee indicates success - :param response_code: HTTP response code - :param text: HTTP request text mapped to response code - """ - # 200 code indicates successful request - if response_code == 200: - return - else: - raise RuntimeError("Request to Ecobee failed with response code {}: {}.".format(response_code, text)) - - -def main(): - """ - Main method called to start the agent. 
- """ - utils.vip_main(driver_http_cache, identity="platform.drivercache", version=__version__) - - -if __name__ == '__main__': - # Entry point for script - try: - sys.exit(main()) - except KeyboardInterrupt: - pass diff --git a/services/core/DriverHTTPCache/setup.py b/services/core/DriverHTTPCache/setup.py deleted file mode 100644 index 3b8a752094..0000000000 --- a/services/core/DriverHTTPCache/setup.py +++ /dev/null @@ -1,28 +0,0 @@ -from setuptools import setup, find_packages - -MAIN_MODULE = 'agent' - -# Find the agent package that contains the main module -packages = find_packages('.') -agent_package = 'driver_http_cache' - -# Find the version number from the main module -agent_module = agent_package + '.' + MAIN_MODULE -_temp = __import__(agent_module, globals(), locals(), ['__version__'], 0) -__version__ = _temp.__version__ - -# Setup -setup( - name=agent_package + 'agent', - version=__version__, - author="James Larson", - author_email="james.larson@pnnl.gov", - description="HTTP request cache proxy agent for use with drivers", - install_requires=['volttron'], - packages=packages, - entry_points={ - 'setuptools.installation': [ - 'eggsecutable = ' + agent_module + ':main', - ] - } -) diff --git a/services/core/DriverHTTPCache/tests/test_driverhttpcache_agent.py b/services/core/DriverHTTPCache/tests/test_driverhttpcache_agent.py deleted file mode 100644 index 524e5fbf79..0000000000 --- a/services/core/DriverHTTPCache/tests/test_driverhttpcache_agent.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- {{{ -# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: -# -# Copyright 2019, Battelle Memorial Institute. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# This material was prepared as an account of work sponsored by an agency of -# the United States Government. Neither the United States Government nor the -# United States Department of Energy, nor Battelle, nor any of their -# employees, nor any jurisdiction or organization that has cooperated in the -# development of these materials, makes any warranty, express or -# implied, or assumes any legal liability or responsibility for the accuracy, -# completeness, or usefulness or any information, apparatus, product, -# software, or process disclosed, or represents that its use would not infringe -# privately owned rights. Reference herein to any specific commercial product, -# process, or service by trade name, trademark, manufacturer, or otherwise -# does not necessarily constitute or imply its endorsement, recommendation, or -# favoring by the United States Government or any agency thereof, or -# Battelle Memorial Institute. The views and opinions of authors expressed -# herein do not necessarily state or reflect those of the -# United States Government or any agency thereof. 
-# -# PACIFIC NORTHWEST NATIONAL LABORATORY operated by -# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY -# under Contract DE-AC05-76RL01830 -# }}} - -import datetime -import json -import logging -import mock -import os -import pytest -from volttron.platform.agent import utils -from volttron.platform.vip.agent import Agent -from volttrontesting.utils.utils import AgentMock -from services.core.DriverHTTPCache.driver_http_cache.agent import DriverHTTPCache - -__version__ = "0.1.0" - -utils.setup_logging() -_log = logging.getLogger(__name__) - -DriverHTTPCache.__bases__ = (AgentMock.imitate(Agent, Agent()),) -agent = DriverHTTPCache() - -HEADERS = json.dumps( - { - "Accept": "application/json", - "Accept-Language": "en-US" - } -) -TEST_URL = "http://localhost:443/" - - -def get_mock_response(obj, request_type, url, headers, params=None, body=None): - """ - - :param obj: - :param request_type: - :param url: - :param headers: - :param params: - :param body: - :return: - """ - return { - "content": { - "request_type": request_type.upper(), - "test": "test" - } - } - - -@mock.patch.object(DriverHTTPCache, 'grequests_wrapper', get_mock_response) -@pytest.mark.driver -def test_get_json_request(): - """ - """ - # set up group and driver ids for test - group_id = "testid" - driver_type = "testdriver" - # filepath to validate later - data_path = "{}_{}.json".format(driver_type, group_id) - - # Pre-Clean up data file - if os.path.isfile(data_path): - os.remove(data_path) - - # Check typical get request - response = agent._get_json_request(driver_type, group_id, "GET", TEST_URL, HEADERS) - request_response = response.get("request_response") - content = request_response.get('content') - assert content.get('request_type') == "GET" - assert content.get('test') == "test" - - # compare stored data - assert os.path.isfile(data_path) - with open(data_path, "r") as data_file: - saved_data = json.load(data_file) - request_timestring = saved_data.get("request_timestamp") - 
get_request_timestamp = utils.parse_timestamp_string(request_timestring) - assert get_request_timestamp < datetime.datetime.now() - request_response = saved_data.get("request_response") - content = request_response.get('content') - assert content.get('request_type') == "GET" - assert content.get('test') == "test" - - # Check get request bad capitals - should still work - response = agent._get_json_request(driver_type, group_id, "get", TEST_URL, HEADERS) - request_response = response.get("request_response") - content = request_response.get('content') - assert content.get('request_type') == "GET" - assert content.get('test') == "test" - - # Check POST with same rules as GET - response = agent._get_json_request(driver_type, group_id, "POST", TEST_URL, HEADERS) - request_response = response.get("request_response") - content = request_response.get('content') - assert content.get('request_type') == "POST" - assert content.get('test') == "test" - - assert os.path.isfile(data_path) - with open(data_path, "r") as data_file: - saved_data = json.load(data_file) - request_timestring = saved_data.get("request_timestamp") - post_request_timestamp = utils.parse_timestamp_string(request_timestring) - assert get_request_timestamp < post_request_timestamp < datetime.datetime.now() - request_response = saved_data.get("request_response") - content = request_response.get('content') - assert content.get('request_type') == "POST" - assert content.get('test') == "test" - - response = agent._get_json_request(driver_type, group_id, "post", TEST_URL, HEADERS) - request_response = response.get("request_response") - content = request_response.get('content') - assert content.get('request_type') == "POST" - assert content.get('test') == "test" - - # Currently only GET and POST supported, others should throw value error below - with pytest.raises(ValueError, match=r"Unsupported request type for Driver HTTP Cache Agent: .+"): - agent._get_json_request("test", "delete", "test", TEST_URL, HEADERS) - 
- # Clean up data file - if os.path.isfile(data_path): - os.remove(data_path) - - -@pytest.mark.driver -def test_get_json_cache(): - """ - - :return: - """ - group_id = "testid" - driver_type = "testdriver" - data_path = "{}_{}.json".format(driver_type, group_id) - - # Pre-Clean up data file - if os.path.isfile(data_path): - os.remove(data_path) - - update_frequency = 60 - - with pytest.raises(RuntimeError, match=r"Data file for driver.*"): - agent._get_json_cache(driver_type, group_id, "get", update_frequency) - - now = datetime.datetime.now() - - contents = { - "request_response": { - "content": { - "request_type": "POST", - "test": "test" - } - } - } - - store_cache = contents.copy() - store_cache["request_timestamp"] = utils.format_timestamp(now) - - with open(data_path, "w") as data_file: - json.dump(store_cache, data_file) - - read_cache = agent._get_json_cache(driver_type, group_id, "get", update_frequency) - - utils.parse_timestamp_string(read_cache.get("request_timestamp")) - assert read_cache == store_cache - - store_cache = contents.copy() - store_cache["request_timestamp"] = utils.format_timestamp(now - datetime.timedelta(seconds=120)) - - with open(data_path, "w") as data_file: - json.dump(store_cache, data_file) - - with pytest.raises(RuntimeError, match=r"Request timestamp out of date, send new request"): - agent._get_json_cache(driver_type, group_id, "get", update_frequency) - - # Clean up data file - if os.path.isfile(data_path): - os.remove(data_path) - - -@mock.patch.object(DriverHTTPCache, 'grequests_wrapper', get_mock_response) -@pytest.mark.parametrize("method", ["GET", "POST"]) -@pytest.mark.driver -def test_get_driver_data(method): - """ - - :return: - """ - content = get_mock_response(None, method, None, None) - group_id = "testid" - group2_id = "testid2" - driver_type = "testdriver" - driver2_type = "test2driver" - data_path = "{}_{}.json".format(driver_type, group_id) - - # Pre-Clean up data file - if os.path.isfile(data_path): - 
os.remove(data_path) - cleanup_path = "{}_{}.json".format(driver2_type, group_id) - if os.path.isfile(cleanup_path): - os.remove(cleanup_path) - cleanup_path = "{}_{}.json".format(driver2_type, group2_id) - if os.path.isfile(cleanup_path): - os.remove(cleanup_path) - - update_frequency = 120 - - driver_data_start = None - if method == "GET": - driver_data_start = agent.driver_data_get(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - elif method == "POST": - driver_data_start = agent.driver_data_post(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - - # First set of data should contain entirely new response - utils.parse_timestamp_string(driver_data_start.get("request_timestamp")) - assert driver_data_start.get("request_response") == content - - # Second set of data should be an exact copy, since a new response should not have been sent - driver_data_repeat = None - if method == "GET": - driver_data_repeat = agent.driver_data_get(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - elif method == "POST": - driver_data_repeat = agent.driver_data_post(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - - assert driver_data_repeat == driver_data_start - - # Now lets try "jumping ahead in time a bit - this should result in a new request - now = datetime.datetime.now() - store_cache = content.copy() - store_cache["request_timestamp"] = utils.format_timestamp(now - datetime.timedelta(seconds=120)) - - with open(data_path, "w") as data_file: - json.dump(store_cache, data_file) - - driver_data_new = None - if method == "GET": - driver_data_new = agent.driver_data_get(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - elif method == "POST": - driver_data_new = agent.driver_data_post(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - - assert 
utils.parse_timestamp_string(driver_data_new.get("request_timestamp")) > now - assert driver_data_new.get("request_response") == content - - driver_data_refresh = None - if method == "GET": - driver_data_refresh = agent.driver_data_get(driver_type, group_id, TEST_URL, HEADERS, refresh=True, - update_frequency=update_frequency) - if method == "POST": - driver_data_refresh = agent.driver_data_post(driver_type, group_id, TEST_URL, HEADERS, refresh=True, - update_frequency=update_frequency) - - assert utils.parse_timestamp_string(driver_data_refresh.get("request_timestamp")) > utils.parse_timestamp_string( - driver_data_new.get("request_timestamp")) - - # If the data file is removed, we should get a new response - os.remove(data_path) - - driver_data_rm = None - if method == "GET": - driver_data_rm = agent.driver_data_get(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - if method == "POST": - driver_data_rm = agent.driver_data_post(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - - assert utils.parse_timestamp_string(driver_data_rm.get("request_timestamp")) > now - assert driver_data_rm.get("request_response") == content - - # And if we repeat once more, it should be the same as the previous - driver_data_rerepeat = None - if method == "GET": - driver_data_rerepeat = agent.driver_data_get(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - if method == "POST": - driver_data_rerepeat = agent.driver_data_post(driver_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - - assert driver_data_rerepeat == driver_data_rm - - # Other driver types shouldn't get confused (in the test we'd expect the timestamps to change) - driver2_data = None - if method == "GET": - driver2_data = agent.driver_data_get(driver2_type, group_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - if method == "POST": - driver2_data = agent.driver_data_post(driver2_type, group_id, 
TEST_URL, HEADERS, - update_frequency=update_frequency) - - assert utils.parse_timestamp_string(driver2_data.get("request_timestamp")) > utils.parse_timestamp_string( - driver_data_rerepeat.get("request_timestamp")) - - # Other group ids shouldn't get confused (in the test we'd expect the timestamps to change) - group2_data = None - if method == "GET": - group2_data = agent.driver_data_get(driver2_type, group2_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - if method == "POST": - group2_data = agent.driver_data_post(driver2_type, group2_id, TEST_URL, HEADERS, - update_frequency=update_frequency) - - assert utils.parse_timestamp_string(group2_data.get("request_timestamp")) > utils.parse_timestamp_string( - driver2_data.get("request_timestamp")) - - # Clean up data file - if os.path.isfile(data_path): - os.remove(data_path) - cleanup_path = "{}_{}.json".format(driver2_type, group_id) - if os.path.isfile(cleanup_path): - os.remove(cleanup_path) - cleanup_path = "{}_{}.json".format(driver2_type, group2_id) - if os.path.isfile(cleanup_path): - os.remove(cleanup_path) diff --git a/services/core/ForwardHistorian/forwarder/agent.py b/services/core/ForwardHistorian/forwarder/agent.py index 709996ad2b..1faebf22f5 100644 --- a/services/core/ForwardHistorian/forwarder/agent.py +++ b/services/core/ForwardHistorian/forwarder/agent.py @@ -218,19 +218,29 @@ def _capture_device_data(self, peer, sender, bus, topic, headers, message): # will be kept. 
if _filter in device: for point in point_list: - # Only points in the point list will be added to the message payload - if point in message[0]: - msg[0][point] = message[0][point] - msg[1][point] = message[1][point] + # devices all publish + if isinstance(message, list): + # Only points in the point list will be added to the message payload + if point in message[0]: + msg[0][point] = message[0][point] + msg[1][point] = message[1][point] + else: + # other devices publish (devices/campus/building/device/point) + msg = None + if point in device: + msg = message + # if the point in in the parsed topic then exit for loop + break + if (isinstance(msg, list) and not msg[0]) or \ + (isinstance(msg, (float, int, str)) and msg is None): + _log.debug("Topic: {} - is not in configured to be forwarded".format(topic)) + return else: msg = message except Exception as e: _log.debug("Error handling device_data_filter. {}".format(e)) msg = message - if not msg[0]: - _log.debug("Topic: {} - is not in configured to be forwarded".format(topic)) - else: - self.capture_data(peer, sender, bus, topic, headers, msg) + self.capture_data(peer, sender, bus, topic, headers, msg) def _capture_log_data(self, peer, sender, bus, topic, headers, message): self.capture_data(peer, sender, bus, topic, headers, message) diff --git a/services/core/MasterDriverAgent/master_driver/agent.py b/services/core/MasterDriverAgent/master_driver/agent.py index 51ed9137d3..8c5f8b6ea1 100644 --- a/services/core/MasterDriverAgent/master_driver/agent.py +++ b/services/core/MasterDriverAgent/master_driver/agent.py @@ -40,7 +40,7 @@ import sys import gevent from collections import defaultdict -from volttron.platform.vip.agent import Agent, Core, RPC +from volttron.platform.vip.agent import Agent, RPC from volttron.platform.agent import utils from volttron.platform.agent import math_utils from volttron.platform.agent.known_identities import PLATFORM_DRIVER @@ -95,7 +95,7 @@ def get_config(name, default=None): 
max_open_sockets = get_config('max_open_sockets', None) - #TODO: update the default after scalability testing. + # TODO: update the default after scalability testing. max_concurrent_publishes = get_config('max_concurrent_publishes', 10000) driver_config_list = get_config('driver_config_list') @@ -132,12 +132,12 @@ def get_config(name, default=None): class MasterDriverAgent(Agent): def __init__(self, driver_config_list, scalability_test = False, - scalability_test_iterations = 3, - driver_scrape_interval = 0.02, - group_offset_interval = 0.0, - max_open_sockets = None, - max_concurrent_publishes = 10000, - system_socket_limit = None, + scalability_test_iterations=3, + driver_scrape_interval=0.02, + group_offset_interval=0.0, + max_open_sockets=None, + max_concurrent_publishes=10000, + system_socket_limit=None, publish_depth_first_all=True, publish_breadth_first_all=False, publish_depth_first=False, @@ -212,14 +212,14 @@ def configure_main(self, config_name, action, contents): configure_socket_lock(max_open_sockets) else: configure_socket_lock() - _log.warn("No limit set on the maximum number of concurrently open sockets. " - "Consider setting max_open_sockets if you plan to work with 800+ modbus devices.") + _log.warning("No limit set on the maximum number of concurrently open sockets. " + "Consider setting max_open_sockets if you plan to work with 800+ modbus devices.") self.max_concurrent_publishes = config['max_concurrent_publishes'] max_concurrent_publishes = int(self.max_concurrent_publishes) if max_concurrent_publishes < 1: - _log.warn("No limit set on the maximum number of concurrent driver publishes. " - "Consider setting max_concurrent_publishes if you plan to work with many devices.") + _log.warning("No limit set on the maximum number of concurrent driver publishes. 
" + "Consider setting max_concurrent_publishes if you plan to work with many devices.") else: _log.info("maximum concurrent driver publishes limited to " + str(max_concurrent_publishes)) configure_publish_lock(max_concurrent_publishes) @@ -240,22 +240,25 @@ def configure_main(self, config_name, action, contents): else: if self.max_open_sockets != config["max_open_sockets"]: - _log.info("The master driver must be restarted for changes to the max_open_sockets setting to take effect") + _log.info("The master driver must be restarted for changes to the max_open_sockets setting to take " + "effect") if self.max_concurrent_publishes != config["max_concurrent_publishes"]: - _log.info("The master driver must be restarted for changes to the max_concurrent_publishes setting to take effect") + _log.info("The master driver must be restarted for changes to the max_concurrent_publishes setting to " + "take effect") if self.scalability_test != bool(config["scalability_test"]): if not self.scalability_test: _log.info( "The master driver must be restarted with scalability_test set to true in order to run a test.") if self.scalability_test: - _log.info( - "A scalability test may not be interrupted. Restarting the driver is required to stop the test.") + _log.info("A scalability test may not be interrupted. 
Restarting the driver is required to stop " + "the test.") try: - if self.scalability_test_iterations != int(config["scalability_test_iterations"]) and self.scalability_test: - _log.info( - "A scalability test must be restarted for the scalability_test_iterations setting to take effect.") + if self.scalability_test_iterations != int(config["scalability_test_iterations"]) and \ + self.scalability_test: + _log.info("A scalability test must be restarted for the scalability_test_iterations setting to " + "take effect.") except ValueError: pass @@ -305,7 +308,7 @@ def configure_main(self, config_name, action, contents): return if (self.driver_scrape_interval != driver_scrape_interval or - self.group_offset_interval != group_offset_interval): + self.group_offset_interval != group_offset_interval): self.driver_scrape_interval = driver_scrape_interval self.group_offset_interval = group_offset_interval @@ -399,7 +402,8 @@ def scrape_starting(self, topic): self.waiting_to_finish = set(self.instances.keys()) if topic not in self.waiting_to_finish: - _log.warning(topic + " started twice before test finished, increase the length of scrape interval and rerun test") + _log.warning( + f"{topic} started twice before test finished, increase the length of scrape interval and rerun test") def scrape_ending(self, topic): if not self.scalability_test: @@ -408,7 +412,8 @@ def scrape_ending(self, topic): try: self.waiting_to_finish.remove(topic) except KeyError: - _log.warning(topic + " published twice before test finished, increase the length of scrape interval and rerun test") + _log.warning( + f"{topic} published twice before test finished, increase the length of scrape interval and rerun test") if not self.waiting_to_finish: end = datetime.now() @@ -438,7 +443,7 @@ def get_point(self, path, point_name, **kwargs): :param point_name: set point :type point_name: str :param kwargs: additional arguments for the device - :type arguments pointer + :type kwargs: arguments pointer """ return 
self.instances[path].get_point(point_name, **kwargs) @@ -452,9 +457,9 @@ def set_point(self, path, point_name, value, **kwargs): :param point_name: set point :type point_name: str :param value: value to set - :type int/float/bool + :type value: int/float/bool :param kwargs: additional arguments for the device - :type arguments pointer + :type kwargs: arguments pointer """ if path in self._override_devices: raise OverrideError( @@ -479,9 +484,8 @@ def set_multiple_points(self, path, point_names_values, **kwargs): :type path: str :param point_names_values: list of points and corresponding values :type point_names_values: list of tuples - :param value: value to set :param kwargs: additional arguments for the device - :type arguments pointer + :type kwargs: arguments pointer """ if path in self._override_devices: raise OverrideError( @@ -510,7 +514,7 @@ def revert_point(self, path, point_name, **kwargs): :param point_name: set point to revert :type point_name: str :param kwargs: additional arguments for the device - :type arguments pointer + :type kwargs: arguments pointer """ if path in self._override_devices: raise OverrideError( @@ -527,7 +531,7 @@ def revert_device(self, path, **kwargs): :param path: device path :type path: str :param kwargs: additional arguments for the device - :type arguments pointer + :type kwargs: arguments pointer """ if path in self._override_devices: raise OverrideError( @@ -560,7 +564,7 @@ def set_override_on(self, pattern, duration=0.0, failsafe_revert=True, staggered def _set_override_on(self, pattern, duration=0.0, failsafe_revert=True, staggered_revert=False, from_config_store=False): """Turn on override condition on all devices matching the pattern. It schedules an event to keep track of - the duration over which override has to be applied. New override patterns and corresponnding end times are + the duration over which override has to be applied. New override patterns and corresponding end times are stored in config store. 
:param pattern: Override pattern to be applied. For example, :type pattern: str @@ -574,15 +578,11 @@ def _set_override_on(self, pattern, duration=0.0, failsafe_revert=True, staggere :param from_config_store: Flag to indicate if this function is called from config store callback :type from_config_store: boolean """ - stagger_interval = 0.05 #sec - pattern = pattern.lower() - + stagger_interval = 0.05 # sec # Add to override patterns set self._override_patterns.add(pattern) i = 0 - for name in self.instances.keys(): - name = name.lower() i += 1 if fnmatch.fnmatch(name, pattern): # If revert to default state is needed @@ -593,7 +593,6 @@ def _set_override_on(self, pattern, duration=0.0, failsafe_revert=True, staggere self.core.spawn(self.instances[name].revert_all()) # Set override self._override_devices.add(name) - config_update = False # Set timer for interval of override condition config_update = self._update_override_interval(duration, pattern) if config_update and not from_config_store: @@ -653,14 +652,11 @@ def get_override_patterns(self): def _set_override_off(self, pattern): """Turn off override condition on all devices matching the pattern. It removes the pattern from the override - patterns set, clears the list of overriden devices and reevaluates the state of devices. It then cancels the + patterns set, clears the list of overridden devices and reevaluates the state of devices. It then cancels the pending override event and removes pattern from the config store. :param pattern: Override pattern to be removed. 
:type pattern: str """ - - pattern = pattern.lower() - # If pattern exactly matches if pattern in self._override_patterns: self._override_patterns.discard(pattern) @@ -671,7 +667,6 @@ def _set_override_off(self, pattern): # Build override devices list again for pat in self._override_patterns: for device in self.instances: - device = device.lower() if fnmatch.fnmatch(device, pat): self._override_devices.add(device) @@ -692,14 +687,14 @@ def _update_override_interval(self, interval, pattern): end time is greater than old one, the event is cancelled and new event is scheduled. :param interval override duration. If interval is <= 0.0, implies indefinite duration - :type float + :type pattern: float :param pattern: Override pattern. :type pattern: str :return Flag to indicate if update is done or not. """ - if interval <= 0.0: # indicative of indefinite duration + if interval <= 0.0: # indicative of indefinite duration if pattern in self._override_interval_events: - # If override duration is indifinite, do nothing + # If override duration is indefinite, do nothing if self._override_interval_events[pattern] is None: return False else: @@ -746,9 +741,9 @@ def _cancel_override(self, pattern): def _update_override_state(self, device, state): """ - If a new device is added, it is checked to see if the device is part of the list of overriden patterns. If so, - it is added to the list of overriden devices. Similarly, if a device is being removed, it is also removed - from list of overriden devices (if exists). + If a new device is added, it is checked to see if the device is part of the list of overridden patterns. If so, + it is added to the list of overridden devices. Similarly, if a device is being removed, it is also removed + from list of overridden devices (if exists). 
:param device: device to be removed :type device: str :param state: 'add' or 'remove' @@ -757,13 +752,13 @@ def _update_override_state(self, device, state): device = device.lower() if state == 'add': - # If device falls under the existing overriden patterns, then add it to list of overriden devices. + # If device falls under the existing overridden patterns, then add it to list of overridden devices. for pattern in self._override_patterns: if fnmatch.fnmatch(device, pattern): self._override_devices.add(device) return else: - # If device is in list of overriden devices, remove it. + # If device is in list of overridden devices, remove it. if device in self._override_devices: self._override_devices.remove(device) diff --git a/services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py b/services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py index 5488557133..d63908f6b9 100644 --- a/services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py +++ b/services/core/MasterDriverAgent/master_driver/interfaces/ecobee.py @@ -36,26 +36,30 @@ # under Contract DE-AC05-76RL01830 # }}} +import datetime import gevent import grequests -import json import logging import requests +from requests.exceptions import HTTPError from requests.packages.urllib3.connection import ConnectionError, NewConnectionError -from master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert -from volttron.platform.jsonrpc import RemoteError +from volttron.platform import jsonapi +from volttron.platform.agent import utils from volttron.platform.agent.known_identities import CONFIGURATION_STORE, PLATFORM_DRIVER +from volttron.utils.persistance import PersistentDict +from services.core.MasterDriverAgent.master_driver.interfaces import BaseInterface, BaseRegister, BasicRevert -_log = logging.getLogger(__name__) -__version__ = "1.0" - +AUTH_CONFIG_PATH = "drivers/auth/ecobee_{}" THERMOSTAT_URL = 'https://api.ecobee.com/1/thermostat' THERMOSTAT_HEADERS = { 'Content-Type': 
'application/json;charset=UTF-8', 'Authorization': 'Bearer {}' } +_log = logging.getLogger(__name__) +__version__ = "1.0" + class Interface(BasicRevert, BaseInterface): """ @@ -65,179 +69,69 @@ class Interface(BasicRevert, BaseInterface): def __init__(self, **kwargs): super(Interface, self).__init__(**kwargs) # Configuration value defaults - self.config_dict = None - self.cache_identity = None - self.ecobee_id = None - self.group_id = None - self.api_key = None + self.config_dict = {} + self.api_key = "" + self.ecobee_id = -1 + # which agent is being used as the caching agent + self.cache = None + # Authorization tokens self.refresh_token = None self.access_token = None self.authorization_code = None - self.pin = None - self.authenticated = False - # Config name for updating config during auth update - self.config_name = None + self.authorization_stage = "UNAUTHORIZED" + # Config path for storing Ecobee auth information in config store, not user facing + self.auth_config_path = "" # Un-initialized data response from Driver Cache agent - self.ecobee_data = None - # Ecobee registers are of non-standard datatypes, so override existing register type dictionary - self.registers = { - ('hold', False): [], - ('hold', True): [], - ('setting', False): [], - ('setting', True): [], - ('status', True): [], - ('vacation', False): [], - ('programs', False): [] - } + self.thermostat_data = None # Un-initialized greenlet for querying cache agent - self.poll_greenlet = None + self.poll_greenlet_thermostats = None def configure(self, config_dict, registry_config_str): """ - Configure agent with tokens and polling parameters - :param config_dict: Device configuration dictionary from config store - :param registry_config_str: registry configuration string from config store - """ - # populate class values from configuration store - _log.debug("Starting Ecobee driver configuration.") - self.config_dict = config_dict - self.cache_identity = config_dict.get("CACHE_IDENTITY") - if not 
self.cache_identity: - raise ValueError( - "Ecobee configuration requires identity of Driver HTTP Cache Agent installed on the platform.") - self.api_key = config_dict.get('API_KEY') - self.refresh_token = config_dict.get('REFRESH_TOKEN') - self.access_token = config_dict.get('ACCESS_TOKEN') - self.authorization_code = config_dict.get('AUTHORIZATION_CODE') - self.pin = config_dict.get("PIN") - self.ecobee_id = config_dict.get('DEVICE_ID') - self.group_id = config_dict.get("GROUP_ID", "default") - self.config_name = config_dict.get("config_name") - if not isinstance(self.config_name, str): - raise ValueError("Ecobee driver requires config_name string in driver configuration for authentication " - "updates") + Interface configuration callback + :param config_dict: Driver configuration dictionary + :param registry_config_str: Driver registry configuration dictionary + """ + self.config_dict.update(config_dict) + self.api_key = self.config_dict.get("API_KEY") + self.ecobee_id = self.config_dict.get('DEVICE_ID') if not isinstance(self.ecobee_id, int): try: self.ecobee_id = int(self.ecobee_id) except ValueError: raise ValueError( - "Ecobee driver requires Ecobee device identifier as int, got: {}".format(self.ecobee_id)) - # Update auth tokens as necessary using Ecobee API endpoints - if self.authorization_code is not None: - _log.info("Ecobee using existing Ecobee authorization code.") - self.authenticated = True - try: - self.refresh_tokens() - except Exception as rf: - _log.debug("Failed to refresh tokens with existing auth key: {}. 
refreshing auth code and trying again" - ".".format(rf)) - self.request_pin() - else: - _log.warning("Ecobee failed to authenicate, refreshing tokens...") - self.authenticated = False - self.access_token = '' - self.authorization_code = '' - self.refresh_token = '' - self.request_pin() - # then parse the driver's registry configuration + f"Ecobee driver requires Ecobee device identifier as int, got: {self.ecobee_id}") + self.cache = PersistentDict("ecobee_" + str(self.ecobee_id) + ".json", format='json') + self.auth_config_path = AUTH_CONFIG_PATH.format(self.ecobee_id) self.parse_config(registry_config_str) - # Spawn a periodic greenlet to make sure we're always updated with the most recent Ecobee API data - if not self.poll_greenlet: - self.poll_greenlet = self.core.periodic(180, self.get_ecobee_data) - _log.debug("Ecobee configuration complete.") - - def refresh_tokens(self): - """ - Refresh Ecobee API authentication tokens via API endpoint - asks Ecobee to reset tokens then updates config with - new tokens from Ecobee - """ - _log.debug('Refreshing Ecobee auth tokens.') - url = 'https://api.ecobee.com/token' - params = { - 'grant_type': 'refresh_token', - 'refresh_token': self.refresh_token, - 'client_id': self.api_key - } - if not self.pin: - raise ValueError("Ecobee pin required for refreshing tokens.") - # Generate auth request and extract returned value - response = make_ecobee_request("POST", url, data=params) - for token in 'access_token', 'refresh_token': - if token not in response: - raise RuntimeError("Ecobee response did not contain token{}:, response was {}".format(token, response)) - self.access_token = response['access_token'] - _log.debug("Ecobee access token: {}".format(self.access_token)) - self.refresh_token = response['refresh_token'] - _log.debug("Ecobee refresh token: {}".format(self.refresh_token)) - - def request_pin(self): - """ - Request new application pin from Ecobee API, then updates agent configuration via config store - NOTE: This 
endpoint is currently broken based on information gathered from stack exchange! - """ - # Generate auth request and return extracted values - let the user know the pin has to be updated manually using - # the Ecobee Web UI - _log.debug("Requesting new Ecobee pin.") - url = 'https://api.ecobee.com/authorize' - params = { - 'response_type': 'ecobeePin', - 'client_id': self.api_key, - 'scope': 'smartWrite' - } - try: - response = make_ecobee_request("GET", url, params=params) - except RuntimeError as re: - _log.error(re) - _log.warning("Error connecting to Ecobee. Possible connectivity outage. Could not request pin.") - return - for auth_item in ['code', 'ecobeePin']: - if auth_item not in response: - raise RuntimeError("Ecobee authorization response was missing required item: {}, response contained {]". - format(auth_item, response)) - self.authorization_code = response.get('code') - self.pin = response.get('ecobeePin') - _log.warning("***********************************************************") - _log.warning( - 'Please authorize your ecobee developer app with PIN code {}.\nGo to ' - 'https://www.ecobee.com/consumerportal /index.html, click My Apps, Add application, Enter Pin and click ' - 'Authorize.'.format(self.pin)) - _log.warning("***********************************************************") - _log.info("New Ecobee authorization code: {}".format(self.authorization_code)) - - # Allow the user some time to add the application pin through the Ecobee Web UI - gevent.sleep(60) - # Now that we have a new pin to use, refresh the auth tokens - self.request_tokens() - - def request_tokens(self): - """ - Request up to date Auth tokens from Ecobee using API key and authorization code - """ - # Generate auth request and extract returned value - _log.debug("Requesting new auth tokens from Ecobee.") - url = 'https://api.ecobee.com/token' - params = { - 'grant_type': 'ecobeePin', - 'code': self.authorization_code, - 'client_id': self.api_key - } - try: - response = 
make_ecobee_request("POST", url, data=params) - except RuntimeError as re: - _log.error(re) - _log.warning("Error connecting to Ecobee. Possible connectivity outage. Could not request tokens.") - return - self.authenticated = True - for token in ["access_token", "refresh_token"]: - if token not in response: - raise RuntimeError("Request tokens response did not contain {}, cannot connect to remote Ecobee API " - "until tokens have been successfully obtained from remote") - self.access_token = response.get('access_token') - self.refresh_token = response.get('refresh_token') - _log.info("New Ecobee access token: {}".format(self.access_token)) - _log.info("New Ecobee refresh token: {}".format(self.refresh_token)) - self.update_config() + # Fetch any stored configuration values to reuse + self.authorization_stage = "UNAUTHORIZED" + stored_auth_config = self.get_auth_config_from_store() + # Do some minimal checks on auth + if stored_auth_config: + if stored_auth_config.get("AUTH_CODE"): + self.authorization_code = stored_auth_config.get("AUTH_CODE") + self.authorization_stage = "REQUEST_TOKENS" + if stored_auth_config.get("ACCESS_TOKEN") and stored_auth_config.get("REFRESH_TOKEN"): + self.access_token = stored_auth_config.get("ACCESS_TOKEN") + self.refresh_token = stored_auth_config.get("REFRESH_TOKEN") + try: + self.get_thermostat_data() + self.authorization_stage = "AUTHORIZED" + except HTTPError: + _log.warning("Ecobee request response contained HTTP Error, authorization code may be expired. 
" + "Requesting new authorization code from Ecobee api") + self.authorization_stage = "UNAUTHORIZED" + if self.authorization_stage != "AUTHORIZED": + # if this fails, our attempt to obtain new auth code and tokens was unsuccessful and the driver is in an + # error state + self.update_authorization() + self.get_thermostat_data() + + if not self.poll_greenlet_thermostats: + self.poll_greenlet_thermostats = self.core.periodic(180, self.get_thermostat_data) + _log.debug("Ecobee configuration complete.") def parse_config(self, config_dict): """ @@ -250,19 +144,15 @@ def parse_config(self, config_dict): return # Parse configuration file for registry parameters, then add new register to the interface for index, regDef in enumerate(config_dict): - if not regDef.get("Point Name"): - _log.warning("Registry configuration contained entry without a point name: {}".format(regDef)) + point_name = regDef.get("Point Name") + if not point_name: + _log.warning(f"Registry configuration contained entry without a point name: {regDef}") continue read_only = regDef.get('Writable', "").lower() != 'true' readable = regDef.get('Readable', "").lower() == 'true' - point_name = regDef.get('Volttron Point Name') - if not point_name: - point_name = regDef.get("Point Name") - if not point_name: - # We require something we can use as a name for the register, so - # don't try to create a register without the name - raise ValueError( - "Registry config entry {} did not have a point name or VOLTTRON point name".format(index)) + volttron_point_name = regDef.get('Volttron Point Name') + if not volttron_point_name: + volttron_point_name = point_name description = regDef.get('Notes', '') units = regDef.get('Units', None) default_value = regDef.get("Default Value", "").strip() @@ -272,108 +162,257 @@ def parse_config(self, config_dict): type_name = regDef.get("Type", 'string') # Create an instance of the register class based on the register type if type_name.lower().startswith("setting"): - register = 
Setting(self.ecobee_id, self.access_token, read_only, readable, point_name, units, + register = Setting(self.ecobee_id, read_only, readable, volttron_point_name, point_name, units, description=description) elif type_name.lower() == "hold": if first_hold: _log.warning("Hold registers' set_point requires dictionary value, for best practices, visit " "https://www.ecobee.com/home/developer/api/documentation/v1/functions/SetHold.shtml") first_hold = False - register = Hold(self.ecobee_id, self.access_token, read_only, readable, point_name, units, + register = Hold(self.ecobee_id, read_only, readable, volttron_point_name, point_name, units, description=description) else: - raise ValueError("Unsupported register type {} in Ecobee registry configuration".format(type_name)) + _log.warning(f"Unsupported register type {type_name} in Ecobee registry configuration") + continue if default_value is not None: - self.set_default(point_name, register.value) + self.set_default(point_name, default_value) # Add the register instance to our list of registers self.insert_register(register) # Each Ecobee thermostat has one Status reporting "register", one programs register and one vacation "register # Status is a static point which reports a list of running HVAC systems reporting to the thermostat - status_register = Status(self.ecobee_id, self.access_token) + status_register = Status(self.ecobee_id) self.insert_register(status_register) # Vacation can be used to manage all Vacation programs for the thermostat - vacation_register = Vacation(self.ecobee_id, self.access_token) + vacation_register = Vacation(self.ecobee_id) self.insert_register(vacation_register) # Add a register for listing events and resuming programs - program_register = Program(self.ecobee_id, self.access_token) + program_register = Program(self.ecobee_id) self.insert_register(program_register) - def update_config(self): + def update_authorization(self): + if self.authorization_stage == "UNAUTHORIZED": + 
self.authorize_application() + if self.authorization_stage == "REQUEST_TOKENS": + self.request_tokens() + if self.authorization_stage == "REFRESH_TOKENS": + self.refresh_tokens() + self.update_auth_config() + + def authorize_application(self): + auth_url = "https://api.ecobee.com/authorize" + params = { + "response_type": "ecobeePin", + "client_id": self.api_key, + "scope": "smartWrite" + } + try: + response = make_ecobee_request("GET", auth_url, params=params) + except (ConnectionError, NewConnectionError) as re: + _log.error(re) + _log.warning("Error connecting to Ecobee, Could not request pin.") + return + for auth_item in ['code', 'ecobeePin']: + if auth_item not in response: + raise RuntimeError(f"Ecobee authorization response was missing required item: {auth_item}, response " + f"contained {response}") + self.authorization_code = response.get('code') + pin = response.get('ecobeePin') + _log.warning("***********************************************************") + _log.warning( + f'Please authorize your Ecobee developer app with PIN code {pin}.\nGo to ' + 'https://www.ecobee.com/consumerportal/index.html, click My Apps, Add application, Enter Pin and click ' + 'Authorize.') + _log.warning("***********************************************************") + self.authorization_stage = "REQUEST_TOKENS" + gevent.sleep(60) + + def request_tokens(self): + """ + Request up to date Auth tokens from Ecobee using API key and authorization code + """ + # Generate auth request and extract returned value + _log.debug("Requesting new auth tokens from Ecobee.") + url = 'https://api.ecobee.com/token' + params = { + 'grant_type': 'ecobeePin', + 'code': self.authorization_code, + 'client_id': self.api_key + } + response = make_ecobee_request("POST", url, data=params) + for token in ["access_token", "refresh_token"]: + if token not in response: + raise RuntimeError(f"Request tokens response did not contain {token}: {response}") + self.access_token = response.get('access_token') + 
self.refresh_token = response.get('refresh_token') + self.authorization_stage = "AUTHORIZED" + + def refresh_tokens(self): + """ + Refresh Ecobee API authentication tokens via API endpoint - asks Ecobee to reset tokens then updates config with + new tokens from Ecobee + """ + _log.info('Refreshing Ecobee auth tokens.') + url = 'https://api.ecobee.com/token' + params = { + 'grant_type': 'refresh_token', + 'refresh_token': self.refresh_token, + 'client_id': self.api_key + } + # Generate auth request and extract returned value + response = make_ecobee_request("POST", url, data=params) + for token in 'access_token', 'refresh_token': + if token not in response: + raise RuntimeError(f"Ecobee response did not contain token {token}, response was {response}") + self.access_token = response['access_token'] + self.refresh_token = response['refresh_token'] + self.authorization_stage = "AUTHORIZED" + + def update_auth_config(self): """ Update the master driver configuration for this device with new values from auth functions """ - _log.debug("Updating configuration with new Ecobee auth tokens.") - self.config_dict["AUTHORIZATION_CODE"] = self.authorization_code - self.config_dict["ACCESS_TOKEN"] = self.access_token - self.config_dict["REFRESH_TOKEN"] = self.refresh_token - self.config_dict["PIN"] = self.pin - # Fetch existing driver configuration from config store - driver_config = json.loads( - self.vip.rpc.call(CONFIGURATION_STORE, "manage_get", PLATFORM_DRIVER, self.config_name).get()) - # update driver configuration with new values from Ecobee remote - driver_config["driver_config"].update(self.config_dict) - # Config store update RPC call to update device configuration - self.vip.rpc.call(CONFIGURATION_STORE, "set_config", self.config_name, driver_config, trigger_callback=False, - send_update=True).get(timeout=3) - - def get_ecobee_data(self, refresh=False, retry=True): - """ - Request most recent Ecobee data from Driver Cache agent - this prevents overwhelming remote API 
with data - requests and or incurring excessive costs - :param refresh: If true, the Driver HTTP Cache will skip cached data and try to query the API, may not return - data if the remote rejects due to timing or cost constraints - :param retry: If true try fetching data from cache agent again - """ - # Generate request information to pass along to cache agent - headers = json.dumps({ - 'Content-Type': 'application/json;charset=UTF-8', - 'Authorization': 'Bearer {}'.format(self.access_token) - }) - params = json.dumps({ - 'json': ('{"selection":{"selectionType":"registered",' - '"includeSensors":"true",' - '"includeRuntime":"true",' - '"includeEvents":"true",' - '"includeEquipmentStatus":"true",' - '"includeSettings":"true"}}') - }) - # ask the cache for the most recent API data - self.ecobee_data = None + auth_config = {"AUTH_CODE": self.authorization_code, + "ACCESS_TOKEN": self.access_token, + "REFRESH_TOKEN": self.refresh_token} + _log.debug("Updating Ecobee auth configuration with new tokens.") + self.vip.rpc.call(CONFIGURATION_STORE, "set_config", self.auth_config_path, auth_config, trigger_callback=False, + send_update=False).get(timeout=3) + + def get_auth_config_from_store(self): + """ + :return: Fetch currently stored auth configuration info from config store, returns empty dict if none is + present + """ + configs = self.vip.rpc.call(CONFIGURATION_STORE, "manage_list_configs", PLATFORM_DRIVER).get(timeout=3) + if self.auth_config_path in configs: + return jsonapi.loads(self.vip.rpc.call( + CONFIGURATION_STORE, "manage_get", PLATFORM_DRIVER, self.auth_config_path).get(timeout=3)) + else: + _log.warning("No Ecobee auth file found in config store") + return {} + + def get_thermostat_data(self, refresh=False): + """ + Collects most up to date thermostat object data for the configured Ecobee thermostat ID + :param refresh: whether or not to force obtaining new data from the remote Ecobee API + """ + params = { + "json": jsonapi.dumps({ + "selection": { + 
"selectionType": "thermostats", + "selectionMatch": self.ecobee_id, + "includeSensors": True, + "includeRuntime": True, + "includeEvents": True, + "includeEquipmentStatus": True, + "includeSettings": True + } + }) + } + headers = populate_thermostat_headers(self.access_token) + self.thermostat_data = self.get_ecobee_data("GET", THERMOSTAT_URL, 180, refresh=refresh, headers=headers, + params=params) + + def get_ecobee_data(self, request_type, url, update_frequency, refresh=False, **kwargs): + """ + Checks cache for up to date Ecobee data. If none is available for the URL, makes a request to remote Ecobee API. + :param refresh: force Ecobee data to be obtained from the remote API rather than cache + :param request_type: HTTP request type for request sent to remote + :param url: URL of remote Ecobee API endpoint + :param update_frequency: period for which cached data is considered up to date + :param kwargs: HTTP request arguments + :return: Up to date Ecobee data for URL + """ + cache_data = self.get_data_cache(url, update_frequency) + if refresh or not (isinstance(cache_data, dict) and len(cache_data)): + try: + response = self.get_data_remote(request_type, url, **kwargs) + except HTTPError as he: + self.store_remote_data(url, None) + raise he + self.store_remote_data(url, response) + return response + else: + return cache_data + + def get_data_remote(self, request_type, url, **kwargs): + """ + Make request to Ecobee remote API for "register" data, updating authorization tokens as necessary + :param request_type: HTTP request type for making request + :param url: URL corresponding to "register" data + :param kwargs: HTTP request arguments + :return: remote API response body + """ try: - data = self.vip.rpc.call( - self.cache_identity, "driver_data_get", "ecobee", self.group_id, THERMOSTAT_URL, headers, - update_frequency=180, params=params, refresh=refresh).get() - if data is None: - raise RuntimeError("No Ecobee data available from Driver HTTP Cache Agent.") - 
_log.info("Last Ecobee data update occurred: {}".format(data.get("request_timestamp"))) - self.ecobee_data = data.get("request_response") - except RemoteError: - if retry: - _log.warning("Failed to get Ecobee data from Driver HTTP Cache Agent, refreshing tokens and trying " - "again.") - self.refresh_tokens() - self.get_ecobee_data(refresh=refresh, retry=False) + response = make_ecobee_request(request_type, url, **kwargs) + self.authorization_stage = "AUTHORIZED" + return response + except HTTPError: + _log.warning(f"HTTPError occurred while fetching data from Ecobee API url: {url}") + # The request to the remote failed, try refreshing the tokens and trying again using the refresh token + self.authorization_stage = "REFRESH_TOKENS" + try: + self.update_authorization() + except HTTPError: + _log.warning("HTTPError occurred while refreshing Ecobee API tokens") + # if tokens could not be refreshed, try obtaining new tokens using the existing authorization key + self.authorization_stage = "REQUEST_TOKENS" + # if we fail to request new tokens, the authorization key is no longer valid, the driver will need + # to be restarted + self.update_authorization() + response = make_ecobee_request(request_type, url, **kwargs) + self.authorization_stage = "AUTHORIZED" + return response + + def get_data_cache(self, url, update_frequency): + """ + Fetches data from cache dict if it is up to date + :param url: URL to use to use as lookup value in cache dict + :param update_frequency: duration in seconds for which data in cache is considered up to date + :return: Data stored in cache if up to date, otherwise None + """ + url_data = self.cache.get(url) + if url_data: + timestamp = utils.parse_timestamp_string(url_data.get("request_timestamp")) + if (datetime.datetime.now() - timestamp).total_seconds() < update_frequency: + return url_data.get("request_response") else: - raise RuntimeError("Failed to get Ecobee data from Driver Cache after refreshing tokens. 
May be " - "experiencing connection issues or Ecobee API may be down.") + _log.info("Cached Ecobee data out of date.") + return None + + def store_remote_data(self, url, response): + """ + Store response body with a timestamp for a given URL + :param url: url to use to use as lookup value in cache dict + :param response: request response body to store in cache + """ + timestamp = utils.format_timestamp(datetime.datetime.now()) + self.cache.update({ + url: { + "request_timestamp": timestamp, + "request_response": response + } + }) + _log.info(f"Last Ecobee update occurred at {timestamp}") + self.cache.sync() def get_point(self, point_name, **kwargs): """ Return a point's most recent stored value from remote API - :param point_name: - :return: + :param point_name: The name of the point corresponding to a register to get the state of + :return: register's most recent state from remote API response """ # Find the named register and get its current state from the periodic Ecobee API data register = self.get_register_by_name(point_name) try: - return register.get_state(self.ecobee_data) - except: - self.refresh_tokens() - return register.get_state(self.ecobee_data) + return register.get_state(self.thermostat_data) + except (ValueError, KeyError, TypeError): + self.get_thermostat_data(refresh=True) + return register.get_state(self.thermostat_data) def _set_point(self, point_name, value, **kwargs): """ @@ -385,23 +424,21 @@ def _set_point(self, point_name, value, **kwargs): # Find the correct register by name, set its state, then fetch the new state based on the register's type register = self.get_register_by_name(point_name) if register.read_only: - raise IOError("Trying to write to a point configured read only: {}".format(point_name)) - if register.register_type not in ["setting", "hold", "vacation", "programs"]: - raise RuntimeError("Register {} type {} does not support set_point".format(register.point_name, - register.register_type)) + raise IOError(f"Trying to write 
to a point configured read only: {point_name}") try: - if register.register_type == "setting" or register.register_type == "hold": - register.set_state(value) - elif register.register_type in ["vacation", "programs"]: - register.set_state(value, **kwargs) - except: + if isinstance(register, Setting) or isinstance(register, Hold): + register.set_state(value, self.access_token) + elif isinstance(register, Vacation) or isinstance(register, Program): + register.set_state(value, self.access_token, **kwargs) + except HTTPError: self.refresh_tokens() - if register.register_type == "setting" or register.register_type == "hold": - register.set_state(value) - elif register.register_type in ["vacation", "programs"]: - register.set_state(value, **kwargs) + if isinstance(register, Setting) or isinstance(register, Hold): + register.set_state(value, self.access_token) + elif isinstance(register, Vacation) or isinstance(register, Program): + register.set_state(value, self.access_token, **kwargs) + self.get_thermostat_data(refresh=True) if register.readable: - return register.get_state(self.ecobee_data) + return register.get_state(self.thermostat_data) def _scrape_all(self): """ @@ -409,42 +446,27 @@ def _scrape_all(self): :return: dictionary of most recent data for all points configured for the driver """ result = {} - # Get static registers - programs_register = self.get_register_by_name("Programs") - vacations_register = self.get_register_by_name("Vacations") - status_register = self.get_register_by_name("Status") - # Get all holds, filter holds that aren't readable points - holds = self.get_registers_by_type("hold", True) + self.get_registers_by_type("hold", False) - holds = [register for register in holds if register.readable] - # Get all settings, filter settings that aren't readable points - settings = self.get_registers_by_type("setting", True) + self.get_registers_by_type("setting", False) - settings = [register for register in settings if register.readable] - registers = 
holds + settings + byte_registers = self.get_registers_by_type("byte", True) + self.get_registers_by_type("byte", False) + registers = [register for register in byte_registers if register.readable] + refresh = True # Add data for all holds and settings to our results for register in registers: try: - register_data = register.get_state(self.ecobee_data) + register_data = register.get_state(self.thermostat_data) if isinstance(register_data, dict): result.update(register_data) else: result[register.point_name] = register_data - except RuntimeError as re: - _log.warning(re) - # include device status in the results - try: - result[status_register.point_name] = status_register.get_state() - except RuntimeError as re: - _log.warning(re) - # include any vacations scheduled for the device - try: - result[vacations_register.point_name] = vacations_register.get_state(self.ecobee_data) - except RuntimeError as re: - _log.warning(re) - # include any scheduled programs for the device - try: - result[programs_register.point_name] = programs_register.get_state(self.ecobee_data) - except RuntimeError as re: - _log.warning(re) + except ValueError: + if refresh is True: + # refresh data, but don't create a non-deterministic loop of refreshes + self.get_thermostat_data(refresh=refresh) + refresh = False + register_data = register.get_state(self.thermostat_data) + if isinstance(register_data, dict): + result.update(register_data) + else: + result[register.point_name] = register_data return result @@ -453,32 +475,31 @@ class Setting(BaseRegister): Register to wrap around points contained in setting field of Ecobee API's thermostat data response """ - def __init__(self, thermostat_identifier, access_token, read_only, readable, point_name, units, description=''): - super(Setting, self).__init__("setting", read_only, point_name, units, description=description) + def __init__(self, thermostat_identifier, read_only, readable, point_name, point_path, units, + description=''): + super(Setting, 
self).__init__("byte", read_only, point_name, units, description=description) self.thermostat_id = thermostat_identifier - self.access_token = access_token self.readable = readable + self.point_path = point_path - def set_state(self, value): + def set_state(self, value, access_token): """ Set Ecobee thermostat setting value by configured point name and provided value :param value: Arbitrarily specified value to request as set point + :param access_token: Ecobee access token to provide as bearer auth in request :return: request response values from settings request """ - if self.read_only: - raise RuntimeError("Attempted write of read-only register {}".format(self.point_name)) # Generate set state request content and send request params = {"format": "json"} thermostat_body = { "thermostat": { "settings": { - self.point_name: value + self.point_path: value } } } - headers, body = generate_set_point_request_objects(self.access_token, "thermostats", self.thermostat_id, - thermostat_body) - return make_ecobee_request("POST", THERMOSTAT_URL, headers=headers, data=params, json=body) + headers, body = populate_selection_objects(access_token, "thermostats", self.thermostat_id, thermostat_body) + make_ecobee_request("POST", THERMOSTAT_URL, headers=headers, params=params, json=body) def get_state(self, ecobee_data): """ @@ -488,16 +509,17 @@ def get_state(self, ecobee_data): if not self.readable: raise RuntimeError("Requested read of write-only point {}".format(self.point_name)) if not ecobee_data: - raise RuntimeError("No Ecobee data from cache available during point scrape.") + raise ValueError("No Ecobee data from cache available during point scrape.") # Parse the state out of the data dictionary for thermostat in ecobee_data.get("thermostatList"): if int(thermostat["identifier"]) == self.thermostat_id: - if self.point_name not in thermostat["settings"]: - raise RuntimeError("Register name {} could not be found in latest Ecobee data".format( - self.point_name)) + if 
self.point_path not in thermostat.get("settings") or \ + thermostat["settings"].get(self.point_path) is None: + raise ValueError(f"Point name {self.point_name} could not be found in latest Ecobee data") else: - return thermostat["settings"].get(self.point_name) - raise RuntimeError("Point {} not available in Ecobee data.".format(self.point_name)) + return thermostat["settings"].get(self.point_path) + raise ValueError( + f"Point {self.point_path} not available in Ecobee data (Volttron Point Name {self.point_name}).") class Hold(BaseRegister): @@ -505,27 +527,28 @@ class Hold(BaseRegister): Register to wrap around points contained in hold field of Ecobee API's thermostat data response """ - def __init__(self, thermostat_identifier, access_token, read_only, readable, point_name, units, description=''): - super(Hold, self).__init__("hold", read_only, point_name, units, description=description) + def __init__(self, thermostat_identifier, read_only, readable, point_name, point_path, units, description=''): + super(Hold, self).__init__("byte", read_only, point_name, units, description=description) self.thermostat_id = thermostat_identifier - self.access_token = access_token self.readable = readable self.python_type = int + self.point_path = point_path - def set_state(self, value): + def set_state(self, value, access_token): """ Set Ecobee thermostat hold by configured point name and provided value dictionary :param value: Arbitrarily specified value dictionary. Ecobee API documentation provides best practice information for each hold. 
+ :param access_token: Ecobee access token to provide as bearer auth in request :return: request response values from settings request """ if not isinstance(value, dict): - raise ValueError("Hold register set_state expects dict, received {}".format(type(value))) + raise ValueError(f"Hold register set_state expects dict, received {type(value)}") if "holdType" not in value: raise ValueError('Hold register requires "holdType" in value dict') - if self.point_name not in value: - raise ValueError("Point name {} not found in Hold set_state value dict") - # Generate set state request content and send reques + if self.point_path not in value: + raise ValueError(f"Point name {self.point_name} not found in Hold set_state value dict") + # Generate set state request content and send request params = {"format": "json"} function_body = { "functions": [ @@ -535,9 +558,8 @@ def set_state(self, value): } ] } - headers, body = generate_set_point_request_objects(self.access_token, "thermostats", self.thermostat_id, - function_body) - return make_ecobee_request("POST", THERMOSTAT_URL, headers=headers, data=params, json=body) + headers, body = populate_selection_objects(access_token, "thermostats", self.thermostat_id, function_body) + make_ecobee_request("POST", THERMOSTAT_URL, headers=headers, params=params, json=body) def get_state(self, ecobee_data): """ @@ -545,66 +567,18 @@ def get_state(self, ecobee_data): :return: Most recently available data for this setting register """ if not self.readable: - raise RuntimeError("Requested read of write-only point {}".format(self.point_name)) + raise RuntimeError(f"Requested read of write-only point {self.point_name}") if not ecobee_data: - raise RuntimeError("No Ecobee data from cache available during point scrape.") + raise ValueError("No Ecobee data from cache available during point scrape.") # Parse the value from the data dictionary for thermostat in ecobee_data.get("thermostatList"): if int(thermostat.get("identifier")) == 
self.thermostat_id: runtime_data = thermostat.get("runtime") - if not runtime_data: - raise RuntimeError("No runtime data included in Ecobee response") - return runtime_data.get(self.point_name) - raise RuntimeError("Point {} not available in Ecobee data.".format(self.point_name)) - - -class Status(BaseRegister): - """ - Status request wrapper register for Ecobee thermostats. - Note: There is a single status point for each thermostat, which is set by the device. - """ - - def __init__(self, thermostat_identifier, access_token): - status_description = "Reports device status as a list of running HVAC devices interfacing with this thermostat." - super(Status, self).__init__("status", True, "Status", None, description=status_description) - self.thermostat_id = thermostat_identifier - self.readable = True - self.access_token = access_token - self.python_type = int - - def set_state(self, value): - """ - Set state is not supported for the static Status register. - """ - raise NotImplementedError("Setting thermostat status is not supported.") - - def get_state(self): - """ - :return: List of currently running equipment connected to Ecobee thermostat - """ - # Generate set state request content and send request - status_url = "https://api.ecobee.com/1/thermostatSummary" - headers = generate_thermostat_headers(self.access_token) - params = { - 'json': json.dumps({ - "selection": { - "selectionType": "registered", - "selectionMatch": "", - "includeEquipmentStatus": True - } - }) - } - status_message = make_ecobee_request("GET", status_url, headers=headers, params=params) - # Parse the status from the request response - if not status_message: - raise RuntimeError( - "No response data from Ecobee thermostat summary endpoint, could not get thermostat status") - for status_line in status_message["statusList"]: - thermostat, running_equipment = status_line.split(":") - if int(thermostat) == self.thermostat_id: - return running_equipment.split(",") - raise RuntimeError("Could not 
find status for Ecobee device {} in thermostat summary".format( - self.thermostat_id)) + if not runtime_data or runtime_data.get(self.point_path) is None: + raise ValueError(f"Point name {self.point_name} could not be found in latest Ecobee data") + return runtime_data.get(self.point_path) + raise ValueError( + f"Point {self.point_path} not available in Ecobee data (Volttron Point Name {self.point_name}).") # TODO deleting a vacation is currently broken @@ -616,18 +590,18 @@ class Vacation(BaseRegister): of all vacations for the device """ - def __init__(self, thermostat_identifier, access_token): + def __init__(self, thermostat_identifier): vacation_description = "Add, remove and fetch Vacations on this Ecobee device." - super(Vacation, self).__init__("vacation", False, "Vacations", None, description=vacation_description) + super(Vacation, self).__init__("byte", False, "Vacations", "", description=vacation_description) self.thermostat_id = thermostat_identifier self.readable = True - self.access_token = access_token self.python_type = str - def set_state(self, vacation, delete=False): + def set_state(self, vacation, access_token, delete=False): """ Send delete or create vacation request to Ecobee API for the configured thermostat :param vacation: Vacation name for delete, or vacation object dictionary for create + :param access_token: Ecobee access token to provide as bearer auth in request :param delete: Whether to delete the named vacation """ if delete: @@ -649,8 +623,8 @@ def set_state(self, vacation, delete=False): } ] } - headers, body = generate_set_point_request_objects(self.access_token, "registered", "", function_body) - make_ecobee_request("POST", THERMOSTAT_URL, headers=headers, data=params, json=body) + headers, body = populate_selection_objects(access_token, "registered", self.thermostat_id, function_body) + make_ecobee_request("POST", THERMOSTAT_URL, headers=headers, params=params, json=body) else: # Do some basic format validation for vacation 
dict, but user is ultimately responsible for formatting # Ecobee API docs describe expected format, link provided below @@ -669,7 +643,6 @@ def set_state(self, vacation, delete=False): '"startTime":