From a9bcda3de33a436235325d924b90bc96dd883436 Mon Sep 17 00:00:00 2001 From: Ryan Melton Date: Sun, 8 Sep 2024 12:57:29 -0600 Subject: [PATCH 1/2] Add bash. Reorganize for enterprise --- openc3-redis/Dockerfile | 2 +- openc3/python/openc3/utilities/bucket.py | 2 +- openc3/python/openc3/utilities/store.py | 241 +--------------- .../openc3/utilities/store_implementation.py | 262 ++++++++++++++++++ 4 files changed, 267 insertions(+), 240 deletions(-) create mode 100644 openc3/python/openc3/utilities/store_implementation.py diff --git a/openc3-redis/Dockerfile b/openc3-redis/Dockerfile index 0adc18074c..34c986e446 100644 --- a/openc3-redis/Dockerfile +++ b/openc3-redis/Dockerfile @@ -22,7 +22,7 @@ RUN if [[ $OPENC3_DEPENDENCY_REGISTRY == 'docker.io' ]]; then \ # Modify the redis user and group to be 1001 # The default alpine redis container uses 999 # See https://github.com/docker-library/redis/blob/master/7.2/alpine/Dockerfile - apk add shadow; \ + apk add shadow bash; \ usermod -u 1001 redis; \ groupmod -g 1001 redis; \ # Remove gosu to eliminate a ton of CVEs diff --git a/openc3/python/openc3/utilities/bucket.py b/openc3/python/openc3/utilities/bucket.py index 857682ad64..2e73d274c8 100644 --- a/openc3/python/openc3/utilities/bucket.py +++ b/openc3/python/openc3/utilities/bucket.py @@ -36,7 +36,7 @@ def getClient(cls): my_module = importlib.import_module("." + OPENC3_CLOUD.lower() + "_bucket", "openc3.utilities") # If the file doesn't exist try the Enterprise module except ModuleNotFoundError: - my_module = importlib.import_module("." + OPENC3_CLOUD.lower() + "_bucket", "openc3-enterprise.utilities") + my_module = importlib.import_module("." + OPENC3_CLOUD.lower() + "_bucket", "openc3enterprise.utilities") return getattr(my_module, bucket_class)() def create(self, bucket): diff --git a/openc3/python/openc3/utilities/store.py b/openc3/python/openc3/utilities/store.py index a6a88c07b6..bb0c379159 100644 --- a/openc3/python/openc3/utilities/store.py +++ b/openc3/python/openc3/utilities/store.py @@ -14,11 +14,6 @@ # This file may also be used under the terms of a commercial license # if purchased from OpenC3, Inc. 
-import redis -from redis.exceptions import TimeoutError -from openc3.utilities.connection_pool import ConnectionPool -from contextlib import contextmanager -import threading from openc3.environment import * if OPENC3_REDIS_CLUSTER: @@ -26,237 +21,7 @@ else: openc3_redis_cluster = False +from openc3.utilities.store_implementation import Store, StoreConnectionPool, StoreMeta, EphemeralStore -class StoreConnectionPool(ConnectionPool): - @contextmanager - def pipelined(self): - if openc3_redis_cluster: - yield # TODO: Update keys to support pipelining in cluster - else: - with self.get() as redis: - pipeline = redis.pipeline(transaction=False) - thread_id = threading.get_native_id() - self.pipelines[thread_id] = pipeline - try: - yield - finally: - pipeline.execute() - self.pipelines[thread_id] = None - - @contextmanager - def get(self): - thread_id = threading.get_native_id() - if thread_id not in self.pipelines: - self.pipelines[thread_id] = None - pipeline = self.pipelines[thread_id] - if pipeline: - yield pipeline - else: - item = None - with self.lock: - if not self.pool.empty(): - item = self.pool.get(False) - elif self.count < self.pool_size: - item = self.ctor() - self.count += 1 - else: - item = self.pool.get() - try: - yield item - finally: - self.pool.put(item) - - -class StoreMeta(type): - def __getattribute__(cls, func): - if func == "instance" or func == "instance_mutex" or func == "my_instance": - return super().__getattribute__(func) - - def method(*args, **kw_args): - return getattr(cls.instance(), func)(*args, **kw_args) - - return method - - -class Store(metaclass=StoreMeta): - # Variable that holds the singleton instance - my_instance = None - - # Mutex used to ensure that only one instance is created - instance_mutex = threading.Lock() - - # Get the singleton instance - @classmethod - def instance(cls, pool_size=100): - if cls.my_instance: - return cls.my_instance - - with cls.instance_mutex: - cls.my_instance = cls(pool_size) - return cls.my_instance - - # Delegate all unknown methods to redis through the @redis_pool - def __getattr__(self, func): - with self.redis_pool.get() as redis: - - def method(*args, **kwargs): - return getattr(redis, func)(*args, **kwargs) - - return method - - def __init__(self, pool_size=10): - self.redis_host = OPENC3_REDIS_HOSTNAME - self.redis_port = OPENC3_REDIS_PORT - self.redis_pool = StoreConnectionPool(self.build_redis, pool_size) - self.topic_offsets = {} - self.pipelines = {} - - if not openc3_redis_cluster: - - def build_redis(self): - # NOTE: We can't use decode_response because it tries to decode the binary - # packet buffer which does not work. Thus strings come back as bytes like - # b"target_name" and we decode them using b"target_name".decode() - return redis.Redis( - host=self.redis_host, - port=self.redis_port, - username=OPENC3_REDIS_USERNAME, - password=OPENC3_REDIS_PASSWORD, - ) - - ########################################################################### - # Stream APIs - ########################################################################### - - def get_oldest_message(self, topic): - with self.redis_pool.get() as redis: - result = redis.xrange(topic, count=1) - if result and len(result) > 0: - return result[0] - else: - return None - - def get_newest_message(self, topic): - with self.redis_pool.get() as redis: - # Default in xrevrange is range end '+', start '-' which means get all - # elements from higher ID to lower ID and since we're limiting to 1 - # we get the last element. 
See https://redis.io/commands/xrevrange. - result = redis.xrevrange(topic, count=1) - if result and len(result) > 0: - first = list(result[0]) - first[0] = first[0].decode() - return first - else: - return (None, None) - - def get_last_offset(self, topic): - with self.redis_pool.get() as redis: - result = redis.xrevrange(topic, count=1) - if result and result[0] and result[0][0]: - return result[0][0].decode() - else: - return "0-0" - - def update_topic_offsets(self, topics): - offsets = [] - for topic in topics: - # Normally we will just be grabbing the topic offset - # this allows xread to get everything past this point - thread_id = threading.get_native_id() - if thread_id not in self.topic_offsets: - self.topic_offsets[thread_id] = {} - topic_offsets = self.topic_offsets[thread_id] - last_id = topic_offsets.get(topic) - if last_id: - offsets.append(last_id) - else: - # If there is no topic offset this is the first call. - # Get the last offset ID so we'll start getting everything from now on - offsets.append(self.get_last_offset(topic)) - topic_offsets[topic] = offsets[-1] - return offsets - - if not openc3_redis_cluster: - - def read_topics(self, topics, offsets=None, timeout_ms=1000, count=None): - if len(topics) == 0: - return {} - thread_id = threading.get_native_id() - if thread_id not in self.topic_offsets: - self.topic_offsets[thread_id] = {} - topic_offsets = self.topic_offsets[thread_id] - try: - with self.redis_pool.get() as redis: - if not offsets: - offsets = self.update_topic_offsets(topics) - streams = {} - index = 0 - for topic in topics: - streams[topic] = offsets[index] - index += 1 - result = redis.xread(streams, block=timeout_ms, count=count) - if result and len(result) > 0: - for topic, messages in result: - for msg_id, msg_hash in messages: - if isinstance(topic, bytes): - topic = topic.decode() - if isinstance(msg_id, bytes): - msg_id = msg_id.decode() - topic_offsets[topic] = msg_id - yield topic, msg_id, msg_hash, redis - return result - except TimeoutError: - # Should return an empty hash not array - xread returns a hash - return {} - - # Add new entry to the redis stream. - # > https://www.rubydoc.info/github/redis/redis-rb/Redis:xadd - # - # @example Without options - # store.write_topic('MANGO__TOPIC', {'message' => 'something'}) - # @example With options - # store.write_topic('MANGO__TOPIC', {'message' => 'something'}, id: '0-0', maxlen: 1000, approximate: 'true') - # - # @param topic [String] the stream / topic - # @param msg_hash [Hash] one or multiple field-value pairs - # - # @option opts [String] :id the entry id, default value is `*`, it means auto generation, - # if `nil` id is passed it will be changed to `*` - # @option opts [Integer] :maxlen max length of entries, default value is `nil`, it means will grow forever - # @option opts [String] :approximate whether to add `~` modifier of maxlen or not, default value is 'true' - # - # @return [String] the entry id - def write_topic(self, topic, msg_hash, id="*", maxlen=None, approximate=True): - if not id: - id = "*" - with self.redis_pool.get() as redis: - return redis.xadd(topic, msg_hash, id=id, maxlen=maxlen, approximate=approximate) - - # Trims older entries of the redis stream if needed. 
- # > https://www.rubydoc.info/github/redis/redis-rb/Redis:xtrim - # - # @example Without options - # store.trim_topic('MANGO__TOPIC', 1000) - # @example With options - # store.trim_topic('MANGO__TOPIC', 1000, approximate: 'true', limit: 0) - # - # @param topic [String] the stream key - # @param minid [Integer] Id to throw away data up to - # @param approximate [Boolean] whether to add `~` modifier of maxlen or not - # @param limit [Boolean] number of items to return from the call - # - # @return [Integer] the number of entries actually deleted - def trim_topic(self, topic, minid, approximate=True, limit=0): - with self.redis_pool.get() as redis: - return redis.xtrim(name=topic, minid=minid, approximate=approximate, limit=limit) - - -class EphemeralStore(Store): - # Variable that holds the singleton instance - my_instance = None - - def __init__(self, pool_size=10): - super().__init__(pool_size) - self.redis_host = OPENC3_REDIS_EPHEMERAL_HOSTNAME - self.redis_port = OPENC3_REDIS_EPHEMERAL_PORT - self.redis_pool = StoreConnectionPool(self.build_redis, pool_size) +if openc3_redis_cluster: + import openc3enterprise.utilities.store diff --git a/openc3/python/openc3/utilities/store_implementation.py b/openc3/python/openc3/utilities/store_implementation.py new file mode 100644 index 0000000000..a6a88c07b6 --- /dev/null +++ b/openc3/python/openc3/utilities/store_implementation.py @@ -0,0 +1,262 @@ +# Copyright 2024 OpenC3, Inc. +# All Rights Reserved. +# +# This program is free software; you can modify and/or redistribute it +# under the terms of the GNU Affero General Public License +# as published by the Free Software Foundation; version 3 with +# attribution addendums as found in the LICENSE.txt +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# This file may also be used under the terms of a commercial license +# if purchased from OpenC3, Inc. 
+ +import redis +from redis.exceptions import TimeoutError +from openc3.utilities.connection_pool import ConnectionPool +from contextlib import contextmanager +import threading +from openc3.environment import * + +if OPENC3_REDIS_CLUSTER: + openc3_redis_cluster = True +else: + openc3_redis_cluster = False + + +class StoreConnectionPool(ConnectionPool): + @contextmanager + def pipelined(self): + if openc3_redis_cluster: + yield # TODO: Update keys to support pipelining in cluster + else: + with self.get() as redis: + pipeline = redis.pipeline(transaction=False) + thread_id = threading.get_native_id() + self.pipelines[thread_id] = pipeline + try: + yield + finally: + pipeline.execute() + self.pipelines[thread_id] = None + + @contextmanager + def get(self): + thread_id = threading.get_native_id() + if thread_id not in self.pipelines: + self.pipelines[thread_id] = None + pipeline = self.pipelines[thread_id] + if pipeline: + yield pipeline + else: + item = None + with self.lock: + if not self.pool.empty(): + item = self.pool.get(False) + elif self.count < self.pool_size: + item = self.ctor() + self.count += 1 + else: + item = self.pool.get() + try: + yield item + finally: + self.pool.put(item) + + +class StoreMeta(type): + def __getattribute__(cls, func): + if func == "instance" or func == "instance_mutex" or func == "my_instance": + return super().__getattribute__(func) + + def method(*args, **kw_args): + return getattr(cls.instance(), func)(*args, **kw_args) + + return method + + +class Store(metaclass=StoreMeta): + # Variable that holds the singleton instance + my_instance = None + + # Mutex used to ensure that only one instance is created + instance_mutex = threading.Lock() + + # Get the singleton instance + @classmethod + def instance(cls, pool_size=100): + if cls.my_instance: + return cls.my_instance + + with cls.instance_mutex: + cls.my_instance = cls(pool_size) + return cls.my_instance + + # Delegate all unknown methods to redis through the @redis_pool + def __getattr__(self, func): + with self.redis_pool.get() as redis: + + def method(*args, **kwargs): + return getattr(redis, func)(*args, **kwargs) + + return method + + def __init__(self, pool_size=10): + self.redis_host = OPENC3_REDIS_HOSTNAME + self.redis_port = OPENC3_REDIS_PORT + self.redis_pool = StoreConnectionPool(self.build_redis, pool_size) + self.topic_offsets = {} + self.pipelines = {} + + if not openc3_redis_cluster: + + def build_redis(self): + # NOTE: We can't use decode_response because it tries to decode the binary + # packet buffer which does not work. Thus strings come back as bytes like + # b"target_name" and we decode them using b"target_name".decode() + return redis.Redis( + host=self.redis_host, + port=self.redis_port, + username=OPENC3_REDIS_USERNAME, + password=OPENC3_REDIS_PASSWORD, + ) + + ########################################################################### + # Stream APIs + ########################################################################### + + def get_oldest_message(self, topic): + with self.redis_pool.get() as redis: + result = redis.xrange(topic, count=1) + if result and len(result) > 0: + return result[0] + else: + return None + + def get_newest_message(self, topic): + with self.redis_pool.get() as redis: + # Default in xrevrange is range end '+', start '-' which means get all + # elements from higher ID to lower ID and since we're limiting to 1 + # we get the last element. See https://redis.io/commands/xrevrange. 
+ result = redis.xrevrange(topic, count=1) + if result and len(result) > 0: + first = list(result[0]) + first[0] = first[0].decode() + return first + else: + return (None, None) + + def get_last_offset(self, topic): + with self.redis_pool.get() as redis: + result = redis.xrevrange(topic, count=1) + if result and result[0] and result[0][0]: + return result[0][0].decode() + else: + return "0-0" + + def update_topic_offsets(self, topics): + offsets = [] + for topic in topics: + # Normally we will just be grabbing the topic offset + # this allows xread to get everything past this point + thread_id = threading.get_native_id() + if thread_id not in self.topic_offsets: + self.topic_offsets[thread_id] = {} + topic_offsets = self.topic_offsets[thread_id] + last_id = topic_offsets.get(topic) + if last_id: + offsets.append(last_id) + else: + # If there is no topic offset this is the first call. + # Get the last offset ID so we'll start getting everything from now on + offsets.append(self.get_last_offset(topic)) + topic_offsets[topic] = offsets[-1] + return offsets + + if not openc3_redis_cluster: + + def read_topics(self, topics, offsets=None, timeout_ms=1000, count=None): + if len(topics) == 0: + return {} + thread_id = threading.get_native_id() + if thread_id not in self.topic_offsets: + self.topic_offsets[thread_id] = {} + topic_offsets = self.topic_offsets[thread_id] + try: + with self.redis_pool.get() as redis: + if not offsets: + offsets = self.update_topic_offsets(topics) + streams = {} + index = 0 + for topic in topics: + streams[topic] = offsets[index] + index += 1 + result = redis.xread(streams, block=timeout_ms, count=count) + if result and len(result) > 0: + for topic, messages in result: + for msg_id, msg_hash in messages: + if isinstance(topic, bytes): + topic = topic.decode() + if isinstance(msg_id, bytes): + msg_id = msg_id.decode() + topic_offsets[topic] = msg_id + yield topic, msg_id, msg_hash, redis + return result + except TimeoutError: + # Should return an empty hash not array - xread returns a hash + return {} + + # Add new entry to the redis stream. + # > https://www.rubydoc.info/github/redis/redis-rb/Redis:xadd + # + # @example Without options + # store.write_topic('MANGO__TOPIC', {'message' => 'something'}) + # @example With options + # store.write_topic('MANGO__TOPIC', {'message' => 'something'}, id: '0-0', maxlen: 1000, approximate: 'true') + # + # @param topic [String] the stream / topic + # @param msg_hash [Hash] one or multiple field-value pairs + # + # @option opts [String] :id the entry id, default value is `*`, it means auto generation, + # if `nil` id is passed it will be changed to `*` + # @option opts [Integer] :maxlen max length of entries, default value is `nil`, it means will grow forever + # @option opts [String] :approximate whether to add `~` modifier of maxlen or not, default value is 'true' + # + # @return [String] the entry id + def write_topic(self, topic, msg_hash, id="*", maxlen=None, approximate=True): + if not id: + id = "*" + with self.redis_pool.get() as redis: + return redis.xadd(topic, msg_hash, id=id, maxlen=maxlen, approximate=approximate) + + # Trims older entries of the redis stream if needed. 
+ # > https://www.rubydoc.info/github/redis/redis-rb/Redis:xtrim + # + # @example Without options + # store.trim_topic('MANGO__TOPIC', 1000) + # @example With options + # store.trim_topic('MANGO__TOPIC', 1000, approximate: 'true', limit: 0) + # + # @param topic [String] the stream key + # @param minid [Integer] Id to throw away data up to + # @param approximate [Boolean] whether to add `~` modifier of maxlen or not + # @param limit [Boolean] number of items to return from the call + # + # @return [Integer] the number of entries actually deleted + def trim_topic(self, topic, minid, approximate=True, limit=0): + with self.redis_pool.get() as redis: + return redis.xtrim(name=topic, minid=minid, approximate=approximate, limit=limit) + + +class EphemeralStore(Store): + # Variable that holds the singleton instance + my_instance = None + + def __init__(self, pool_size=10): + super().__init__(pool_size) + self.redis_host = OPENC3_REDIS_EPHEMERAL_HOSTNAME + self.redis_port = OPENC3_REDIS_EPHEMERAL_PORT + self.redis_pool = StoreConnectionPool(self.build_redis, pool_size) From 6d40d233729b7754abd0e55d9f26a8d9e71b64da Mon Sep 17 00:00:00 2001 From: Ryan Melton Date: Sun, 8 Sep 2024 13:14:42 -0600 Subject: [PATCH 2/2] fix ruff errors --- openc3/python/openc3/utilities/store.py | 4 +- .../test_object_read_conversion.py | 2 +- .../test_object_write_conversion.py | 2 +- .../python/test/models/test_metric_model.py | 66 +++++++++---------- .../python/test/models/test_secret_model.py | 23 +++---- openc3/python/test/models/test_stash_model.py | 25 ++++--- 6 files changed, 56 insertions(+), 66 deletions(-) diff --git a/openc3/python/openc3/utilities/store.py b/openc3/python/openc3/utilities/store.py index bb0c379159..024faf654f 100644 --- a/openc3/python/openc3/utilities/store.py +++ b/openc3/python/openc3/utilities/store.py @@ -21,7 +21,7 @@ else: openc3_redis_cluster = False -from openc3.utilities.store_implementation import Store, StoreConnectionPool, StoreMeta, EphemeralStore +from openc3.utilities.store_implementation import Store, StoreConnectionPool, StoreMeta, EphemeralStore # noqa: F401 if openc3_redis_cluster: - import openc3enterprise.utilities.store + import openc3enterprise.utilities.store # noqa: F401 diff --git a/openc3/python/test/conversions/test_object_read_conversion.py b/openc3/python/test/conversions/test_object_read_conversion.py index 7d2504a351..e10ef35174 100644 --- a/openc3/python/test/conversions/test_object_read_conversion.py +++ b/openc3/python/test/conversions/test_object_read_conversion.py @@ -34,7 +34,7 @@ def test_takes_cmd_tlm_target_name_packet_name(self): self.assertEqual(orc.converted_bit_size, 0) def test_complains_about_invalid_cmd_tlm(self): - with self.assertRaisesRegex(AttributeError, f"Unknown type:OTHER"): + with self.assertRaisesRegex(AttributeError, "Unknown type:OTHER"): ObjectReadConversion("OTHER", "TGT", "PKT") def test_fills_the_cmd_packet_and_returns_a_hash_of_the_converted_values(self): diff --git a/openc3/python/test/conversions/test_object_write_conversion.py b/openc3/python/test/conversions/test_object_write_conversion.py index aea0a2218d..dec9f9f55c 100644 --- a/openc3/python/test/conversions/test_object_write_conversion.py +++ b/openc3/python/test/conversions/test_object_write_conversion.py @@ -34,7 +34,7 @@ def test_takes_cmd_tlm_target_name_packet_name(self): self.assertEqual(owc.converted_bit_size, 0) def test_complains_about_invalid_cmd_tlm(self): - with self.assertRaisesRegex(AttributeError, f"Unknown type:OTHER"): + with 
self.assertRaisesRegex(AttributeError, "Unknown type:OTHER"): ObjectWriteConversion("OTHER", "TGT", "PKT") def test_writes_the_cmd_packet_and_returns_a_raw_block(self): diff --git a/openc3/python/test/models/test_metric_model.py b/openc3/python/test/models/test_metric_model.py index 5532fe2a69..5d453281cf 100644 --- a/openc3/python/test/models/test_metric_model.py +++ b/openc3/python/test/models/test_metric_model.py @@ -1,4 +1,3 @@ - # Copyright 2024 OpenC3, Inc. # All Rights Reserved. # @@ -15,71 +14,68 @@ # This file may also be used under the terms of a commercial license # if purchased from OpenC3, Inc. -import time import unittest -import fakeredis import unittest.mock from test.test_helper import * from openc3.models.metric_model import MetricModel -from openc3.conversions.generic_conversion import GenericConversion class TestMetricModel(unittest.TestCase): def setUp(self): - mock_redis(self) + mock_redis(self) def test_returns_all_the_metrics(self): - model = MetricModel(name= "foo", scope= "scope", values= {"test" : {"value" : 5}}) - model.create(force= True) - all_metrics = MetricModel.all(scope= "scope") + model = MetricModel(name="foo", scope="scope", values={"test": {"value": 5}}) + model.create(force=True) + all_metrics = MetricModel.all(scope="scope") self.assertEqual(all_metrics["foo"]["values"]["test"]["value"], (5)) def test_encodes_all_the_input_parameters(self): - model = MetricModel(name= "foo", scope= "scope", values= {"test" : {"value" : 5}}) + model = MetricModel(name="foo", scope="scope", values={"test": {"value": 5}}) json = model.as_json() self.assertEqual(json["name"], ("foo")) def test_gets_by_name_in_scope(self): - MetricModel(name= "baz", scope= "scope", values= {"test ": {"value" :6}}) - result = MetricModel.get(name= "baz", scope= "scope") - self.assertIsNone(result) #self.assertEqual(result['name'], ('baz')) + MetricModel(name="baz", scope="scope", values={"test ": {"value": 6}}) + result = MetricModel.get(name="baz", scope="scope") + self.assertIsNone(result) # self.assertEqual(result['name'], ('baz')) def test_destroys_by_name_in_scope(self): - MetricModel(name= "baz", scope= "scope", values= {"test ": {"value" :6}}) - MetricModel(name= "bOz", scope= "scope", values= {"test ": {"value" :6}}) - MetricModel.destroy(scope= 'scope', name= 'baz') - result = MetricModel.get(name= "baz", scope= "scope") + MetricModel(name="baz", scope="scope", values={"test ": {"value": 6}}) + MetricModel(name="bOz", scope="scope", values={"test ": {"value": 6}}) + MetricModel.destroy(scope="scope", name="baz") + result = MetricModel.get(name="baz", scope="scope") self.assertIsNone(result) def test_returns_all_names(self): - MetricModel(name= 'baz', scope= "scope", values= {"test ": {"value" :6}}) - result = MetricModel.names(scope= "scope") - self.assertListEqual(result, []) #self.assertEqual(result[0], ('baz')) + MetricModel(name="baz", scope="scope", values={"test ": {"value": 6}}) + result = MetricModel.names(scope="scope") + self.assertListEqual(result, []) # self.assertEqual(result[0], ('baz')) def test_returns_redis_metrics_from_store_and_ephemeral_store(self): values = { - 'connected_clients' : {'value' : 37}, - 'used_memory_rss' : {'value' : 0}, - 'total_commands_processed' : {'value' : 0}, - 'instantaneous_ops_per_sec' : {'value' : 0}, - 'instantaneous_input_kbps' : {'value' : 0}, - 'instantaneous_output_kbps' : {'value' : 0}, - 'latency_percentiles_usec_hget': {'value' : '1,2'} + "connected_clients": {"value": 37}, + "used_memory_rss": {"value": 0}, + 
"total_commands_processed": {"value": 0}, + "instantaneous_ops_per_sec": {"value": 0}, + "instantaneous_input_kbps": {"value": 0}, + "instantaneous_output_kbps": {"value": 0}, + "latency_percentiles_usec_hget": {"value": "1,2"}, } - model = MetricModel(name= "all", scope= "scope", values= {"test" : {"value" : 7}}) - model.create(force= True) + model = MetricModel(name="all", scope="scope", values={"test": {"value": 7}}) + model.create(force=True) json = {} - json['name'] = 'all' - json['values'] = values - MetricModel.set(json, scope= 'scope') + json["name"] = "all" + json["values"] = values + MetricModel.set(json, scope="scope") # awaiting FakeRedis support for the server INFO command # allow(openc3.Store.instance).to receive(:info) do - # values + # values # allow(openc3.EphemeralStore.instance).to receive(:info) do - # values + # values self.assertRaises(Exception, MetricModel.redis_metrics) - #self.assertEqual(result.empty?, (False)) - #self.assertEqual(result['redis_connected_clients_total']['value'], (37)) + # self.assertEqual(result.empty?, (False)) + # self.assertEqual(result['redis_connected_clients_total']['value'], (37)) diff --git a/openc3/python/test/models/test_secret_model.py b/openc3/python/test/models/test_secret_model.py index 5edb4b6242..4cbac3c3b1 100644 --- a/openc3/python/test/models/test_secret_model.py +++ b/openc3/python/test/models/test_secret_model.py @@ -1,4 +1,3 @@ - # Copyright 2024 OpenC3, Inc. # All Rights Reserved. # @@ -16,34 +15,32 @@ # This file may also be used under the terms of a commercial license # if purchased from OpenC3, Inc. -import time import unittest import unittest.mock from test.test_helper import * from openc3.models.secret_model import SecretModel -from openc3.conversions.generic_conversion import GenericConversion class TestSecretModel(unittest.TestCase): def setUp(self): - mock_redis(self) + mock_redis(self) def test_creates_new(self): - model = SecretModel(name= 'secret', value= 'tacit', scope= 'DEFAULT') + model = SecretModel(name="secret", value="tacit", scope="DEFAULT") self.assertIsInstance(model, SecretModel) def test_self_get(self): - name = SecretModel.get(name= 'secret', scope= 'DEFAULT') - self.assertIsNone(name) # eq('secret') + name = SecretModel.get(name="secret", scope="DEFAULT") + self.assertIsNone(name) # eq('secret') def test_self_all(self): - all_secrets = SecretModel.all(scope= 'DEFAULT') - self.assertEqual(all_secrets, {}) # eq('secret') + all_secrets = SecretModel.all(scope="DEFAULT") + self.assertEqual(all_secrets, {}) # eq('secret') def test_self_names(self): - names = SecretModel.names(scope= 'DEFAULT') - self.assertEqual(names, []) # eq('secret') + names = SecretModel.names(scope="DEFAULT") + self.assertEqual(names, []) # eq('secret') def test_as_json(self): - model = SecretModel(name= 'secreter', value= 'silent', scope= 'DEFAULT') - self.assertEqual(model.as_json()['name'], ('secreter')) + model = SecretModel(name="secreter", value="silent", scope="DEFAULT") + self.assertEqual(model.as_json()["name"], ("secreter")) diff --git a/openc3/python/test/models/test_stash_model.py b/openc3/python/test/models/test_stash_model.py index 95ba462fd8..fc9d9f9930 100644 --- a/openc3/python/test/models/test_stash_model.py +++ b/openc3/python/test/models/test_stash_model.py @@ -1,4 +1,3 @@ - # Copyright 2024 OpenC3, Inc. # All Rights Reserved. # @@ -16,35 +15,33 @@ # This file may also be used under the terms of a commercial license # if purchased from OpenC3, Inc. 
-import time import unittest import unittest.mock from test.test_helper import * from openc3.models.stash_model import StashModel -from openc3.conversions.generic_conversion import GenericConversion class TestStashModel(unittest.TestCase): def setUp(self): - mock_redis(self) - setup_system() + mock_redis(self) + setup_system() def test_creates_new(self): - model = StashModel(name= 'sm', value= 'stash', scope= 'DEFAULT') + model = StashModel(name="sm", value="stash", scope="DEFAULT") self.assertIsInstance(model, StashModel) def test_self_get(self): - name = StashModel.get(name= 'sm', scope= 'DEFAULT') - self.assertIsNone(name) # eq('sm') + name = StashModel.get(name="sm", scope="DEFAULT") + self.assertIsNone(name) # eq('sm') def test_self_all(self): - all_stash = StashModel.all(scope= 'DEFAULT') - self.assertEqual(all_stash, {}) # eq('sm') + all_stash = StashModel.all(scope="DEFAULT") + self.assertEqual(all_stash, {}) # eq('sm') def test_self_names(self): - names = StashModel.names(scope= 'DEFAULT') - self.assertEqual(names, []) # eq('sm') + names = StashModel.names(scope="DEFAULT") + self.assertEqual(names, []) # eq('sm') def test_as_json(self): - model = StashModel(name= 'sm', value= 'stashef', scope= 'DEFAULT') - self.assertEqual(model.as_json()['name'], ('sm')) + model = StashModel(name="sm", value="stashef", scope="DEFAULT") + self.assertEqual(model.as_json()["name"], ("sm"))
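
For reference, a minimal usage sketch of the relocated Store API (not part of the patch). It assumes the non-cluster Core configuration, a Redis instance reachable through the OPENC3_REDIS_* environment variables, and reuses the MANGO__TOPIC stream name from the docstring examples above; since those xadd/xtrim docstrings are still written in Ruby syntax, a Python rendering may help:

    from openc3.utilities.store import Store, EphemeralStore

    # Class-level calls go through StoreMeta to the singleton instance, which
    # delegates to a pooled redis.Redis built from OPENC3_REDIS_HOSTNAME/PORT.
    entry_id = Store.write_topic("MANGO__TOPIC", {"message": "something"}, maxlen=1000)

    # Returns [id, msg_hash] for the newest entry, or (None, None) if the stream is empty.
    newest_id, newest_hash = Store.get_newest_message("MANGO__TOPIC")

    # read_topics is a generator yielding (topic, msg_id, msg_hash, redis) tuples,
    # starting after the last offset recorded for the calling thread.
    for topic, msg_id, msg_hash, _redis in Store.read_topics(["MANGO__TOPIC"], timeout_ms=1000):
        print(topic, msg_id, msg_hash)  # hash keys/values arrive as bytes (decode_responses is not used)

    # EphemeralStore exposes the same API against the ephemeral Redis instance
    # (OPENC3_REDIS_EPHEMERAL_HOSTNAME / OPENC3_REDIS_EPHEMERAL_PORT).
    EphemeralStore.write_topic("MANGO__TOPIC", {"message": "ephemeral"})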